// SPDX-License-Identifier: GPL-2.0

// Generated by scripts/atomic/gen-atomic-fallback.sh
// DO NOT MODIFY THIS FILE DIRECTLY

#ifndef _LINUX_ATOMIC_FALLBACK_H
#define _LINUX_ATOMIC_FALLBACK_H

#include <linux/compiler.h>

#if defined(arch_xchg)
#define raw_xchg arch_xchg
#elif defined(arch_xchg_relaxed)
#define raw_xchg(...) \
	__atomic_op_fence(arch_xchg, __VA_ARGS__)
#else
extern void raw_xchg_not_implemented(void);
#define raw_xchg(...) raw_xchg_not_implemented()
#endif

#if defined(arch_xchg_acquire)
#define raw_xchg_acquire arch_xchg_acquire
#elif defined(arch_xchg_relaxed)
#define raw_xchg_acquire(...) \
	__atomic_op_acquire(arch_xchg, __VA_ARGS__)
#elif defined(arch_xchg)
#define raw_xchg_acquire arch_xchg
#else
extern void raw_xchg_acquire_not_implemented(void);
#define raw_xchg_acquire(...) raw_xchg_acquire_not_implemented()
#endif

#if defined(arch_xchg_release)
#define raw_xchg_release arch_xchg_release
#elif defined(arch_xchg_relaxed)
#define raw_xchg_release(...) \
	__atomic_op_release(arch_xchg, __VA_ARGS__)
#elif defined(arch_xchg)
#define raw_xchg_release arch_xchg
#else
extern void raw_xchg_release_not_implemented(void);
#define raw_xchg_release(...) raw_xchg_release_not_implemented()
#endif

#if defined(arch_xchg_relaxed)
#define raw_xchg_relaxed arch_xchg_relaxed
#elif defined(arch_xchg)
#define raw_xchg_relaxed arch_xchg
#else
extern void raw_xchg_relaxed_not_implemented(void);
#define raw_xchg_relaxed(...) raw_xchg_relaxed_not_implemented()
#endif

#if defined(arch_cmpxchg)
#define raw_cmpxchg arch_cmpxchg
#elif defined(arch_cmpxchg_relaxed)
#define raw_cmpxchg(...) \
	__atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
#else
extern void raw_cmpxchg_not_implemented(void);
#define raw_cmpxchg(...) raw_cmpxchg_not_implemented()
#endif

#if defined(arch_cmpxchg_acquire)
#define raw_cmpxchg_acquire arch_cmpxchg_acquire
#elif defined(arch_cmpxchg_relaxed)
#define raw_cmpxchg_acquire(...) \
	__atomic_op_acquire(arch_cmpxchg, __VA_ARGS__)
#elif defined(arch_cmpxchg)
#define raw_cmpxchg_acquire arch_cmpxchg
#else
extern void raw_cmpxchg_acquire_not_implemented(void);
#define raw_cmpxchg_acquire(...) raw_cmpxchg_acquire_not_implemented()
#endif

#if defined(arch_cmpxchg_release)
#define raw_cmpxchg_release arch_cmpxchg_release
#elif defined(arch_cmpxchg_relaxed)
#define raw_cmpxchg_release(...) \
	__atomic_op_release(arch_cmpxchg, __VA_ARGS__)
#elif defined(arch_cmpxchg)
#define raw_cmpxchg_release arch_cmpxchg
#else
extern void raw_cmpxchg_release_not_implemented(void);
#define raw_cmpxchg_release(...) raw_cmpxchg_release_not_implemented()
#endif

#if defined(arch_cmpxchg_relaxed)
#define raw_cmpxchg_relaxed arch_cmpxchg_relaxed
#elif defined(arch_cmpxchg)
#define raw_cmpxchg_relaxed arch_cmpxchg
#else
extern void raw_cmpxchg_relaxed_not_implemented(void);
#define raw_cmpxchg_relaxed(...) raw_cmpxchg_relaxed_not_implemented()
#endif

#if defined(arch_cmpxchg64)
#define raw_cmpxchg64 arch_cmpxchg64
#elif defined(arch_cmpxchg64_relaxed)
#define raw_cmpxchg64(...) \
	__atomic_op_fence(arch_cmpxchg64, __VA_ARGS__)
#else
extern void raw_cmpxchg64_not_implemented(void);
#define raw_cmpxchg64(...) raw_cmpxchg64_not_implemented()
#endif
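/*
 * Illustrative sketch (not generated code): each ordering variant above is
 * selected by the same ladder - use the arch-provided op if present, else
 * build it from the _relaxed op plus a fence wrapper, else fall back to a
 * link-error stub. Assuming an architecture that only provides
 * arch_xchg_relaxed(), a call like raw_xchg(&x, 1) roughly expands (via
 * __atomic_op_fence) to:
 *
 *	__atomic_pre_full_fence();
 *	__ret = arch_xchg_relaxed(&x, 1);
 *	__atomic_post_full_fence();
 */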

#if defined(arch_cmpxchg64_acquire)
#define raw_cmpxchg64_acquire arch_cmpxchg64_acquire
#elif defined(arch_cmpxchg64_relaxed)
#define raw_cmpxchg64_acquire(...) \
	__atomic_op_acquire(arch_cmpxchg64, __VA_ARGS__)
#elif defined(arch_cmpxchg64)
#define raw_cmpxchg64_acquire arch_cmpxchg64
#else
extern void raw_cmpxchg64_acquire_not_implemented(void);
#define raw_cmpxchg64_acquire(...) raw_cmpxchg64_acquire_not_implemented()
#endif

#if defined(arch_cmpxchg64_release)
#define raw_cmpxchg64_release arch_cmpxchg64_release
#elif defined(arch_cmpxchg64_relaxed)
#define raw_cmpxchg64_release(...) \
	__atomic_op_release(arch_cmpxchg64, __VA_ARGS__)
#elif defined(arch_cmpxchg64)
#define raw_cmpxchg64_release arch_cmpxchg64
#else
extern void raw_cmpxchg64_release_not_implemented(void);
#define raw_cmpxchg64_release(...) raw_cmpxchg64_release_not_implemented()
#endif

#if defined(arch_cmpxchg64_relaxed)
#define raw_cmpxchg64_relaxed arch_cmpxchg64_relaxed
#elif defined(arch_cmpxchg64)
#define raw_cmpxchg64_relaxed arch_cmpxchg64
#else
extern void raw_cmpxchg64_relaxed_not_implemented(void);
#define raw_cmpxchg64_relaxed(...) raw_cmpxchg64_relaxed_not_implemented()
#endif

#if defined(arch_cmpxchg128)
#define raw_cmpxchg128 arch_cmpxchg128
#elif defined(arch_cmpxchg128_relaxed)
#define raw_cmpxchg128(...) \
	__atomic_op_fence(arch_cmpxchg128, __VA_ARGS__)
#else
extern void raw_cmpxchg128_not_implemented(void);
#define raw_cmpxchg128(...) raw_cmpxchg128_not_implemented()
#endif

#if defined(arch_cmpxchg128_acquire)
#define raw_cmpxchg128_acquire arch_cmpxchg128_acquire
#elif defined(arch_cmpxchg128_relaxed)
#define raw_cmpxchg128_acquire(...) \
	__atomic_op_acquire(arch_cmpxchg128, __VA_ARGS__)
#elif defined(arch_cmpxchg128)
#define raw_cmpxchg128_acquire arch_cmpxchg128
#else
extern void raw_cmpxchg128_acquire_not_implemented(void);
#define raw_cmpxchg128_acquire(...) raw_cmpxchg128_acquire_not_implemented()
#endif

#if defined(arch_cmpxchg128_release)
#define raw_cmpxchg128_release arch_cmpxchg128_release
#elif defined(arch_cmpxchg128_relaxed)
#define raw_cmpxchg128_release(...) \
	__atomic_op_release(arch_cmpxchg128, __VA_ARGS__)
#elif defined(arch_cmpxchg128)
#define raw_cmpxchg128_release arch_cmpxchg128
#else
extern void raw_cmpxchg128_release_not_implemented(void);
#define raw_cmpxchg128_release(...) raw_cmpxchg128_release_not_implemented()
#endif

#if defined(arch_cmpxchg128_relaxed)
#define raw_cmpxchg128_relaxed arch_cmpxchg128_relaxed
#elif defined(arch_cmpxchg128)
#define raw_cmpxchg128_relaxed arch_cmpxchg128
#else
extern void raw_cmpxchg128_relaxed_not_implemented(void);
#define raw_cmpxchg128_relaxed(...) raw_cmpxchg128_relaxed_not_implemented()
#endif

#if defined(arch_try_cmpxchg)
#define raw_try_cmpxchg arch_try_cmpxchg
#elif defined(arch_try_cmpxchg_relaxed)
#define raw_try_cmpxchg(...) \
	__atomic_op_fence(arch_try_cmpxchg, __VA_ARGS__)
#else
#define raw_try_cmpxchg(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif
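/*
 * Illustrative usage sketch (assumed caller code, not part of this header):
 * the try_cmpxchg() family returns true on success and, on failure, writes
 * the value actually observed back through @_oldp, which yields the usual
 * update loop without re-reading the variable by hand:
 *
 *	int old = *ptr;
 *
 *	do {
 *		new = compute(old);	// compute() is a hypothetical helper
 *	} while (!raw_try_cmpxchg(ptr, &old, new));
 */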

#if defined(arch_try_cmpxchg_acquire)
#define raw_try_cmpxchg_acquire arch_try_cmpxchg_acquire
#elif defined(arch_try_cmpxchg_relaxed)
#define raw_try_cmpxchg_acquire(...) \
	__atomic_op_acquire(arch_try_cmpxchg, __VA_ARGS__)
#elif defined(arch_try_cmpxchg)
#define raw_try_cmpxchg_acquire arch_try_cmpxchg
#else
#define raw_try_cmpxchg_acquire(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg_acquire((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg_release)
#define raw_try_cmpxchg_release arch_try_cmpxchg_release
#elif defined(arch_try_cmpxchg_relaxed)
#define raw_try_cmpxchg_release(...) \
	__atomic_op_release(arch_try_cmpxchg, __VA_ARGS__)
#elif defined(arch_try_cmpxchg)
#define raw_try_cmpxchg_release arch_try_cmpxchg
#else
#define raw_try_cmpxchg_release(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg_release((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg_relaxed)
#define raw_try_cmpxchg_relaxed arch_try_cmpxchg_relaxed
#elif defined(arch_try_cmpxchg)
#define raw_try_cmpxchg_relaxed arch_try_cmpxchg
#else
#define raw_try_cmpxchg_relaxed(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg_relaxed((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg64)
#define raw_try_cmpxchg64 arch_try_cmpxchg64
#elif defined(arch_try_cmpxchg64_relaxed)
#define raw_try_cmpxchg64(...) \
	__atomic_op_fence(arch_try_cmpxchg64, __VA_ARGS__)
#else
#define raw_try_cmpxchg64(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg64((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg64_acquire)
#define raw_try_cmpxchg64_acquire arch_try_cmpxchg64_acquire
#elif defined(arch_try_cmpxchg64_relaxed)
#define raw_try_cmpxchg64_acquire(...) \
	__atomic_op_acquire(arch_try_cmpxchg64, __VA_ARGS__)
#elif defined(arch_try_cmpxchg64)
#define raw_try_cmpxchg64_acquire arch_try_cmpxchg64
#else
#define raw_try_cmpxchg64_acquire(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg64_acquire((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg64_release)
#define raw_try_cmpxchg64_release arch_try_cmpxchg64_release
#elif defined(arch_try_cmpxchg64_relaxed)
#define raw_try_cmpxchg64_release(...) \
	__atomic_op_release(arch_try_cmpxchg64, __VA_ARGS__)
#elif defined(arch_try_cmpxchg64)
#define raw_try_cmpxchg64_release arch_try_cmpxchg64
#else
#define raw_try_cmpxchg64_release(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg64_release((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg64_relaxed)
#define raw_try_cmpxchg64_relaxed arch_try_cmpxchg64_relaxed
#elif defined(arch_try_cmpxchg64)
#define raw_try_cmpxchg64_relaxed arch_try_cmpxchg64
#else
#define raw_try_cmpxchg64_relaxed(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg64_relaxed((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg128)
#define raw_try_cmpxchg128 arch_try_cmpxchg128
#elif defined(arch_try_cmpxchg128_relaxed)
#define raw_try_cmpxchg128(...) \
	__atomic_op_fence(arch_try_cmpxchg128, __VA_ARGS__)
#else
#define raw_try_cmpxchg128(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg128((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg128_acquire)
#define raw_try_cmpxchg128_acquire arch_try_cmpxchg128_acquire
#elif defined(arch_try_cmpxchg128_relaxed)
#define raw_try_cmpxchg128_acquire(...) \
	__atomic_op_acquire(arch_try_cmpxchg128, __VA_ARGS__)
#elif defined(arch_try_cmpxchg128)
#define raw_try_cmpxchg128_acquire arch_try_cmpxchg128
#else
#define raw_try_cmpxchg128_acquire(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg128_acquire((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg128_release)
#define raw_try_cmpxchg128_release arch_try_cmpxchg128_release
#elif defined(arch_try_cmpxchg128_relaxed)
#define raw_try_cmpxchg128_release(...) \
	__atomic_op_release(arch_try_cmpxchg128, __VA_ARGS__)
#elif defined(arch_try_cmpxchg128)
#define raw_try_cmpxchg128_release arch_try_cmpxchg128
#else
#define raw_try_cmpxchg128_release(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg128_release((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#if defined(arch_try_cmpxchg128_relaxed)
#define raw_try_cmpxchg128_relaxed arch_try_cmpxchg128_relaxed
#elif defined(arch_try_cmpxchg128)
#define raw_try_cmpxchg128_relaxed arch_try_cmpxchg128
#else
#define raw_try_cmpxchg128_relaxed(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg128_relaxed((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#define raw_cmpxchg_local arch_cmpxchg_local

#ifdef arch_try_cmpxchg_local
#define raw_try_cmpxchg_local arch_try_cmpxchg_local
#else
#define raw_try_cmpxchg_local(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg_local((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#define raw_cmpxchg64_local arch_cmpxchg64_local

#ifdef arch_try_cmpxchg64_local
#define raw_try_cmpxchg64_local arch_try_cmpxchg64_local
#else
#define raw_try_cmpxchg64_local(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg64_local((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#define raw_cmpxchg128_local arch_cmpxchg128_local

#ifdef arch_try_cmpxchg128_local
#define raw_try_cmpxchg128_local arch_try_cmpxchg128_local
#else
#define raw_try_cmpxchg128_local(_ptr, _oldp, _new) \
({ \
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
	___r = raw_cmpxchg128_local((_ptr), ___o, (_new)); \
	if (unlikely(___r != ___o)) \
		*___op = ___r; \
	likely(___r == ___o); \
})
#endif

#define raw_sync_cmpxchg arch_sync_cmpxchg

/**
 * raw_atomic_read() - atomic load with relaxed ordering
 * @v: pointer to atomic_t
 *
 * Atomically loads the value of @v with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_read() elsewhere.
 *
 * Return: The value loaded from @v.
 */
static __always_inline int
raw_atomic_read(const atomic_t *v)
{
	return arch_atomic_read(v);
}

/**
 * raw_atomic_read_acquire() - atomic load with acquire ordering
 * @v: pointer to atomic_t
 *
 * Atomically loads the value of @v with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_read_acquire() elsewhere.
 *
 * Return: The value loaded from @v.
 */
static __always_inline int
raw_atomic_read_acquire(const atomic_t *v)
{
#if defined(arch_atomic_read_acquire)
	return arch_atomic_read_acquire(v);
#elif defined(arch_atomic_read)
	return arch_atomic_read(v);
#else
	int ret;

	if (__native_word(atomic_t)) {
		ret = smp_load_acquire(&(v)->counter);
	} else {
		ret = raw_atomic_read(v);
		__atomic_acquire_fence();
	}

	return ret;
#endif
}
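/*
 * Illustrative pairing sketch (assumed data/flag variables, not part of this
 * header): an acquire load is normally paired with a release store (see
 * raw_atomic_set_release() below), so that writes made before the release
 * are visible once the acquire observes the flag:
 *
 *	// writer				// reader
 *	WRITE_ONCE(data, 1);			if (raw_atomic_read_acquire(&flag))
 *	raw_atomic_set_release(&flag, 1);		r = READ_ONCE(data);
 */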

/**
 * raw_atomic_set() - atomic set with relaxed ordering
 * @v: pointer to atomic_t
 * @i: int value to assign
 *
 * Atomically sets @v to @i with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_set() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_set(atomic_t *v, int i)
{
	arch_atomic_set(v, i);
}

/**
 * raw_atomic_set_release() - atomic set with release ordering
 * @v: pointer to atomic_t
 * @i: int value to assign
 *
 * Atomically sets @v to @i with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_set_release() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_set_release(atomic_t *v, int i)
{
#if defined(arch_atomic_set_release)
	arch_atomic_set_release(v, i);
#elif defined(arch_atomic_set)
	arch_atomic_set(v, i);
#else
	if (__native_word(atomic_t)) {
		smp_store_release(&(v)->counter, i);
	} else {
		__atomic_release_fence();
		raw_atomic_set(v, i);
	}
#endif
}

/**
 * raw_atomic_add() - atomic add with relaxed ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_add() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_add(int i, atomic_t *v)
{
	arch_atomic_add(i, v);
}

/**
 * raw_atomic_add_return() - atomic add with full ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_add_return() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_add_return(int i, atomic_t *v)
{
#if defined(arch_atomic_add_return)
	return arch_atomic_add_return(i, v);
#elif defined(arch_atomic_add_return_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_add_return_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic_add_return"
#endif
}
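/*
 * Illustrative sketch (assumed values, not part of the generated API): the
 * *_return() operations yield the updated value, e.g.:
 *
 *	atomic_t v = ATOMIC_INIT(3);
 *
 *	r = raw_atomic_add_return(5, &v);	// r == 8, v == 8
 *
 * Contrast with the fetch_*() operations below, which return the original
 * value instead.
 */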

/**
 * raw_atomic_add_return_acquire() - atomic add with acquire ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_add_return_acquire() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_add_return_acquire(int i, atomic_t *v)
{
#if defined(arch_atomic_add_return_acquire)
	return arch_atomic_add_return_acquire(i, v);
#elif defined(arch_atomic_add_return_relaxed)
	int ret = arch_atomic_add_return_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_add_return)
	return arch_atomic_add_return(i, v);
#else
#error "Unable to define raw_atomic_add_return_acquire"
#endif
}

/**
 * raw_atomic_add_return_release() - atomic add with release ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_add_return_release() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_add_return_release(int i, atomic_t *v)
{
#if defined(arch_atomic_add_return_release)
	return arch_atomic_add_return_release(i, v);
#elif defined(arch_atomic_add_return_relaxed)
	__atomic_release_fence();
	return arch_atomic_add_return_relaxed(i, v);
#elif defined(arch_atomic_add_return)
	return arch_atomic_add_return(i, v);
#else
#error "Unable to define raw_atomic_add_return_release"
#endif
}

/**
 * raw_atomic_add_return_relaxed() - atomic add with relaxed ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_add_return_relaxed() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_add_return_relaxed(int i, atomic_t *v)
{
#if defined(arch_atomic_add_return_relaxed)
	return arch_atomic_add_return_relaxed(i, v);
#elif defined(arch_atomic_add_return)
	return arch_atomic_add_return(i, v);
#else
#error "Unable to define raw_atomic_add_return_relaxed"
#endif
}

/**
 * raw_atomic_fetch_add() - atomic add with full ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_add() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_add(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_add)
	return arch_atomic_fetch_add(i, v);
#elif defined(arch_atomic_fetch_add_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_fetch_add_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic_fetch_add"
#endif
}
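/*
 * Illustrative sketch (assumed values, not part of the generated API): the
 * fetch_*() operations return the value seen before the update, e.g.:
 *
 *	atomic_t v = ATOMIC_INIT(3);
 *
 *	r = raw_atomic_fetch_add(5, &v);	// r == 3, v == 8
 */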

/**
 * raw_atomic_fetch_add_acquire() - atomic add with acquire ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_add_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_add_acquire(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_add_acquire)
	return arch_atomic_fetch_add_acquire(i, v);
#elif defined(arch_atomic_fetch_add_relaxed)
	int ret = arch_atomic_fetch_add_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_fetch_add)
	return arch_atomic_fetch_add(i, v);
#else
#error "Unable to define raw_atomic_fetch_add_acquire"
#endif
}

/**
 * raw_atomic_fetch_add_release() - atomic add with release ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_add_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_add_release(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_add_release)
	return arch_atomic_fetch_add_release(i, v);
#elif defined(arch_atomic_fetch_add_relaxed)
	__atomic_release_fence();
	return arch_atomic_fetch_add_relaxed(i, v);
#elif defined(arch_atomic_fetch_add)
	return arch_atomic_fetch_add(i, v);
#else
#error "Unable to define raw_atomic_fetch_add_release"
#endif
}

/**
 * raw_atomic_fetch_add_relaxed() - atomic add with relaxed ordering
 * @i: int value to add
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_add_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_add_relaxed(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_add_relaxed)
	return arch_atomic_fetch_add_relaxed(i, v);
#elif defined(arch_atomic_fetch_add)
	return arch_atomic_fetch_add(i, v);
#else
#error "Unable to define raw_atomic_fetch_add_relaxed"
#endif
}

/**
 * raw_atomic_sub() - atomic subtract with relaxed ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_sub() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_sub(int i, atomic_t *v)
{
	arch_atomic_sub(i, v);
}

/**
 * raw_atomic_sub_return() - atomic subtract with full ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_sub_return() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_sub_return(int i, atomic_t *v)
{
#if defined(arch_atomic_sub_return)
	return arch_atomic_sub_return(i, v);
#elif defined(arch_atomic_sub_return_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_sub_return_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic_sub_return"
#endif
}

/**
 * raw_atomic_sub_return_acquire() - atomic subtract with acquire ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_sub_return_acquire() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_sub_return_acquire(int i, atomic_t *v)
{
#if defined(arch_atomic_sub_return_acquire)
	return arch_atomic_sub_return_acquire(i, v);
#elif defined(arch_atomic_sub_return_relaxed)
	int ret = arch_atomic_sub_return_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_sub_return)
	return arch_atomic_sub_return(i, v);
#else
#error "Unable to define raw_atomic_sub_return_acquire"
#endif
}

/**
 * raw_atomic_sub_return_release() - atomic subtract with release ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_sub_return_release() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_sub_return_release(int i, atomic_t *v)
{
#if defined(arch_atomic_sub_return_release)
	return arch_atomic_sub_return_release(i, v);
#elif defined(arch_atomic_sub_return_relaxed)
	__atomic_release_fence();
	return arch_atomic_sub_return_relaxed(i, v);
#elif defined(arch_atomic_sub_return)
	return arch_atomic_sub_return(i, v);
#else
#error "Unable to define raw_atomic_sub_return_release"
#endif
}

/**
 * raw_atomic_sub_return_relaxed() - atomic subtract with relaxed ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_sub_return_relaxed() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_sub_return_relaxed(int i, atomic_t *v)
{
#if defined(arch_atomic_sub_return_relaxed)
	return arch_atomic_sub_return_relaxed(i, v);
#elif defined(arch_atomic_sub_return)
	return arch_atomic_sub_return(i, v);
#else
#error "Unable to define raw_atomic_sub_return_relaxed"
#endif
}

/**
 * raw_atomic_fetch_sub() - atomic subtract with full ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_sub() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_sub(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_sub)
	return arch_atomic_fetch_sub(i, v);
#elif defined(arch_atomic_fetch_sub_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_fetch_sub_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic_fetch_sub"
#endif
}

/**
 * raw_atomic_fetch_sub_acquire() - atomic subtract with acquire ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_sub_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_sub_acquire(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_sub_acquire)
	return arch_atomic_fetch_sub_acquire(i, v);
#elif defined(arch_atomic_fetch_sub_relaxed)
	int ret = arch_atomic_fetch_sub_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_fetch_sub)
	return arch_atomic_fetch_sub(i, v);
#else
#error "Unable to define raw_atomic_fetch_sub_acquire"
#endif
}

/**
 * raw_atomic_fetch_sub_release() - atomic subtract with release ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_sub_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_sub_release(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_sub_release)
	return arch_atomic_fetch_sub_release(i, v);
#elif defined(arch_atomic_fetch_sub_relaxed)
	__atomic_release_fence();
	return arch_atomic_fetch_sub_relaxed(i, v);
#elif defined(arch_atomic_fetch_sub)
	return arch_atomic_fetch_sub(i, v);
#else
#error "Unable to define raw_atomic_fetch_sub_release"
#endif
}

/**
 * raw_atomic_fetch_sub_relaxed() - atomic subtract with relaxed ordering
 * @i: int value to subtract
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_sub_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_sub_relaxed(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_sub_relaxed)
	return arch_atomic_fetch_sub_relaxed(i, v);
#elif defined(arch_atomic_fetch_sub)
	return arch_atomic_fetch_sub(i, v);
#else
#error "Unable to define raw_atomic_fetch_sub_relaxed"
#endif
}

/**
 * raw_atomic_inc() - atomic increment with relaxed ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_inc() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_inc(atomic_t *v)
{
#if defined(arch_atomic_inc)
	arch_atomic_inc(v);
#else
	raw_atomic_add(1, v);
#endif
}

/**
 * raw_atomic_inc_return() - atomic increment with full ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_inc_return() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_inc_return(atomic_t *v)
{
#if defined(arch_atomic_inc_return)
	return arch_atomic_inc_return(v);
#elif defined(arch_atomic_inc_return_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_inc_return_relaxed(v);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_atomic_add_return(1, v);
#endif
}
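/*
 * Illustrative sketch (assumed values): when an architecture provides no
 * arch_atomic_inc*() ops, the increment helpers above degrade to the generic
 * add-based forms, e.g. raw_atomic_inc(v) becomes raw_atomic_add(1, v) and
 * raw_atomic_inc_return(v) becomes raw_atomic_add_return(1, v), so with
 * v == 0, raw_atomic_inc_return(&v) returns 1 and leaves v == 1.
 */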

/**
 * raw_atomic_inc_return_acquire() - atomic increment with acquire ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_inc_return_acquire() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_inc_return_acquire(atomic_t *v)
{
#if defined(arch_atomic_inc_return_acquire)
	return arch_atomic_inc_return_acquire(v);
#elif defined(arch_atomic_inc_return_relaxed)
	int ret = arch_atomic_inc_return_relaxed(v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_inc_return)
	return arch_atomic_inc_return(v);
#else
	return raw_atomic_add_return_acquire(1, v);
#endif
}

/**
 * raw_atomic_inc_return_release() - atomic increment with release ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_inc_return_release() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_inc_return_release(atomic_t *v)
{
#if defined(arch_atomic_inc_return_release)
	return arch_atomic_inc_return_release(v);
#elif defined(arch_atomic_inc_return_relaxed)
	__atomic_release_fence();
	return arch_atomic_inc_return_relaxed(v);
#elif defined(arch_atomic_inc_return)
	return arch_atomic_inc_return(v);
#else
	return raw_atomic_add_return_release(1, v);
#endif
}

/**
 * raw_atomic_inc_return_relaxed() - atomic increment with relaxed ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_inc_return_relaxed() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_inc_return_relaxed(atomic_t *v)
{
#if defined(arch_atomic_inc_return_relaxed)
	return arch_atomic_inc_return_relaxed(v);
#elif defined(arch_atomic_inc_return)
	return arch_atomic_inc_return(v);
#else
	return raw_atomic_add_return_relaxed(1, v);
#endif
}

/**
 * raw_atomic_fetch_inc() - atomic increment with full ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_inc() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_inc(atomic_t *v)
{
#if defined(arch_atomic_fetch_inc)
	return arch_atomic_fetch_inc(v);
#elif defined(arch_atomic_fetch_inc_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_fetch_inc_relaxed(v);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_atomic_fetch_add(1, v);
#endif
}

/**
 * raw_atomic_fetch_inc_acquire() - atomic increment with acquire ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_inc_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_inc_acquire(atomic_t *v)
{
#if defined(arch_atomic_fetch_inc_acquire)
	return arch_atomic_fetch_inc_acquire(v);
#elif defined(arch_atomic_fetch_inc_relaxed)
	int ret = arch_atomic_fetch_inc_relaxed(v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_fetch_inc)
	return arch_atomic_fetch_inc(v);
#else
	return raw_atomic_fetch_add_acquire(1, v);
#endif
}

/**
 * raw_atomic_fetch_inc_release() - atomic increment with release ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_inc_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_inc_release(atomic_t *v)
{
#if defined(arch_atomic_fetch_inc_release)
	return arch_atomic_fetch_inc_release(v);
#elif defined(arch_atomic_fetch_inc_relaxed)
	__atomic_release_fence();
	return arch_atomic_fetch_inc_relaxed(v);
#elif defined(arch_atomic_fetch_inc)
	return arch_atomic_fetch_inc(v);
#else
	return raw_atomic_fetch_add_release(1, v);
#endif
}

/**
 * raw_atomic_fetch_inc_relaxed() - atomic increment with relaxed ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v + 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_inc_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_inc_relaxed(atomic_t *v)
{
#if defined(arch_atomic_fetch_inc_relaxed)
	return arch_atomic_fetch_inc_relaxed(v);
#elif defined(arch_atomic_fetch_inc)
	return arch_atomic_fetch_inc(v);
#else
	return raw_atomic_fetch_add_relaxed(1, v);
#endif
}

/**
 * raw_atomic_dec() - atomic decrement with relaxed ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_dec() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_dec(atomic_t *v)
{
#if defined(arch_atomic_dec)
	arch_atomic_dec(v);
#else
	raw_atomic_sub(1, v);
#endif
}

/**
 * raw_atomic_dec_return() - atomic decrement with full ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_dec_return() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_dec_return(atomic_t *v)
{
#if defined(arch_atomic_dec_return)
	return arch_atomic_dec_return(v);
#elif defined(arch_atomic_dec_return_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_dec_return_relaxed(v);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_atomic_sub_return(1, v);
#endif
}
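/*
 * Illustrative usage sketch (assumed refcount-style counter and release()
 * helper, not part of this header): because raw_atomic_dec_return() returns
 * the updated value with full ordering, a "free on last put" pattern can be
 * written as:
 *
 *	if (raw_atomic_dec_return(&obj->users) == 0)
 *		release(obj);
 */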

/**
 * raw_atomic_dec_return_acquire() - atomic decrement with acquire ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - 1) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_dec_return_acquire() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_dec_return_acquire(atomic_t *v)
{
#if defined(arch_atomic_dec_return_acquire)
	return arch_atomic_dec_return_acquire(v);
#elif defined(arch_atomic_dec_return_relaxed)
	int ret = arch_atomic_dec_return_relaxed(v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_dec_return)
	return arch_atomic_dec_return(v);
#else
	return raw_atomic_sub_return_acquire(1, v);
#endif
}

/**
 * raw_atomic_dec_return_release() - atomic decrement with release ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - 1) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_dec_return_release() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_dec_return_release(atomic_t *v)
{
#if defined(arch_atomic_dec_return_release)
	return arch_atomic_dec_return_release(v);
#elif defined(arch_atomic_dec_return_relaxed)
	__atomic_release_fence();
	return arch_atomic_dec_return_relaxed(v);
#elif defined(arch_atomic_dec_return)
	return arch_atomic_dec_return(v);
#else
	return raw_atomic_sub_return_release(1, v);
#endif
}

/**
 * raw_atomic_dec_return_relaxed() - atomic decrement with relaxed ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_dec_return_relaxed() elsewhere.
 *
 * Return: The updated value of @v.
 */
static __always_inline int
raw_atomic_dec_return_relaxed(atomic_t *v)
{
#if defined(arch_atomic_dec_return_relaxed)
	return arch_atomic_dec_return_relaxed(v);
#elif defined(arch_atomic_dec_return)
	return arch_atomic_dec_return(v);
#else
	return raw_atomic_sub_return_relaxed(1, v);
#endif
}

/**
 * raw_atomic_fetch_dec() - atomic decrement with full ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_dec() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_dec(atomic_t *v)
{
#if defined(arch_atomic_fetch_dec)
	return arch_atomic_fetch_dec(v);
#elif defined(arch_atomic_fetch_dec_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_fetch_dec_relaxed(v);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_atomic_fetch_sub(1, v);
#endif
}

/**
 * raw_atomic_fetch_dec_acquire() - atomic decrement with acquire ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - 1) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_dec_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_dec_acquire(atomic_t *v)
{
#if defined(arch_atomic_fetch_dec_acquire)
	return arch_atomic_fetch_dec_acquire(v);
#elif defined(arch_atomic_fetch_dec_relaxed)
	int ret = arch_atomic_fetch_dec_relaxed(v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_fetch_dec)
	return arch_atomic_fetch_dec(v);
#else
	return raw_atomic_fetch_sub_acquire(1, v);
#endif
}

/**
 * raw_atomic_fetch_dec_release() - atomic decrement with release ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - 1) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_dec_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_dec_release(atomic_t *v)
{
#if defined(arch_atomic_fetch_dec_release)
	return arch_atomic_fetch_dec_release(v);
#elif defined(arch_atomic_fetch_dec_relaxed)
	__atomic_release_fence();
	return arch_atomic_fetch_dec_relaxed(v);
#elif defined(arch_atomic_fetch_dec)
	return arch_atomic_fetch_dec(v);
#else
	return raw_atomic_fetch_sub_release(1, v);
#endif
}

/**
 * raw_atomic_fetch_dec_relaxed() - atomic decrement with relaxed ordering
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v - 1) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_dec_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_dec_relaxed(atomic_t *v)
{
#if defined(arch_atomic_fetch_dec_relaxed)
	return arch_atomic_fetch_dec_relaxed(v);
#elif defined(arch_atomic_fetch_dec)
	return arch_atomic_fetch_dec(v);
#else
	return raw_atomic_fetch_sub_relaxed(1, v);
#endif
}

/**
 * raw_atomic_and() - atomic bitwise AND with relaxed ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v & @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_and() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_and(int i, atomic_t *v)
{
	arch_atomic_and(i, v);
}

/**
 * raw_atomic_fetch_and() - atomic bitwise AND with full ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v & @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_and() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_and(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_and)
	return arch_atomic_fetch_and(i, v);
#elif defined(arch_atomic_fetch_and_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_fetch_and_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic_fetch_and"
#endif
}

/**
 * raw_atomic_fetch_and_acquire() - atomic bitwise AND with acquire ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v & @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_and_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_and_acquire(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_and_acquire)
	return arch_atomic_fetch_and_acquire(i, v);
#elif defined(arch_atomic_fetch_and_relaxed)
	int ret = arch_atomic_fetch_and_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_fetch_and)
	return arch_atomic_fetch_and(i, v);
#else
#error "Unable to define raw_atomic_fetch_and_acquire"
#endif
}

/**
 * raw_atomic_fetch_and_release() - atomic bitwise AND with release ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v & @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_and_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_and_release(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_and_release)
	return arch_atomic_fetch_and_release(i, v);
#elif defined(arch_atomic_fetch_and_relaxed)
	__atomic_release_fence();
	return arch_atomic_fetch_and_relaxed(i, v);
#elif defined(arch_atomic_fetch_and)
	return arch_atomic_fetch_and(i, v);
#else
#error "Unable to define raw_atomic_fetch_and_release"
#endif
}

/**
 * raw_atomic_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v & @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_and_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_and_relaxed(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_and_relaxed)
	return arch_atomic_fetch_and_relaxed(i, v);
#elif defined(arch_atomic_fetch_and)
	return arch_atomic_fetch_and(i, v);
#else
#error "Unable to define raw_atomic_fetch_and_relaxed"
#endif
}

/**
 * raw_atomic_andnot() - atomic bitwise AND NOT with relaxed ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v & ~@i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_andnot() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_andnot(int i, atomic_t *v)
{
#if defined(arch_atomic_andnot)
	arch_atomic_andnot(i, v);
#else
	raw_atomic_and(~i, v);
#endif
}
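/*
 * Illustrative sketch (assumed FLAG mask): andnot clears bits, i.e.
 * raw_atomic_andnot(FLAG, &v) is equivalent to raw_atomic_and(~FLAG, &v),
 * which is exactly the fallback used above when arch_atomic_andnot() is not
 * provided:
 *
 *	raw_atomic_or(FLAG, &v);	// set FLAG (raw_atomic_or() below)
 *	raw_atomic_andnot(FLAG, &v);	// clear FLAG again
 */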

/**
 * raw_atomic_fetch_andnot() - atomic bitwise AND NOT with full ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v & ~@i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_andnot() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_andnot(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_andnot)
	return arch_atomic_fetch_andnot(i, v);
#elif defined(arch_atomic_fetch_andnot_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_fetch_andnot_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_atomic_fetch_and(~i, v);
#endif
}

/**
 * raw_atomic_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v & ~@i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_andnot_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_andnot_acquire)
	return arch_atomic_fetch_andnot_acquire(i, v);
#elif defined(arch_atomic_fetch_andnot_relaxed)
	int ret = arch_atomic_fetch_andnot_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_fetch_andnot)
	return arch_atomic_fetch_andnot(i, v);
#else
	return raw_atomic_fetch_and_acquire(~i, v);
#endif
}

/**
 * raw_atomic_fetch_andnot_release() - atomic bitwise AND NOT with release ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v & ~@i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_andnot_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_andnot_release(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_andnot_release)
	return arch_atomic_fetch_andnot_release(i, v);
#elif defined(arch_atomic_fetch_andnot_relaxed)
	__atomic_release_fence();
	return arch_atomic_fetch_andnot_relaxed(i, v);
#elif defined(arch_atomic_fetch_andnot)
	return arch_atomic_fetch_andnot(i, v);
#else
	return raw_atomic_fetch_and_release(~i, v);
#endif
}

/**
 * raw_atomic_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v & ~@i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_andnot_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_andnot_relaxed(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_andnot_relaxed)
	return arch_atomic_fetch_andnot_relaxed(i, v);
#elif defined(arch_atomic_fetch_andnot)
	return arch_atomic_fetch_andnot(i, v);
#else
	return raw_atomic_fetch_and_relaxed(~i, v);
#endif
}
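/*
 * Illustrative usage sketch (assumed MASK, not part of this header):
 * fetch_andnot() returns the value seen before the bits were cleared, so it
 * can report whether any of them had been set:
 *
 *	old = raw_atomic_fetch_andnot(MASK, &v);
 *	if (old & MASK) {
 *		// at least one of the MASK bits was previously set
 *	}
 */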

/**
 * raw_atomic_or() - atomic bitwise OR with relaxed ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v | @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_or() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_or(int i, atomic_t *v)
{
	arch_atomic_or(i, v);
}

/**
 * raw_atomic_fetch_or() - atomic bitwise OR with full ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v | @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_or() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_or(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_or)
	return arch_atomic_fetch_or(i, v);
#elif defined(arch_atomic_fetch_or_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_fetch_or_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic_fetch_or"
#endif
}

/**
 * raw_atomic_fetch_or_acquire() - atomic bitwise OR with acquire ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v | @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_or_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_or_acquire(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_or_acquire)
	return arch_atomic_fetch_or_acquire(i, v);
#elif defined(arch_atomic_fetch_or_relaxed)
	int ret = arch_atomic_fetch_or_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_fetch_or)
	return arch_atomic_fetch_or(i, v);
#else
#error "Unable to define raw_atomic_fetch_or_acquire"
#endif
}

/**
 * raw_atomic_fetch_or_release() - atomic bitwise OR with release ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v | @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_or_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_or_release(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_or_release)
	return arch_atomic_fetch_or_release(i, v);
#elif defined(arch_atomic_fetch_or_relaxed)
	__atomic_release_fence();
	return arch_atomic_fetch_or_relaxed(i, v);
#elif defined(arch_atomic_fetch_or)
	return arch_atomic_fetch_or(i, v);
#else
#error "Unable to define raw_atomic_fetch_or_release"
#endif
}

/**
 * raw_atomic_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v | @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_or_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_or_relaxed(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_or_relaxed)
	return arch_atomic_fetch_or_relaxed(i, v);
#elif defined(arch_atomic_fetch_or)
	return arch_atomic_fetch_or(i, v);
#else
#error "Unable to define raw_atomic_fetch_or_relaxed"
#endif
}

/**
 * raw_atomic_xor() - atomic bitwise XOR with relaxed ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v ^ @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_xor() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic_xor(int i, atomic_t *v)
{
	arch_atomic_xor(i, v);
}

/**
 * raw_atomic_fetch_xor() - atomic bitwise XOR with full ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v ^ @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_xor() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_xor(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_xor)
	return arch_atomic_fetch_xor(i, v);
#elif defined(arch_atomic_fetch_xor_relaxed)
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_fetch_xor_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic_fetch_xor"
#endif
}

/**
 * raw_atomic_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v ^ @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_xor_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_xor_acquire(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_xor_acquire)
	return arch_atomic_fetch_xor_acquire(i, v);
#elif defined(arch_atomic_fetch_xor_relaxed)
	int ret = arch_atomic_fetch_xor_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic_fetch_xor)
	return arch_atomic_fetch_xor(i, v);
#else
#error "Unable to define raw_atomic_fetch_xor_acquire"
#endif
}

/**
 * raw_atomic_fetch_xor_release() - atomic bitwise XOR with release ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v ^ @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_xor_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_xor_release(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_xor_release)
	return arch_atomic_fetch_xor_release(i, v);
#elif defined(arch_atomic_fetch_xor_relaxed)
	__atomic_release_fence();
	return arch_atomic_fetch_xor_relaxed(i, v);
#elif defined(arch_atomic_fetch_xor)
	return arch_atomic_fetch_xor(i, v);
#else
#error "Unable to define raw_atomic_fetch_xor_release"
#endif
}

/**
 * raw_atomic_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering
 * @i: int value
 * @v: pointer to atomic_t
 *
 * Atomically updates @v to (@v ^ @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic_fetch_xor_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline int
raw_atomic_fetch_xor_relaxed(int i, atomic_t *v)
{
#if defined(arch_atomic_fetch_xor_relaxed)
	return arch_atomic_fetch_xor_relaxed(i, v);
#elif defined(arch_atomic_fetch_xor)
	return arch_atomic_fetch_xor(i, v);
#else
#error "Unable to define raw_atomic_fetch_xor_relaxed"
#endif
}
1876 */ 1877 static __always_inline int 1878 raw_atomic_fetch_xor_relaxed(int i, atomic_t *v) 1879 { 1880 #if defined(arch_atomic_fetch_xor_relaxed) 1881 return arch_atomic_fetch_xor_relaxed(i, v); 1882 #elif defined(arch_atomic_fetch_xor) 1883 return arch_atomic_fetch_xor(i, v); 1884 #else 1885 #error "Unable to define raw_atomic_fetch_xor_relaxed" 1886 #endif 1887 } 1888 1889 /** 1890 * raw_atomic_xchg() - atomic exchange with full ordering 1891 * @v: pointer to atomic_t 1892 * @new: int value to assign 1893 * 1894 * Atomically updates @v to @new with full ordering. 1895 * 1896 * Safe to use in noinstr code; prefer atomic_xchg() elsewhere. 1897 * 1898 * Return: The original value of @v. 1899 */ 1900 static __always_inline int 1901 raw_atomic_xchg(atomic_t *v, int new) 1902 { 1903 #if defined(arch_atomic_xchg) 1904 return arch_atomic_xchg(v, new); 1905 #elif defined(arch_atomic_xchg_relaxed) 1906 int ret; 1907 __atomic_pre_full_fence(); 1908 ret = arch_atomic_xchg_relaxed(v, new); 1909 __atomic_post_full_fence(); 1910 return ret; 1911 #else 1912 return raw_xchg(&v->counter, new); 1913 #endif 1914 } 1915 1916 /** 1917 * raw_atomic_xchg_acquire() - atomic exchange with acquire ordering 1918 * @v: pointer to atomic_t 1919 * @new: int value to assign 1920 * 1921 * Atomically updates @v to @new with acquire ordering. 1922 * 1923 * Safe to use in noinstr code; prefer atomic_xchg_acquire() elsewhere. 1924 * 1925 * Return: The original value of @v. 1926 */ 1927 static __always_inline int 1928 raw_atomic_xchg_acquire(atomic_t *v, int new) 1929 { 1930 #if defined(arch_atomic_xchg_acquire) 1931 return arch_atomic_xchg_acquire(v, new); 1932 #elif defined(arch_atomic_xchg_relaxed) 1933 int ret = arch_atomic_xchg_relaxed(v, new); 1934 __atomic_acquire_fence(); 1935 return ret; 1936 #elif defined(arch_atomic_xchg) 1937 return arch_atomic_xchg(v, new); 1938 #else 1939 return raw_xchg_acquire(&v->counter, new); 1940 #endif 1941 } 1942 1943 /** 1944 * raw_atomic_xchg_release() - atomic exchange with release ordering 1945 * @v: pointer to atomic_t 1946 * @new: int value to assign 1947 * 1948 * Atomically updates @v to @new with release ordering. 1949 * 1950 * Safe to use in noinstr code; prefer atomic_xchg_release() elsewhere. 1951 * 1952 * Return: The original value of @v. 1953 */ 1954 static __always_inline int 1955 raw_atomic_xchg_release(atomic_t *v, int new) 1956 { 1957 #if defined(arch_atomic_xchg_release) 1958 return arch_atomic_xchg_release(v, new); 1959 #elif defined(arch_atomic_xchg_relaxed) 1960 __atomic_release_fence(); 1961 return arch_atomic_xchg_relaxed(v, new); 1962 #elif defined(arch_atomic_xchg) 1963 return arch_atomic_xchg(v, new); 1964 #else 1965 return raw_xchg_release(&v->counter, new); 1966 #endif 1967 } 1968 1969 /** 1970 * raw_atomic_xchg_relaxed() - atomic exchange with relaxed ordering 1971 * @v: pointer to atomic_t 1972 * @new: int value to assign 1973 * 1974 * Atomically updates @v to @new with relaxed ordering. 1975 * 1976 * Safe to use in noinstr code; prefer atomic_xchg_relaxed() elsewhere. 1977 * 1978 * Return: The original value of @v. 
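 *
 * Editorial sketch (illustrative only; @slot and consume() are hypothetical
 * caller state): draining a pending value while storing an "empty" marker,
 * with no ordering against other accesses:
 *
 *	int pending = raw_atomic_xchg_relaxed(&slot, 0);
 *
 *	if (pending)
 *		consume(pending);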
1979 */ 1980 static __always_inline int 1981 raw_atomic_xchg_relaxed(atomic_t *v, int new) 1982 { 1983 #if defined(arch_atomic_xchg_relaxed) 1984 return arch_atomic_xchg_relaxed(v, new); 1985 #elif defined(arch_atomic_xchg) 1986 return arch_atomic_xchg(v, new); 1987 #else 1988 return raw_xchg_relaxed(&v->counter, new); 1989 #endif 1990 } 1991 1992 /** 1993 * raw_atomic_cmpxchg() - atomic compare and exchange with full ordering 1994 * @v: pointer to atomic_t 1995 * @old: int value to compare with 1996 * @new: int value to assign 1997 * 1998 * If (@v == @old), atomically updates @v to @new with full ordering. 1999 * 2000 * Safe to use in noinstr code; prefer atomic_cmpxchg() elsewhere. 2001 * 2002 * Return: The original value of @v. 2003 */ 2004 static __always_inline int 2005 raw_atomic_cmpxchg(atomic_t *v, int old, int new) 2006 { 2007 #if defined(arch_atomic_cmpxchg) 2008 return arch_atomic_cmpxchg(v, old, new); 2009 #elif defined(arch_atomic_cmpxchg_relaxed) 2010 int ret; 2011 __atomic_pre_full_fence(); 2012 ret = arch_atomic_cmpxchg_relaxed(v, old, new); 2013 __atomic_post_full_fence(); 2014 return ret; 2015 #else 2016 return raw_cmpxchg(&v->counter, old, new); 2017 #endif 2018 } 2019 2020 /** 2021 * raw_atomic_cmpxchg_acquire() - atomic compare and exchange with acquire ordering 2022 * @v: pointer to atomic_t 2023 * @old: int value to compare with 2024 * @new: int value to assign 2025 * 2026 * If (@v == @old), atomically updates @v to @new with acquire ordering. 2027 * 2028 * Safe to use in noinstr code; prefer atomic_cmpxchg_acquire() elsewhere. 2029 * 2030 * Return: The original value of @v. 2031 */ 2032 static __always_inline int 2033 raw_atomic_cmpxchg_acquire(atomic_t *v, int old, int new) 2034 { 2035 #if defined(arch_atomic_cmpxchg_acquire) 2036 return arch_atomic_cmpxchg_acquire(v, old, new); 2037 #elif defined(arch_atomic_cmpxchg_relaxed) 2038 int ret = arch_atomic_cmpxchg_relaxed(v, old, new); 2039 __atomic_acquire_fence(); 2040 return ret; 2041 #elif defined(arch_atomic_cmpxchg) 2042 return arch_atomic_cmpxchg(v, old, new); 2043 #else 2044 return raw_cmpxchg_acquire(&v->counter, old, new); 2045 #endif 2046 } 2047 2048 /** 2049 * raw_atomic_cmpxchg_release() - atomic compare and exchange with release ordering 2050 * @v: pointer to atomic_t 2051 * @old: int value to compare with 2052 * @new: int value to assign 2053 * 2054 * If (@v == @old), atomically updates @v to @new with release ordering. 2055 * 2056 * Safe to use in noinstr code; prefer atomic_cmpxchg_release() elsewhere. 2057 * 2058 * Return: The original value of @v. 2059 */ 2060 static __always_inline int 2061 raw_atomic_cmpxchg_release(atomic_t *v, int old, int new) 2062 { 2063 #if defined(arch_atomic_cmpxchg_release) 2064 return arch_atomic_cmpxchg_release(v, old, new); 2065 #elif defined(arch_atomic_cmpxchg_relaxed) 2066 __atomic_release_fence(); 2067 return arch_atomic_cmpxchg_relaxed(v, old, new); 2068 #elif defined(arch_atomic_cmpxchg) 2069 return arch_atomic_cmpxchg(v, old, new); 2070 #else 2071 return raw_cmpxchg_release(&v->counter, old, new); 2072 #endif 2073 } 2074 2075 /** 2076 * raw_atomic_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering 2077 * @v: pointer to atomic_t 2078 * @old: int value to compare with 2079 * @new: int value to assign 2080 * 2081 * If (@v == @old), atomically updates @v to @new with relaxed ordering. 2082 * 2083 * Safe to use in noinstr code; prefer atomic_cmpxchg_relaxed() elsewhere. 2084 * 2085 * Return: The original value of @v. 
2086 */ 2087 static __always_inline int 2088 raw_atomic_cmpxchg_relaxed(atomic_t *v, int old, int new) 2089 { 2090 #if defined(arch_atomic_cmpxchg_relaxed) 2091 return arch_atomic_cmpxchg_relaxed(v, old, new); 2092 #elif defined(arch_atomic_cmpxchg) 2093 return arch_atomic_cmpxchg(v, old, new); 2094 #else 2095 return raw_cmpxchg_relaxed(&v->counter, old, new); 2096 #endif 2097 } 2098 2099 /** 2100 * raw_atomic_try_cmpxchg() - atomic compare and exchange with full ordering 2101 * @v: pointer to atomic_t 2102 * @old: pointer to int value to compare with 2103 * @new: int value to assign 2104 * 2105 * If (@v == @old), atomically updates @v to @new with full ordering. 2106 * Otherwise, updates @old to the current value of @v. 2107 * 2108 * Safe to use in noinstr code; prefer atomic_try_cmpxchg() elsewhere. 2109 * 2110 * Return: @true if the exchange occurred, @false otherwise. 2111 */ 2112 static __always_inline bool 2113 raw_atomic_try_cmpxchg(atomic_t *v, int *old, int new) 2114 { 2115 #if defined(arch_atomic_try_cmpxchg) 2116 return arch_atomic_try_cmpxchg(v, old, new); 2117 #elif defined(arch_atomic_try_cmpxchg_relaxed) 2118 bool ret; 2119 __atomic_pre_full_fence(); 2120 ret = arch_atomic_try_cmpxchg_relaxed(v, old, new); 2121 __atomic_post_full_fence(); 2122 return ret; 2123 #else 2124 int r, o = *old; 2125 r = raw_atomic_cmpxchg(v, o, new); 2126 if (unlikely(r != o)) 2127 *old = r; 2128 return likely(r == o); 2129 #endif 2130 } 2131 2132 /** 2133 * raw_atomic_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering 2134 * @v: pointer to atomic_t 2135 * @old: pointer to int value to compare with 2136 * @new: int value to assign 2137 * 2138 * If (@v == @old), atomically updates @v to @new with acquire ordering. 2139 * Otherwise, updates @old to the current value of @v. 2140 * 2141 * Safe to use in noinstr code; prefer atomic_try_cmpxchg_acquire() elsewhere. 2142 * 2143 * Return: @true if the exchange occurred, @false otherwise. 2144 */ 2145 static __always_inline bool 2146 raw_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) 2147 { 2148 #if defined(arch_atomic_try_cmpxchg_acquire) 2149 return arch_atomic_try_cmpxchg_acquire(v, old, new); 2150 #elif defined(arch_atomic_try_cmpxchg_relaxed) 2151 bool ret = arch_atomic_try_cmpxchg_relaxed(v, old, new); 2152 __atomic_acquire_fence(); 2153 return ret; 2154 #elif defined(arch_atomic_try_cmpxchg) 2155 return arch_atomic_try_cmpxchg(v, old, new); 2156 #else 2157 int r, o = *old; 2158 r = raw_atomic_cmpxchg_acquire(v, o, new); 2159 if (unlikely(r != o)) 2160 *old = r; 2161 return likely(r == o); 2162 #endif 2163 } 2164 2165 /** 2166 * raw_atomic_try_cmpxchg_release() - atomic compare and exchange with release ordering 2167 * @v: pointer to atomic_t 2168 * @old: pointer to int value to compare with 2169 * @new: int value to assign 2170 * 2171 * If (@v == @old), atomically updates @v to @new with release ordering. 2172 * Otherwise, updates @old to the current value of @v. 2173 * 2174 * Safe to use in noinstr code; prefer atomic_try_cmpxchg_release() elsewhere. 2175 * 2176 * Return: @true if the exchange occurred, @false otherwise.
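 *
 * Editorial sketch (illustrative only; @owner and MY_TOKEN are hypothetical
 * caller state): release the word only if it still holds our token; on
 * failure @expected is updated to the value actually observed:
 *
 *	int expected = MY_TOKEN;
 *
 *	if (!raw_atomic_try_cmpxchg_release(&owner, &expected, 0))
 *		pr_warn("owner changed to %d\n", expected);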
2177 */ 2178 static __always_inline bool 2179 raw_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) 2180 { 2181 #if defined(arch_atomic_try_cmpxchg_release) 2182 return arch_atomic_try_cmpxchg_release(v, old, new); 2183 #elif defined(arch_atomic_try_cmpxchg_relaxed) 2184 __atomic_release_fence(); 2185 return arch_atomic_try_cmpxchg_relaxed(v, old, new); 2186 #elif defined(arch_atomic_try_cmpxchg) 2187 return arch_atomic_try_cmpxchg(v, old, new); 2188 #else 2189 int r, o = *old; 2190 r = raw_atomic_cmpxchg_release(v, o, new); 2191 if (unlikely(r != o)) 2192 *old = r; 2193 return likely(r == o); 2194 #endif 2195 } 2196 2197 /** 2198 * raw_atomic_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering 2199 * @v: pointer to atomic_t 2200 * @old: pointer to int value to compare with 2201 * @new: int value to assign 2202 * 2203 * If (@v == @old), atomically updates @v to @new with relaxed ordering. 2204 * Otherwise, updates @old to the current value of @v. 2205 * 2206 * Safe to use in noinstr code; prefer atomic_try_cmpxchg_relaxed() elsewhere. 2207 * 2208 * Return: @true if the exchange occurred, @false otherwise. 2209 */ 2210 static __always_inline bool 2211 raw_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) 2212 { 2213 #if defined(arch_atomic_try_cmpxchg_relaxed) 2214 return arch_atomic_try_cmpxchg_relaxed(v, old, new); 2215 #elif defined(arch_atomic_try_cmpxchg) 2216 return arch_atomic_try_cmpxchg(v, old, new); 2217 #else 2218 int r, o = *old; 2219 r = raw_atomic_cmpxchg_relaxed(v, o, new); 2220 if (unlikely(r != o)) 2221 *old = r; 2222 return likely(r == o); 2223 #endif 2224 } 2225 2226 /** 2227 * raw_atomic_sub_and_test() - atomic subtract and test if zero with full ordering 2228 * @i: int value to subtract 2229 * @v: pointer to atomic_t 2230 * 2231 * Atomically updates @v to (@v - @i) with full ordering. 2232 * 2233 * Safe to use in noinstr code; prefer atomic_sub_and_test() elsewhere. 2234 * 2235 * Return: @true if the resulting value of @v is zero, @false otherwise. 2236 */ 2237 static __always_inline bool 2238 raw_atomic_sub_and_test(int i, atomic_t *v) 2239 { 2240 #if defined(arch_atomic_sub_and_test) 2241 return arch_atomic_sub_and_test(i, v); 2242 #else 2243 return raw_atomic_sub_return(i, v) == 0; 2244 #endif 2245 } 2246 2247 /** 2248 * raw_atomic_dec_and_test() - atomic decrement and test if zero with full ordering 2249 * @v: pointer to atomic_t 2250 * 2251 * Atomically updates @v to (@v - 1) with full ordering. 2252 * 2253 * Safe to use in noinstr code; prefer atomic_dec_and_test() elsewhere. 2254 * 2255 * Return: @true if the resulting value of @v is zero, @false otherwise. 2256 */ 2257 static __always_inline bool 2258 raw_atomic_dec_and_test(atomic_t *v) 2259 { 2260 #if defined(arch_atomic_dec_and_test) 2261 return arch_atomic_dec_and_test(v); 2262 #else 2263 return raw_atomic_dec_return(v) == 0; 2264 #endif 2265 } 2266 2267 /** 2268 * raw_atomic_inc_and_test() - atomic increment and test if zero with full ordering 2269 * @v: pointer to atomic_t 2270 * 2271 * Atomically updates @v to (@v + 1) with full ordering. 2272 * 2273 * Safe to use in noinstr code; prefer atomic_inc_and_test() elsewhere. 2274 * 2275 * Return: @true if the resulting value of @v is zero, @false otherwise.
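 *
 * Editorial sketch (illustrative only; @outstanding and complete_all_work()
 * are hypothetical): with a counter biased to -1, the test fires for the
 * caller whose increment brings it to exactly zero:
 *
 *	if (raw_atomic_inc_and_test(&outstanding))
 *		complete_all_work();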
2276 */ 2277 static __always_inline bool 2278 raw_atomic_inc_and_test(atomic_t *v) 2279 { 2280 #if defined(arch_atomic_inc_and_test) 2281 return arch_atomic_inc_and_test(v); 2282 #else 2283 return raw_atomic_inc_return(v) == 0; 2284 #endif 2285 } 2286 2287 /** 2288 * raw_atomic_add_negative() - atomic add and test if negative with full ordering 2289 * @i: int value to add 2290 * @v: pointer to atomic_t 2291 * 2292 * Atomically updates @v to (@v + @i) with full ordering. 2293 * 2294 * Safe to use in noinstr code; prefer atomic_add_negative() elsewhere. 2295 * 2296 * Return: @true if the resulting value of @v is negative, @false otherwise. 2297 */ 2298 static __always_inline bool 2299 raw_atomic_add_negative(int i, atomic_t *v) 2300 { 2301 #if defined(arch_atomic_add_negative) 2302 return arch_atomic_add_negative(i, v); 2303 #elif defined(arch_atomic_add_negative_relaxed) 2304 bool ret; 2305 __atomic_pre_full_fence(); 2306 ret = arch_atomic_add_negative_relaxed(i, v); 2307 __atomic_post_full_fence(); 2308 return ret; 2309 #else 2310 return raw_atomic_add_return(i, v) < 0; 2311 #endif 2312 } 2313 2314 /** 2315 * raw_atomic_add_negative_acquire() - atomic add and test if negative with acquire ordering 2316 * @i: int value to add 2317 * @v: pointer to atomic_t 2318 * 2319 * Atomically updates @v to (@v + @i) with acquire ordering. 2320 * 2321 * Safe to use in noinstr code; prefer atomic_add_negative_acquire() elsewhere. 2322 * 2323 * Return: @true if the resulting value of @v is negative, @false otherwise. 2324 */ 2325 static __always_inline bool 2326 raw_atomic_add_negative_acquire(int i, atomic_t *v) 2327 { 2328 #if defined(arch_atomic_add_negative_acquire) 2329 return arch_atomic_add_negative_acquire(i, v); 2330 #elif defined(arch_atomic_add_negative_relaxed) 2331 bool ret = arch_atomic_add_negative_relaxed(i, v); 2332 __atomic_acquire_fence(); 2333 return ret; 2334 #elif defined(arch_atomic_add_negative) 2335 return arch_atomic_add_negative(i, v); 2336 #else 2337 return raw_atomic_add_return_acquire(i, v) < 0; 2338 #endif 2339 } 2340 2341 /** 2342 * raw_atomic_add_negative_release() - atomic add and test if negative with release ordering 2343 * @i: int value to add 2344 * @v: pointer to atomic_t 2345 * 2346 * Atomically updates @v to (@v + @i) with release ordering. 2347 * 2348 * Safe to use in noinstr code; prefer atomic_add_negative_release() elsewhere. 2349 * 2350 * Return: @true if the resulting value of @v is negative, @false otherwise. 2351 */ 2352 static __always_inline bool 2353 raw_atomic_add_negative_release(int i, atomic_t *v) 2354 { 2355 #if defined(arch_atomic_add_negative_release) 2356 return arch_atomic_add_negative_release(i, v); 2357 #elif defined(arch_atomic_add_negative_relaxed) 2358 __atomic_release_fence(); 2359 return arch_atomic_add_negative_relaxed(i, v); 2360 #elif defined(arch_atomic_add_negative) 2361 return arch_atomic_add_negative(i, v); 2362 #else 2363 return raw_atomic_add_return_release(i, v) < 0; 2364 #endif 2365 } 2366 2367 /** 2368 * raw_atomic_add_negative_relaxed() - atomic add and test if negative with relaxed ordering 2369 * @i: int value to add 2370 * @v: pointer to atomic_t 2371 * 2372 * Atomically updates @v to (@v + @i) with relaxed ordering. 2373 * 2374 * Safe to use in noinstr code; prefer atomic_add_negative_relaxed() elsewhere. 2375 * 2376 * Return: @true if the resulting value of @v is negative, @false otherwise. 
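 *
 * Editorial sketch (illustrative only; @budget and @cost are hypothetical):
 * detect when this addition drives the counter below zero, with no ordering
 * guarantees:
 *
 *	if (raw_atomic_add_negative_relaxed(-cost, &budget))
 *		return -EAGAIN;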
2377 */ 2378 static __always_inline bool 2379 raw_atomic_add_negative_relaxed(int i, atomic_t *v) 2380 { 2381 #if defined(arch_atomic_add_negative_relaxed) 2382 return arch_atomic_add_negative_relaxed(i, v); 2383 #elif defined(arch_atomic_add_negative) 2384 return arch_atomic_add_negative(i, v); 2385 #else 2386 return raw_atomic_add_return_relaxed(i, v) < 0; 2387 #endif 2388 } 2389 2390 /** 2391 * raw_atomic_fetch_add_unless() - atomic add unless value with full ordering 2392 * @v: pointer to atomic_t 2393 * @a: int value to add 2394 * @u: int value to compare with 2395 * 2396 * If (@v != @u), atomically updates @v to (@v + @a) with full ordering. 2397 * 2398 * Safe to use in noinstr code; prefer atomic_fetch_add_unless() elsewhere. 2399 * 2400 * Return: The original value of @v. 2401 */ 2402 static __always_inline int 2403 raw_atomic_fetch_add_unless(atomic_t *v, int a, int u) 2404 { 2405 #if defined(arch_atomic_fetch_add_unless) 2406 return arch_atomic_fetch_add_unless(v, a, u); 2407 #else 2408 int c = raw_atomic_read(v); 2409 2410 do { 2411 if (unlikely(c == u)) 2412 break; 2413 } while (!raw_atomic_try_cmpxchg(v, &c, c + a)); 2414 2415 return c; 2416 #endif 2417 } 2418 2419 /** 2420 * raw_atomic_add_unless() - atomic add unless value with full ordering 2421 * @v: pointer to atomic_t 2422 * @a: int value to add 2423 * @u: int value to compare with 2424 * 2425 * If (@v != @u), atomically updates @v to (@v + @a) with full ordering. 2426 * 2427 * Safe to use in noinstr code; prefer atomic_add_unless() elsewhere. 2428 * 2429 * Return: @true if @v was updated, @false otherwise. 2430 */ 2431 static __always_inline bool 2432 raw_atomic_add_unless(atomic_t *v, int a, int u) 2433 { 2434 #if defined(arch_atomic_add_unless) 2435 return arch_atomic_add_unless(v, a, u); 2436 #else 2437 return raw_atomic_fetch_add_unless(v, a, u) != u; 2438 #endif 2439 } 2440 2441 /** 2442 * raw_atomic_inc_not_zero() - atomic increment unless zero with full ordering 2443 * @v: pointer to atomic_t 2444 * 2445 * If (@v != 0), atomically updates @v to (@v + 1) with full ordering. 2446 * 2447 * Safe to use in noinstr code; prefer atomic_inc_not_zero() elsewhere. 2448 * 2449 * Return: @true if @v was updated, @false otherwise. 2450 */ 2451 static __always_inline bool 2452 raw_atomic_inc_not_zero(atomic_t *v) 2453 { 2454 #if defined(arch_atomic_inc_not_zero) 2455 return arch_atomic_inc_not_zero(v); 2456 #else 2457 return raw_atomic_add_unless(v, 1, 0); 2458 #endif 2459 } 2460 2461 /** 2462 * raw_atomic_inc_unless_negative() - atomic increment unless negative with full ordering 2463 * @v: pointer to atomic_t 2464 * 2465 * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering. 2466 * 2467 * Safe to use in noinstr code; prefer atomic_inc_unless_negative() elsewhere. 2468 * 2469 * Return: @true if @v was updated, @false otherwise. 2470 */ 2471 static __always_inline bool 2472 raw_atomic_inc_unless_negative(atomic_t *v) 2473 { 2474 #if defined(arch_atomic_inc_unless_negative) 2475 return arch_atomic_inc_unless_negative(v); 2476 #else 2477 int c = raw_atomic_read(v); 2478 2479 do { 2480 if (unlikely(c < 0)) 2481 return false; 2482 } while (!raw_atomic_try_cmpxchg(v, &c, c + 1)); 2483 2484 return true; 2485 #endif 2486 } 2487 2488 /** 2489 * raw_atomic_dec_unless_positive() - atomic decrement unless positive with full ordering 2490 * @v: pointer to atomic_t 2491 * 2492 * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering. 
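 *
 * Editorial sketch (illustrative only; @credit is hypothetical): the
 * decrement is skipped entirely once the counter has gone positive:
 *
 *	if (!raw_atomic_dec_unless_positive(&credit))
 *		return false;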
2493 * 2494 * Safe to use in noinstr code; prefer atomic_dec_unless_positive() elsewhere. 2495 * 2496 * Return: @true if @v was updated, @false otherwise. 2497 */ 2498 static __always_inline bool 2499 raw_atomic_dec_unless_positive(atomic_t *v) 2500 { 2501 #if defined(arch_atomic_dec_unless_positive) 2502 return arch_atomic_dec_unless_positive(v); 2503 #else 2504 int c = raw_atomic_read(v); 2505 2506 do { 2507 if (unlikely(c > 0)) 2508 return false; 2509 } while (!raw_atomic_try_cmpxchg(v, &c, c - 1)); 2510 2511 return true; 2512 #endif 2513 } 2514 2515 /** 2516 * raw_atomic_dec_if_positive() - atomic decrement if positive with full ordering 2517 * @v: pointer to atomic_t 2518 * 2519 * If (@v > 0), atomically updates @v to (@v - 1) with full ordering. 2520 * 2521 * Safe to use in noinstr code; prefer atomic_dec_if_positive() elsewhere. 2522 * 2523 * Return: The old value of (@v - 1), regardless of whether @v was updated. 2524 */ 2525 static __always_inline int 2526 raw_atomic_dec_if_positive(atomic_t *v) 2527 { 2528 #if defined(arch_atomic_dec_if_positive) 2529 return arch_atomic_dec_if_positive(v); 2530 #else 2531 int dec, c = raw_atomic_read(v); 2532 2533 do { 2534 dec = c - 1; 2535 if (unlikely(dec < 0)) 2536 break; 2537 } while (!raw_atomic_try_cmpxchg(v, &c, dec)); 2538 2539 return dec; 2540 #endif 2541 } 2542 2543 #ifdef CONFIG_GENERIC_ATOMIC64 2544 #include <asm-generic/atomic64.h> 2545 #endif 2546 2547 /** 2548 * raw_atomic64_read() - atomic load with relaxed ordering 2549 * @v: pointer to atomic64_t 2550 * 2551 * Atomically loads the value of @v with relaxed ordering. 2552 * 2553 * Safe to use in noinstr code; prefer atomic64_read() elsewhere. 2554 * 2555 * Return: The value loaded from @v. 2556 */ 2557 static __always_inline s64 2558 raw_atomic64_read(const atomic64_t *v) 2559 { 2560 return arch_atomic64_read(v); 2561 } 2562 2563 /** 2564 * raw_atomic64_read_acquire() - atomic load with acquire ordering 2565 * @v: pointer to atomic64_t 2566 * 2567 * Atomically loads the value of @v with acquire ordering. 2568 * 2569 * Safe to use in noinstr code; prefer atomic64_read_acquire() elsewhere. 2570 * 2571 * Return: The value loaded from @v. 2572 */ 2573 static __always_inline s64 2574 raw_atomic64_read_acquire(const atomic64_t *v) 2575 { 2576 #if defined(arch_atomic64_read_acquire) 2577 return arch_atomic64_read_acquire(v); 2578 #elif defined(arch_atomic64_read) 2579 return arch_atomic64_read(v); 2580 #else 2581 s64 ret; 2582 2583 if (__native_word(atomic64_t)) { 2584 ret = smp_load_acquire(&(v)->counter); 2585 } else { 2586 ret = raw_atomic64_read(v); 2587 __atomic_acquire_fence(); 2588 } 2589 2590 return ret; 2591 #endif 2592 } 2593 2594 /** 2595 * raw_atomic64_set() - atomic set with relaxed ordering 2596 * @v: pointer to atomic64_t 2597 * @i: s64 value to assign 2598 * 2599 * Atomically sets @v to @i with relaxed ordering. 2600 * 2601 * Safe to use in noinstr code; prefer atomic64_set() elsewhere. 2602 * 2603 * Return: Nothing. 2604 */ 2605 static __always_inline void 2606 raw_atomic64_set(atomic64_t *v, s64 i) 2607 { 2608 arch_atomic64_set(v, i); 2609 } 2610 2611 /** 2612 * raw_atomic64_set_release() - atomic set with release ordering 2613 * @v: pointer to atomic64_t 2614 * @i: s64 value to assign 2615 * 2616 * Atomically sets @v to @i with release ordering. 2617 * 2618 * Safe to use in noinstr code; prefer atomic64_set_release() elsewhere. 2619 * 2620 * Return: Nothing. 
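 *
 * Editorial sketch (illustrative only; @ready, @buf and fill_buffer() are
 * hypothetical): a producer publishes completion with release semantics so
 * that a consumer using raw_atomic64_read_acquire() also observes the
 * buffer contents written beforehand:
 *
 *	fill_buffer(buf);
 *	raw_atomic64_set_release(&ready, 1);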
2621 */ 2622 static __always_inline void 2623 raw_atomic64_set_release(atomic64_t *v, s64 i) 2624 { 2625 #if defined(arch_atomic64_set_release) 2626 arch_atomic64_set_release(v, i); 2627 #elif defined(arch_atomic64_set) 2628 arch_atomic64_set(v, i); 2629 #else 2630 if (__native_word(atomic64_t)) { 2631 smp_store_release(&(v)->counter, i); 2632 } else { 2633 __atomic_release_fence(); 2634 raw_atomic64_set(v, i); 2635 } 2636 #endif 2637 } 2638 2639 /** 2640 * raw_atomic64_add() - atomic add with relaxed ordering 2641 * @i: s64 value to add 2642 * @v: pointer to atomic64_t 2643 * 2644 * Atomically updates @v to (@v + @i) with relaxed ordering. 2645 * 2646 * Safe to use in noinstr code; prefer atomic64_add() elsewhere. 2647 * 2648 * Return: Nothing. 2649 */ 2650 static __always_inline void 2651 raw_atomic64_add(s64 i, atomic64_t *v) 2652 { 2653 arch_atomic64_add(i, v); 2654 } 2655 2656 /** 2657 * raw_atomic64_add_return() - atomic add with full ordering 2658 * @i: s64 value to add 2659 * @v: pointer to atomic64_t 2660 * 2661 * Atomically updates @v to (@v + @i) with full ordering. 2662 * 2663 * Safe to use in noinstr code; prefer atomic64_add_return() elsewhere. 2664 * 2665 * Return: The updated value of @v. 2666 */ 2667 static __always_inline s64 2668 raw_atomic64_add_return(s64 i, atomic64_t *v) 2669 { 2670 #if defined(arch_atomic64_add_return) 2671 return arch_atomic64_add_return(i, v); 2672 #elif defined(arch_atomic64_add_return_relaxed) 2673 s64 ret; 2674 __atomic_pre_full_fence(); 2675 ret = arch_atomic64_add_return_relaxed(i, v); 2676 __atomic_post_full_fence(); 2677 return ret; 2678 #else 2679 #error "Unable to define raw_atomic64_add_return" 2680 #endif 2681 } 2682 2683 /** 2684 * raw_atomic64_add_return_acquire() - atomic add with acquire ordering 2685 * @i: s64 value to add 2686 * @v: pointer to atomic64_t 2687 * 2688 * Atomically updates @v to (@v + @i) with acquire ordering. 2689 * 2690 * Safe to use in noinstr code; prefer atomic64_add_return_acquire() elsewhere. 2691 * 2692 * Return: The updated value of @v. 2693 */ 2694 static __always_inline s64 2695 raw_atomic64_add_return_acquire(s64 i, atomic64_t *v) 2696 { 2697 #if defined(arch_atomic64_add_return_acquire) 2698 return arch_atomic64_add_return_acquire(i, v); 2699 #elif defined(arch_atomic64_add_return_relaxed) 2700 s64 ret = arch_atomic64_add_return_relaxed(i, v); 2701 __atomic_acquire_fence(); 2702 return ret; 2703 #elif defined(arch_atomic64_add_return) 2704 return arch_atomic64_add_return(i, v); 2705 #else 2706 #error "Unable to define raw_atomic64_add_return_acquire" 2707 #endif 2708 } 2709 2710 /** 2711 * raw_atomic64_add_return_release() - atomic add with release ordering 2712 * @i: s64 value to add 2713 * @v: pointer to atomic64_t 2714 * 2715 * Atomically updates @v to (@v + @i) with release ordering. 2716 * 2717 * Safe to use in noinstr code; prefer atomic64_add_return_release() elsewhere. 2718 * 2719 * Return: The updated value of @v. 
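 *
 * Editorial sketch (illustrative only; @bytes_written and @nbytes are
 * hypothetical): the returned total already includes this caller's
 * contribution, and earlier stores are ordered before the update:
 *
 *	s64 total = raw_atomic64_add_return_release(nbytes, &bytes_written);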
2720 */ 2721 static __always_inline s64 2722 raw_atomic64_add_return_release(s64 i, atomic64_t *v) 2723 { 2724 #if defined(arch_atomic64_add_return_release) 2725 return arch_atomic64_add_return_release(i, v); 2726 #elif defined(arch_atomic64_add_return_relaxed) 2727 __atomic_release_fence(); 2728 return arch_atomic64_add_return_relaxed(i, v); 2729 #elif defined(arch_atomic64_add_return) 2730 return arch_atomic64_add_return(i, v); 2731 #else 2732 #error "Unable to define raw_atomic64_add_return_release" 2733 #endif 2734 } 2735 2736 /** 2737 * raw_atomic64_add_return_relaxed() - atomic add with relaxed ordering 2738 * @i: s64 value to add 2739 * @v: pointer to atomic64_t 2740 * 2741 * Atomically updates @v to (@v + @i) with relaxed ordering. 2742 * 2743 * Safe to use in noinstr code; prefer atomic64_add_return_relaxed() elsewhere. 2744 * 2745 * Return: The updated value of @v. 2746 */ 2747 static __always_inline s64 2748 raw_atomic64_add_return_relaxed(s64 i, atomic64_t *v) 2749 { 2750 #if defined(arch_atomic64_add_return_relaxed) 2751 return arch_atomic64_add_return_relaxed(i, v); 2752 #elif defined(arch_atomic64_add_return) 2753 return arch_atomic64_add_return(i, v); 2754 #else 2755 #error "Unable to define raw_atomic64_add_return_relaxed" 2756 #endif 2757 } 2758 2759 /** 2760 * raw_atomic64_fetch_add() - atomic add with full ordering 2761 * @i: s64 value to add 2762 * @v: pointer to atomic64_t 2763 * 2764 * Atomically updates @v to (@v + @i) with full ordering. 2765 * 2766 * Safe to use in noinstr code; prefer atomic64_fetch_add() elsewhere. 2767 * 2768 * Return: The original value of @v. 2769 */ 2770 static __always_inline s64 2771 raw_atomic64_fetch_add(s64 i, atomic64_t *v) 2772 { 2773 #if defined(arch_atomic64_fetch_add) 2774 return arch_atomic64_fetch_add(i, v); 2775 #elif defined(arch_atomic64_fetch_add_relaxed) 2776 s64 ret; 2777 __atomic_pre_full_fence(); 2778 ret = arch_atomic64_fetch_add_relaxed(i, v); 2779 __atomic_post_full_fence(); 2780 return ret; 2781 #else 2782 #error "Unable to define raw_atomic64_fetch_add" 2783 #endif 2784 } 2785 2786 /** 2787 * raw_atomic64_fetch_add_acquire() - atomic add with acquire ordering 2788 * @i: s64 value to add 2789 * @v: pointer to atomic64_t 2790 * 2791 * Atomically updates @v to (@v + @i) with acquire ordering. 2792 * 2793 * Safe to use in noinstr code; prefer atomic64_fetch_add_acquire() elsewhere. 2794 * 2795 * Return: The original value of @v. 2796 */ 2797 static __always_inline s64 2798 raw_atomic64_fetch_add_acquire(s64 i, atomic64_t *v) 2799 { 2800 #if defined(arch_atomic64_fetch_add_acquire) 2801 return arch_atomic64_fetch_add_acquire(i, v); 2802 #elif defined(arch_atomic64_fetch_add_relaxed) 2803 s64 ret = arch_atomic64_fetch_add_relaxed(i, v); 2804 __atomic_acquire_fence(); 2805 return ret; 2806 #elif defined(arch_atomic64_fetch_add) 2807 return arch_atomic64_fetch_add(i, v); 2808 #else 2809 #error "Unable to define raw_atomic64_fetch_add_acquire" 2810 #endif 2811 } 2812 2813 /** 2814 * raw_atomic64_fetch_add_release() - atomic add with release ordering 2815 * @i: s64 value to add 2816 * @v: pointer to atomic64_t 2817 * 2818 * Atomically updates @v to (@v + @i) with release ordering. 2819 * 2820 * Safe to use in noinstr code; prefer atomic64_fetch_add_release() elsewhere. 2821 * 2822 * Return: The original value of @v. 
2823 */ 2824 static __always_inline s64 2825 raw_atomic64_fetch_add_release(s64 i, atomic64_t *v) 2826 { 2827 #if defined(arch_atomic64_fetch_add_release) 2828 return arch_atomic64_fetch_add_release(i, v); 2829 #elif defined(arch_atomic64_fetch_add_relaxed) 2830 __atomic_release_fence(); 2831 return arch_atomic64_fetch_add_relaxed(i, v); 2832 #elif defined(arch_atomic64_fetch_add) 2833 return arch_atomic64_fetch_add(i, v); 2834 #else 2835 #error "Unable to define raw_atomic64_fetch_add_release" 2836 #endif 2837 } 2838 2839 /** 2840 * raw_atomic64_fetch_add_relaxed() - atomic add with relaxed ordering 2841 * @i: s64 value to add 2842 * @v: pointer to atomic64_t 2843 * 2844 * Atomically updates @v to (@v + @i) with relaxed ordering. 2845 * 2846 * Safe to use in noinstr code; prefer atomic64_fetch_add_relaxed() elsewhere. 2847 * 2848 * Return: The original value of @v. 2849 */ 2850 static __always_inline s64 2851 raw_atomic64_fetch_add_relaxed(s64 i, atomic64_t *v) 2852 { 2853 #if defined(arch_atomic64_fetch_add_relaxed) 2854 return arch_atomic64_fetch_add_relaxed(i, v); 2855 #elif defined(arch_atomic64_fetch_add) 2856 return arch_atomic64_fetch_add(i, v); 2857 #else 2858 #error "Unable to define raw_atomic64_fetch_add_relaxed" 2859 #endif 2860 } 2861 2862 /** 2863 * raw_atomic64_sub() - atomic subtract with relaxed ordering 2864 * @i: s64 value to subtract 2865 * @v: pointer to atomic64_t 2866 * 2867 * Atomically updates @v to (@v - @i) with relaxed ordering. 2868 * 2869 * Safe to use in noinstr code; prefer atomic64_sub() elsewhere. 2870 * 2871 * Return: Nothing. 2872 */ 2873 static __always_inline void 2874 raw_atomic64_sub(s64 i, atomic64_t *v) 2875 { 2876 arch_atomic64_sub(i, v); 2877 } 2878 2879 /** 2880 * raw_atomic64_sub_return() - atomic subtract with full ordering 2881 * @i: s64 value to subtract 2882 * @v: pointer to atomic64_t 2883 * 2884 * Atomically updates @v to (@v - @i) with full ordering. 2885 * 2886 * Safe to use in noinstr code; prefer atomic64_sub_return() elsewhere. 2887 * 2888 * Return: The updated value of @v. 2889 */ 2890 static __always_inline s64 2891 raw_atomic64_sub_return(s64 i, atomic64_t *v) 2892 { 2893 #if defined(arch_atomic64_sub_return) 2894 return arch_atomic64_sub_return(i, v); 2895 #elif defined(arch_atomic64_sub_return_relaxed) 2896 s64 ret; 2897 __atomic_pre_full_fence(); 2898 ret = arch_atomic64_sub_return_relaxed(i, v); 2899 __atomic_post_full_fence(); 2900 return ret; 2901 #else 2902 #error "Unable to define raw_atomic64_sub_return" 2903 #endif 2904 } 2905 2906 /** 2907 * raw_atomic64_sub_return_acquire() - atomic subtract with acquire ordering 2908 * @i: s64 value to subtract 2909 * @v: pointer to atomic64_t 2910 * 2911 * Atomically updates @v to (@v - @i) with acquire ordering. 2912 * 2913 * Safe to use in noinstr code; prefer atomic64_sub_return_acquire() elsewhere. 2914 * 2915 * Return: The updated value of @v. 
2916 */ 2917 static __always_inline s64 2918 raw_atomic64_sub_return_acquire(s64 i, atomic64_t *v) 2919 { 2920 #if defined(arch_atomic64_sub_return_acquire) 2921 return arch_atomic64_sub_return_acquire(i, v); 2922 #elif defined(arch_atomic64_sub_return_relaxed) 2923 s64 ret = arch_atomic64_sub_return_relaxed(i, v); 2924 __atomic_acquire_fence(); 2925 return ret; 2926 #elif defined(arch_atomic64_sub_return) 2927 return arch_atomic64_sub_return(i, v); 2928 #else 2929 #error "Unable to define raw_atomic64_sub_return_acquire" 2930 #endif 2931 } 2932 2933 /** 2934 * raw_atomic64_sub_return_release() - atomic subtract with release ordering 2935 * @i: s64 value to subtract 2936 * @v: pointer to atomic64_t 2937 * 2938 * Atomically updates @v to (@v - @i) with release ordering. 2939 * 2940 * Safe to use in noinstr code; prefer atomic64_sub_return_release() elsewhere. 2941 * 2942 * Return: The updated value of @v. 2943 */ 2944 static __always_inline s64 2945 raw_atomic64_sub_return_release(s64 i, atomic64_t *v) 2946 { 2947 #if defined(arch_atomic64_sub_return_release) 2948 return arch_atomic64_sub_return_release(i, v); 2949 #elif defined(arch_atomic64_sub_return_relaxed) 2950 __atomic_release_fence(); 2951 return arch_atomic64_sub_return_relaxed(i, v); 2952 #elif defined(arch_atomic64_sub_return) 2953 return arch_atomic64_sub_return(i, v); 2954 #else 2955 #error "Unable to define raw_atomic64_sub_return_release" 2956 #endif 2957 } 2958 2959 /** 2960 * raw_atomic64_sub_return_relaxed() - atomic subtract with relaxed ordering 2961 * @i: s64 value to subtract 2962 * @v: pointer to atomic64_t 2963 * 2964 * Atomically updates @v to (@v - @i) with relaxed ordering. 2965 * 2966 * Safe to use in noinstr code; prefer atomic64_sub_return_relaxed() elsewhere. 2967 * 2968 * Return: The updated value of @v. 2969 */ 2970 static __always_inline s64 2971 raw_atomic64_sub_return_relaxed(s64 i, atomic64_t *v) 2972 { 2973 #if defined(arch_atomic64_sub_return_relaxed) 2974 return arch_atomic64_sub_return_relaxed(i, v); 2975 #elif defined(arch_atomic64_sub_return) 2976 return arch_atomic64_sub_return(i, v); 2977 #else 2978 #error "Unable to define raw_atomic64_sub_return_relaxed" 2979 #endif 2980 } 2981 2982 /** 2983 * raw_atomic64_fetch_sub() - atomic subtract with full ordering 2984 * @i: s64 value to subtract 2985 * @v: pointer to atomic64_t 2986 * 2987 * Atomically updates @v to (@v - @i) with full ordering. 2988 * 2989 * Safe to use in noinstr code; prefer atomic64_fetch_sub() elsewhere. 2990 * 2991 * Return: The original value of @v. 2992 */ 2993 static __always_inline s64 2994 raw_atomic64_fetch_sub(s64 i, atomic64_t *v) 2995 { 2996 #if defined(arch_atomic64_fetch_sub) 2997 return arch_atomic64_fetch_sub(i, v); 2998 #elif defined(arch_atomic64_fetch_sub_relaxed) 2999 s64 ret; 3000 __atomic_pre_full_fence(); 3001 ret = arch_atomic64_fetch_sub_relaxed(i, v); 3002 __atomic_post_full_fence(); 3003 return ret; 3004 #else 3005 #error "Unable to define raw_atomic64_fetch_sub" 3006 #endif 3007 } 3008 3009 /** 3010 * raw_atomic64_fetch_sub_acquire() - atomic subtract with acquire ordering 3011 * @i: s64 value to subtract 3012 * @v: pointer to atomic64_t 3013 * 3014 * Atomically updates @v to (@v - @i) with acquire ordering. 3015 * 3016 * Safe to use in noinstr code; prefer atomic64_fetch_sub_acquire() elsewhere. 3017 * 3018 * Return: The original value of @v. 
3019 */ 3020 static __always_inline s64 3021 raw_atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) 3022 { 3023 #if defined(arch_atomic64_fetch_sub_acquire) 3024 return arch_atomic64_fetch_sub_acquire(i, v); 3025 #elif defined(arch_atomic64_fetch_sub_relaxed) 3026 s64 ret = arch_atomic64_fetch_sub_relaxed(i, v); 3027 __atomic_acquire_fence(); 3028 return ret; 3029 #elif defined(arch_atomic64_fetch_sub) 3030 return arch_atomic64_fetch_sub(i, v); 3031 #else 3032 #error "Unable to define raw_atomic64_fetch_sub_acquire" 3033 #endif 3034 } 3035 3036 /** 3037 * raw_atomic64_fetch_sub_release() - atomic subtract with release ordering 3038 * @i: s64 value to subtract 3039 * @v: pointer to atomic64_t 3040 * 3041 * Atomically updates @v to (@v - @i) with release ordering. 3042 * 3043 * Safe to use in noinstr code; prefer atomic64_fetch_sub_release() elsewhere. 3044 * 3045 * Return: The original value of @v. 3046 */ 3047 static __always_inline s64 3048 raw_atomic64_fetch_sub_release(s64 i, atomic64_t *v) 3049 { 3050 #if defined(arch_atomic64_fetch_sub_release) 3051 return arch_atomic64_fetch_sub_release(i, v); 3052 #elif defined(arch_atomic64_fetch_sub_relaxed) 3053 __atomic_release_fence(); 3054 return arch_atomic64_fetch_sub_relaxed(i, v); 3055 #elif defined(arch_atomic64_fetch_sub) 3056 return arch_atomic64_fetch_sub(i, v); 3057 #else 3058 #error "Unable to define raw_atomic64_fetch_sub_release" 3059 #endif 3060 } 3061 3062 /** 3063 * raw_atomic64_fetch_sub_relaxed() - atomic subtract with relaxed ordering 3064 * @i: s64 value to subtract 3065 * @v: pointer to atomic64_t 3066 * 3067 * Atomically updates @v to (@v - @i) with relaxed ordering. 3068 * 3069 * Safe to use in noinstr code; prefer atomic64_fetch_sub_relaxed() elsewhere. 3070 * 3071 * Return: The original value of @v. 3072 */ 3073 static __always_inline s64 3074 raw_atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v) 3075 { 3076 #if defined(arch_atomic64_fetch_sub_relaxed) 3077 return arch_atomic64_fetch_sub_relaxed(i, v); 3078 #elif defined(arch_atomic64_fetch_sub) 3079 return arch_atomic64_fetch_sub(i, v); 3080 #else 3081 #error "Unable to define raw_atomic64_fetch_sub_relaxed" 3082 #endif 3083 } 3084 3085 /** 3086 * raw_atomic64_inc() - atomic increment with relaxed ordering 3087 * @v: pointer to atomic64_t 3088 * 3089 * Atomically updates @v to (@v + 1) with relaxed ordering. 3090 * 3091 * Safe to use in noinstr code; prefer atomic64_inc() elsewhere. 3092 * 3093 * Return: Nothing. 3094 */ 3095 static __always_inline void 3096 raw_atomic64_inc(atomic64_t *v) 3097 { 3098 #if defined(arch_atomic64_inc) 3099 arch_atomic64_inc(v); 3100 #else 3101 raw_atomic64_add(1, v); 3102 #endif 3103 } 3104 3105 /** 3106 * raw_atomic64_inc_return() - atomic increment with full ordering 3107 * @v: pointer to atomic64_t 3108 * 3109 * Atomically updates @v to (@v + 1) with full ordering. 3110 * 3111 * Safe to use in noinstr code; prefer atomic64_inc_return() elsewhere. 3112 * 3113 * Return: The updated value of @v. 
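 *
 * Editorial sketch (illustrative only; @next_seq is hypothetical): handing
 * out unique, fully ordered 64-bit sequence numbers:
 *
 *	s64 seq = raw_atomic64_inc_return(&next_seq);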
3114 */ 3115 static __always_inline s64 3116 raw_atomic64_inc_return(atomic64_t *v) 3117 { 3118 #if defined(arch_atomic64_inc_return) 3119 return arch_atomic64_inc_return(v); 3120 #elif defined(arch_atomic64_inc_return_relaxed) 3121 s64 ret; 3122 __atomic_pre_full_fence(); 3123 ret = arch_atomic64_inc_return_relaxed(v); 3124 __atomic_post_full_fence(); 3125 return ret; 3126 #else 3127 return raw_atomic64_add_return(1, v); 3128 #endif 3129 } 3130 3131 /** 3132 * raw_atomic64_inc_return_acquire() - atomic increment with acquire ordering 3133 * @v: pointer to atomic64_t 3134 * 3135 * Atomically updates @v to (@v + 1) with acquire ordering. 3136 * 3137 * Safe to use in noinstr code; prefer atomic64_inc_return_acquire() elsewhere. 3138 * 3139 * Return: The updated value of @v. 3140 */ 3141 static __always_inline s64 3142 raw_atomic64_inc_return_acquire(atomic64_t *v) 3143 { 3144 #if defined(arch_atomic64_inc_return_acquire) 3145 return arch_atomic64_inc_return_acquire(v); 3146 #elif defined(arch_atomic64_inc_return_relaxed) 3147 s64 ret = arch_atomic64_inc_return_relaxed(v); 3148 __atomic_acquire_fence(); 3149 return ret; 3150 #elif defined(arch_atomic64_inc_return) 3151 return arch_atomic64_inc_return(v); 3152 #else 3153 return raw_atomic64_add_return_acquire(1, v); 3154 #endif 3155 } 3156 3157 /** 3158 * raw_atomic64_inc_return_release() - atomic increment with release ordering 3159 * @v: pointer to atomic64_t 3160 * 3161 * Atomically updates @v to (@v + 1) with release ordering. 3162 * 3163 * Safe to use in noinstr code; prefer atomic64_inc_return_release() elsewhere. 3164 * 3165 * Return: The updated value of @v. 3166 */ 3167 static __always_inline s64 3168 raw_atomic64_inc_return_release(atomic64_t *v) 3169 { 3170 #if defined(arch_atomic64_inc_return_release) 3171 return arch_atomic64_inc_return_release(v); 3172 #elif defined(arch_atomic64_inc_return_relaxed) 3173 __atomic_release_fence(); 3174 return arch_atomic64_inc_return_relaxed(v); 3175 #elif defined(arch_atomic64_inc_return) 3176 return arch_atomic64_inc_return(v); 3177 #else 3178 return raw_atomic64_add_return_release(1, v); 3179 #endif 3180 } 3181 3182 /** 3183 * raw_atomic64_inc_return_relaxed() - atomic increment with relaxed ordering 3184 * @v: pointer to atomic64_t 3185 * 3186 * Atomically updates @v to (@v + 1) with relaxed ordering. 3187 * 3188 * Safe to use in noinstr code; prefer atomic64_inc_return_relaxed() elsewhere. 3189 * 3190 * Return: The updated value of @v. 3191 */ 3192 static __always_inline s64 3193 raw_atomic64_inc_return_relaxed(atomic64_t *v) 3194 { 3195 #if defined(arch_atomic64_inc_return_relaxed) 3196 return arch_atomic64_inc_return_relaxed(v); 3197 #elif defined(arch_atomic64_inc_return) 3198 return arch_atomic64_inc_return(v); 3199 #else 3200 return raw_atomic64_add_return_relaxed(1, v); 3201 #endif 3202 } 3203 3204 /** 3205 * raw_atomic64_fetch_inc() - atomic increment with full ordering 3206 * @v: pointer to atomic64_t 3207 * 3208 * Atomically updates @v to (@v + 1) with full ordering. 3209 * 3210 * Safe to use in noinstr code; prefer atomic64_fetch_inc() elsewhere. 3211 * 3212 * Return: The original value of @v. 
3213 */ 3214 static __always_inline s64 3215 raw_atomic64_fetch_inc(atomic64_t *v) 3216 { 3217 #if defined(arch_atomic64_fetch_inc) 3218 return arch_atomic64_fetch_inc(v); 3219 #elif defined(arch_atomic64_fetch_inc_relaxed) 3220 s64 ret; 3221 __atomic_pre_full_fence(); 3222 ret = arch_atomic64_fetch_inc_relaxed(v); 3223 __atomic_post_full_fence(); 3224 return ret; 3225 #else 3226 return raw_atomic64_fetch_add(1, v); 3227 #endif 3228 } 3229 3230 /** 3231 * raw_atomic64_fetch_inc_acquire() - atomic increment with acquire ordering 3232 * @v: pointer to atomic64_t 3233 * 3234 * Atomically updates @v to (@v + 1) with acquire ordering. 3235 * 3236 * Safe to use in noinstr code; prefer atomic64_fetch_inc_acquire() elsewhere. 3237 * 3238 * Return: The original value of @v. 3239 */ 3240 static __always_inline s64 3241 raw_atomic64_fetch_inc_acquire(atomic64_t *v) 3242 { 3243 #if defined(arch_atomic64_fetch_inc_acquire) 3244 return arch_atomic64_fetch_inc_acquire(v); 3245 #elif defined(arch_atomic64_fetch_inc_relaxed) 3246 s64 ret = arch_atomic64_fetch_inc_relaxed(v); 3247 __atomic_acquire_fence(); 3248 return ret; 3249 #elif defined(arch_atomic64_fetch_inc) 3250 return arch_atomic64_fetch_inc(v); 3251 #else 3252 return raw_atomic64_fetch_add_acquire(1, v); 3253 #endif 3254 } 3255 3256 /** 3257 * raw_atomic64_fetch_inc_release() - atomic increment with release ordering 3258 * @v: pointer to atomic64_t 3259 * 3260 * Atomically updates @v to (@v + 1) with release ordering. 3261 * 3262 * Safe to use in noinstr code; prefer atomic64_fetch_inc_release() elsewhere. 3263 * 3264 * Return: The original value of @v. 3265 */ 3266 static __always_inline s64 3267 raw_atomic64_fetch_inc_release(atomic64_t *v) 3268 { 3269 #if defined(arch_atomic64_fetch_inc_release) 3270 return arch_atomic64_fetch_inc_release(v); 3271 #elif defined(arch_atomic64_fetch_inc_relaxed) 3272 __atomic_release_fence(); 3273 return arch_atomic64_fetch_inc_relaxed(v); 3274 #elif defined(arch_atomic64_fetch_inc) 3275 return arch_atomic64_fetch_inc(v); 3276 #else 3277 return raw_atomic64_fetch_add_release(1, v); 3278 #endif 3279 } 3280 3281 /** 3282 * raw_atomic64_fetch_inc_relaxed() - atomic increment with relaxed ordering 3283 * @v: pointer to atomic64_t 3284 * 3285 * Atomically updates @v to (@v + 1) with relaxed ordering. 3286 * 3287 * Safe to use in noinstr code; prefer atomic64_fetch_inc_relaxed() elsewhere. 3288 * 3289 * Return: The original value of @v. 3290 */ 3291 static __always_inline s64 3292 raw_atomic64_fetch_inc_relaxed(atomic64_t *v) 3293 { 3294 #if defined(arch_atomic64_fetch_inc_relaxed) 3295 return arch_atomic64_fetch_inc_relaxed(v); 3296 #elif defined(arch_atomic64_fetch_inc) 3297 return arch_atomic64_fetch_inc(v); 3298 #else 3299 return raw_atomic64_fetch_add_relaxed(1, v); 3300 #endif 3301 } 3302 3303 /** 3304 * raw_atomic64_dec() - atomic decrement with relaxed ordering 3305 * @v: pointer to atomic64_t 3306 * 3307 * Atomically updates @v to (@v - 1) with relaxed ordering. 3308 * 3309 * Safe to use in noinstr code; prefer atomic64_dec() elsewhere. 3310 * 3311 * Return: Nothing. 3312 */ 3313 static __always_inline void 3314 raw_atomic64_dec(atomic64_t *v) 3315 { 3316 #if defined(arch_atomic64_dec) 3317 arch_atomic64_dec(v); 3318 #else 3319 raw_atomic64_sub(1, v); 3320 #endif 3321 } 3322 3323 /** 3324 * raw_atomic64_dec_return() - atomic decrement with full ordering 3325 * @v: pointer to atomic64_t 3326 * 3327 * Atomically updates @v to (@v - 1) with full ordering. 
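 *
 * Editorial sketch (illustrative only; @refs, @obj and destroy_object() are
 * hypothetical): dropping a reference and acting only on the final put:
 *
 *	if (raw_atomic64_dec_return(&refs) == 0)
 *		destroy_object(obj);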
3328 * 3329 * Safe to use in noinstr code; prefer atomic64_dec_return() elsewhere. 3330 * 3331 * Return: The updated value of @v. 3332 */ 3333 static __always_inline s64 3334 raw_atomic64_dec_return(atomic64_t *v) 3335 { 3336 #if defined(arch_atomic64_dec_return) 3337 return arch_atomic64_dec_return(v); 3338 #elif defined(arch_atomic64_dec_return_relaxed) 3339 s64 ret; 3340 __atomic_pre_full_fence(); 3341 ret = arch_atomic64_dec_return_relaxed(v); 3342 __atomic_post_full_fence(); 3343 return ret; 3344 #else 3345 return raw_atomic64_sub_return(1, v); 3346 #endif 3347 } 3348 3349 /** 3350 * raw_atomic64_dec_return_acquire() - atomic decrement with acquire ordering 3351 * @v: pointer to atomic64_t 3352 * 3353 * Atomically updates @v to (@v - 1) with acquire ordering. 3354 * 3355 * Safe to use in noinstr code; prefer atomic64_dec_return_acquire() elsewhere. 3356 * 3357 * Return: The updated value of @v. 3358 */ 3359 static __always_inline s64 3360 raw_atomic64_dec_return_acquire(atomic64_t *v) 3361 { 3362 #if defined(arch_atomic64_dec_return_acquire) 3363 return arch_atomic64_dec_return_acquire(v); 3364 #elif defined(arch_atomic64_dec_return_relaxed) 3365 s64 ret = arch_atomic64_dec_return_relaxed(v); 3366 __atomic_acquire_fence(); 3367 return ret; 3368 #elif defined(arch_atomic64_dec_return) 3369 return arch_atomic64_dec_return(v); 3370 #else 3371 return raw_atomic64_sub_return_acquire(1, v); 3372 #endif 3373 } 3374 3375 /** 3376 * raw_atomic64_dec_return_release() - atomic decrement with release ordering 3377 * @v: pointer to atomic64_t 3378 * 3379 * Atomically updates @v to (@v - 1) with release ordering. 3380 * 3381 * Safe to use in noinstr code; prefer atomic64_dec_return_release() elsewhere. 3382 * 3383 * Return: The updated value of @v. 3384 */ 3385 static __always_inline s64 3386 raw_atomic64_dec_return_release(atomic64_t *v) 3387 { 3388 #if defined(arch_atomic64_dec_return_release) 3389 return arch_atomic64_dec_return_release(v); 3390 #elif defined(arch_atomic64_dec_return_relaxed) 3391 __atomic_release_fence(); 3392 return arch_atomic64_dec_return_relaxed(v); 3393 #elif defined(arch_atomic64_dec_return) 3394 return arch_atomic64_dec_return(v); 3395 #else 3396 return raw_atomic64_sub_return_release(1, v); 3397 #endif 3398 } 3399 3400 /** 3401 * raw_atomic64_dec_return_relaxed() - atomic decrement with relaxed ordering 3402 * @v: pointer to atomic64_t 3403 * 3404 * Atomically updates @v to (@v - 1) with relaxed ordering. 3405 * 3406 * Safe to use in noinstr code; prefer atomic64_dec_return_relaxed() elsewhere. 3407 * 3408 * Return: The updated value of @v. 3409 */ 3410 static __always_inline s64 3411 raw_atomic64_dec_return_relaxed(atomic64_t *v) 3412 { 3413 #if defined(arch_atomic64_dec_return_relaxed) 3414 return arch_atomic64_dec_return_relaxed(v); 3415 #elif defined(arch_atomic64_dec_return) 3416 return arch_atomic64_dec_return(v); 3417 #else 3418 return raw_atomic64_sub_return_relaxed(1, v); 3419 #endif 3420 } 3421 3422 /** 3423 * raw_atomic64_fetch_dec() - atomic decrement with full ordering 3424 * @v: pointer to atomic64_t 3425 * 3426 * Atomically updates @v to (@v - 1) with full ordering. 3427 * 3428 * Safe to use in noinstr code; prefer atomic64_fetch_dec() elsewhere. 3429 * 3430 * Return: The original value of @v. 
3431 */ 3432 static __always_inline s64 3433 raw_atomic64_fetch_dec(atomic64_t *v) 3434 { 3435 #if defined(arch_atomic64_fetch_dec) 3436 return arch_atomic64_fetch_dec(v); 3437 #elif defined(arch_atomic64_fetch_dec_relaxed) 3438 s64 ret; 3439 __atomic_pre_full_fence(); 3440 ret = arch_atomic64_fetch_dec_relaxed(v); 3441 __atomic_post_full_fence(); 3442 return ret; 3443 #else 3444 return raw_atomic64_fetch_sub(1, v); 3445 #endif 3446 } 3447 3448 /** 3449 * raw_atomic64_fetch_dec_acquire() - atomic decrement with acquire ordering 3450 * @v: pointer to atomic64_t 3451 * 3452 * Atomically updates @v to (@v - 1) with acquire ordering. 3453 * 3454 * Safe to use in noinstr code; prefer atomic64_fetch_dec_acquire() elsewhere. 3455 * 3456 * Return: The original value of @v. 3457 */ 3458 static __always_inline s64 3459 raw_atomic64_fetch_dec_acquire(atomic64_t *v) 3460 { 3461 #if defined(arch_atomic64_fetch_dec_acquire) 3462 return arch_atomic64_fetch_dec_acquire(v); 3463 #elif defined(arch_atomic64_fetch_dec_relaxed) 3464 s64 ret = arch_atomic64_fetch_dec_relaxed(v); 3465 __atomic_acquire_fence(); 3466 return ret; 3467 #elif defined(arch_atomic64_fetch_dec) 3468 return arch_atomic64_fetch_dec(v); 3469 #else 3470 return raw_atomic64_fetch_sub_acquire(1, v); 3471 #endif 3472 } 3473 3474 /** 3475 * raw_atomic64_fetch_dec_release() - atomic decrement with release ordering 3476 * @v: pointer to atomic64_t 3477 * 3478 * Atomically updates @v to (@v - 1) with release ordering. 3479 * 3480 * Safe to use in noinstr code; prefer atomic64_fetch_dec_release() elsewhere. 3481 * 3482 * Return: The original value of @v. 3483 */ 3484 static __always_inline s64 3485 raw_atomic64_fetch_dec_release(atomic64_t *v) 3486 { 3487 #if defined(arch_atomic64_fetch_dec_release) 3488 return arch_atomic64_fetch_dec_release(v); 3489 #elif defined(arch_atomic64_fetch_dec_relaxed) 3490 __atomic_release_fence(); 3491 return arch_atomic64_fetch_dec_relaxed(v); 3492 #elif defined(arch_atomic64_fetch_dec) 3493 return arch_atomic64_fetch_dec(v); 3494 #else 3495 return raw_atomic64_fetch_sub_release(1, v); 3496 #endif 3497 } 3498 3499 /** 3500 * raw_atomic64_fetch_dec_relaxed() - atomic decrement with relaxed ordering 3501 * @v: pointer to atomic64_t 3502 * 3503 * Atomically updates @v to (@v - 1) with relaxed ordering. 3504 * 3505 * Safe to use in noinstr code; prefer atomic64_fetch_dec_relaxed() elsewhere. 3506 * 3507 * Return: The original value of @v. 3508 */ 3509 static __always_inline s64 3510 raw_atomic64_fetch_dec_relaxed(atomic64_t *v) 3511 { 3512 #if defined(arch_atomic64_fetch_dec_relaxed) 3513 return arch_atomic64_fetch_dec_relaxed(v); 3514 #elif defined(arch_atomic64_fetch_dec) 3515 return arch_atomic64_fetch_dec(v); 3516 #else 3517 return raw_atomic64_fetch_sub_relaxed(1, v); 3518 #endif 3519 } 3520 3521 /** 3522 * raw_atomic64_and() - atomic bitwise AND with relaxed ordering 3523 * @i: s64 value 3524 * @v: pointer to atomic64_t 3525 * 3526 * Atomically updates @v to (@v & @i) with relaxed ordering. 3527 * 3528 * Safe to use in noinstr code; prefer atomic64_and() elsewhere. 3529 * 3530 * Return: Nothing. 3531 */ 3532 static __always_inline void 3533 raw_atomic64_and(s64 i, atomic64_t *v) 3534 { 3535 arch_atomic64_and(i, v); 3536 } 3537 3538 /** 3539 * raw_atomic64_fetch_and() - atomic bitwise AND with full ordering 3540 * @i: s64 value 3541 * @v: pointer to atomic64_t 3542 * 3543 * Atomically updates @v to (@v & @i) with full ordering. 3544 * 3545 * Safe to use in noinstr code; prefer atomic64_fetch_and() elsewhere. 
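 *
 * Editorial sketch (illustrative only; @flags64 and @mask are hypothetical):
 * retain only the bits in @mask while learning which flags were previously
 * set:
 *
 *	s64 old_flags = raw_atomic64_fetch_and(mask, &flags64);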
3546 * 3547 * Return: The original value of @v. 3548 */ 3549 static __always_inline s64 3550 raw_atomic64_fetch_and(s64 i, atomic64_t *v) 3551 { 3552 #if defined(arch_atomic64_fetch_and) 3553 return arch_atomic64_fetch_and(i, v); 3554 #elif defined(arch_atomic64_fetch_and_relaxed) 3555 s64 ret; 3556 __atomic_pre_full_fence(); 3557 ret = arch_atomic64_fetch_and_relaxed(i, v); 3558 __atomic_post_full_fence(); 3559 return ret; 3560 #else 3561 #error "Unable to define raw_atomic64_fetch_and" 3562 #endif 3563 } 3564 3565 /** 3566 * raw_atomic64_fetch_and_acquire() - atomic bitwise AND with acquire ordering 3567 * @i: s64 value 3568 * @v: pointer to atomic64_t 3569 * 3570 * Atomically updates @v to (@v & @i) with acquire ordering. 3571 * 3572 * Safe to use in noinstr code; prefer atomic64_fetch_and_acquire() elsewhere. 3573 * 3574 * Return: The original value of @v. 3575 */ 3576 static __always_inline s64 3577 raw_atomic64_fetch_and_acquire(s64 i, atomic64_t *v) 3578 { 3579 #if defined(arch_atomic64_fetch_and_acquire) 3580 return arch_atomic64_fetch_and_acquire(i, v); 3581 #elif defined(arch_atomic64_fetch_and_relaxed) 3582 s64 ret = arch_atomic64_fetch_and_relaxed(i, v); 3583 __atomic_acquire_fence(); 3584 return ret; 3585 #elif defined(arch_atomic64_fetch_and) 3586 return arch_atomic64_fetch_and(i, v); 3587 #else 3588 #error "Unable to define raw_atomic64_fetch_and_acquire" 3589 #endif 3590 } 3591 3592 /** 3593 * raw_atomic64_fetch_and_release() - atomic bitwise AND with release ordering 3594 * @i: s64 value 3595 * @v: pointer to atomic64_t 3596 * 3597 * Atomically updates @v to (@v & @i) with release ordering. 3598 * 3599 * Safe to use in noinstr code; prefer atomic64_fetch_and_release() elsewhere. 3600 * 3601 * Return: The original value of @v. 3602 */ 3603 static __always_inline s64 3604 raw_atomic64_fetch_and_release(s64 i, atomic64_t *v) 3605 { 3606 #if defined(arch_atomic64_fetch_and_release) 3607 return arch_atomic64_fetch_and_release(i, v); 3608 #elif defined(arch_atomic64_fetch_and_relaxed) 3609 __atomic_release_fence(); 3610 return arch_atomic64_fetch_and_relaxed(i, v); 3611 #elif defined(arch_atomic64_fetch_and) 3612 return arch_atomic64_fetch_and(i, v); 3613 #else 3614 #error "Unable to define raw_atomic64_fetch_and_release" 3615 #endif 3616 } 3617 3618 /** 3619 * raw_atomic64_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering 3620 * @i: s64 value 3621 * @v: pointer to atomic64_t 3622 * 3623 * Atomically updates @v to (@v & @i) with relaxed ordering. 3624 * 3625 * Safe to use in noinstr code; prefer atomic64_fetch_and_relaxed() elsewhere. 3626 * 3627 * Return: The original value of @v. 3628 */ 3629 static __always_inline s64 3630 raw_atomic64_fetch_and_relaxed(s64 i, atomic64_t *v) 3631 { 3632 #if defined(arch_atomic64_fetch_and_relaxed) 3633 return arch_atomic64_fetch_and_relaxed(i, v); 3634 #elif defined(arch_atomic64_fetch_and) 3635 return arch_atomic64_fetch_and(i, v); 3636 #else 3637 #error "Unable to define raw_atomic64_fetch_and_relaxed" 3638 #endif 3639 } 3640 3641 /** 3642 * raw_atomic64_andnot() - atomic bitwise AND NOT with relaxed ordering 3643 * @i: s64 value 3644 * @v: pointer to atomic64_t 3645 * 3646 * Atomically updates @v to (@v & ~@i) with relaxed ordering. 3647 * 3648 * Safe to use in noinstr code; prefer atomic64_andnot() elsewhere. 3649 * 3650 * Return: Nothing. 
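 *
 * Editorial sketch (illustrative only; @flags64 and @mask are hypothetical):
 * clear the bits in @mask when the previous value is not needed:
 *
 *	raw_atomic64_andnot(mask, &flags64);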
3651 */ 3652 static __always_inline void 3653 raw_atomic64_andnot(s64 i, atomic64_t *v) 3654 { 3655 #if defined(arch_atomic64_andnot) 3656 arch_atomic64_andnot(i, v); 3657 #else 3658 raw_atomic64_and(~i, v); 3659 #endif 3660 } 3661 3662 /** 3663 * raw_atomic64_fetch_andnot() - atomic bitwise AND NOT with full ordering 3664 * @i: s64 value 3665 * @v: pointer to atomic64_t 3666 * 3667 * Atomically updates @v to (@v & ~@i) with full ordering. 3668 * 3669 * Safe to use in noinstr code; prefer atomic64_fetch_andnot() elsewhere. 3670 * 3671 * Return: The original value of @v. 3672 */ 3673 static __always_inline s64 3674 raw_atomic64_fetch_andnot(s64 i, atomic64_t *v) 3675 { 3676 #if defined(arch_atomic64_fetch_andnot) 3677 return arch_atomic64_fetch_andnot(i, v); 3678 #elif defined(arch_atomic64_fetch_andnot_relaxed) 3679 s64 ret; 3680 __atomic_pre_full_fence(); 3681 ret = arch_atomic64_fetch_andnot_relaxed(i, v); 3682 __atomic_post_full_fence(); 3683 return ret; 3684 #else 3685 return raw_atomic64_fetch_and(~i, v); 3686 #endif 3687 } 3688 3689 /** 3690 * raw_atomic64_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering 3691 * @i: s64 value 3692 * @v: pointer to atomic64_t 3693 * 3694 * Atomically updates @v to (@v & ~@i) with acquire ordering. 3695 * 3696 * Safe to use in noinstr code; prefer atomic64_fetch_andnot_acquire() elsewhere. 3697 * 3698 * Return: The original value of @v. 3699 */ 3700 static __always_inline s64 3701 raw_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) 3702 { 3703 #if defined(arch_atomic64_fetch_andnot_acquire) 3704 return arch_atomic64_fetch_andnot_acquire(i, v); 3705 #elif defined(arch_atomic64_fetch_andnot_relaxed) 3706 s64 ret = arch_atomic64_fetch_andnot_relaxed(i, v); 3707 __atomic_acquire_fence(); 3708 return ret; 3709 #elif defined(arch_atomic64_fetch_andnot) 3710 return arch_atomic64_fetch_andnot(i, v); 3711 #else 3712 return raw_atomic64_fetch_and_acquire(~i, v); 3713 #endif 3714 } 3715 3716 /** 3717 * raw_atomic64_fetch_andnot_release() - atomic bitwise AND NOT with release ordering 3718 * @i: s64 value 3719 * @v: pointer to atomic64_t 3720 * 3721 * Atomically updates @v to (@v & ~@i) with release ordering. 3722 * 3723 * Safe to use in noinstr code; prefer atomic64_fetch_andnot_release() elsewhere. 3724 * 3725 * Return: The original value of @v. 3726 */ 3727 static __always_inline s64 3728 raw_atomic64_fetch_andnot_release(s64 i, atomic64_t *v) 3729 { 3730 #if defined(arch_atomic64_fetch_andnot_release) 3731 return arch_atomic64_fetch_andnot_release(i, v); 3732 #elif defined(arch_atomic64_fetch_andnot_relaxed) 3733 __atomic_release_fence(); 3734 return arch_atomic64_fetch_andnot_relaxed(i, v); 3735 #elif defined(arch_atomic64_fetch_andnot) 3736 return arch_atomic64_fetch_andnot(i, v); 3737 #else 3738 return raw_atomic64_fetch_and_release(~i, v); 3739 #endif 3740 } 3741 3742 /** 3743 * raw_atomic64_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering 3744 * @i: s64 value 3745 * @v: pointer to atomic64_t 3746 * 3747 * Atomically updates @v to (@v & ~@i) with relaxed ordering. 3748 * 3749 * Safe to use in noinstr code; prefer atomic64_fetch_andnot_relaxed() elsewhere. 3750 * 3751 * Return: The original value of @v. 

/**
 * raw_atomic64_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v & ~@i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_andnot_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_andnot_relaxed)
	return arch_atomic64_fetch_andnot_relaxed(i, v);
#elif defined(arch_atomic64_fetch_andnot)
	return arch_atomic64_fetch_andnot(i, v);
#else
	return raw_atomic64_fetch_and_relaxed(~i, v);
#endif
}

/**
 * raw_atomic64_or() - atomic bitwise OR with relaxed ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v | @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_or() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic64_or(s64 i, atomic64_t *v)
{
	arch_atomic64_or(i, v);
}

/**
 * raw_atomic64_fetch_or() - atomic bitwise OR with full ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v | @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_or() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_or(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_or)
	return arch_atomic64_fetch_or(i, v);
#elif defined(arch_atomic64_fetch_or_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_fetch_or_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic64_fetch_or"
#endif
}

/**
 * raw_atomic64_fetch_or_acquire() - atomic bitwise OR with acquire ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v | @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_or_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_or_acquire)
	return arch_atomic64_fetch_or_acquire(i, v);
#elif defined(arch_atomic64_fetch_or_relaxed)
	s64 ret = arch_atomic64_fetch_or_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_fetch_or)
	return arch_atomic64_fetch_or(i, v);
#else
#error "Unable to define raw_atomic64_fetch_or_acquire"
#endif
}
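
/*
 * Editor's illustrative sketch, not produced by the generator: setting a flag
 * with the fetch form of OR tells the caller whether it was the first to set
 * that flag, a common "claim once" pattern. Names are hypothetical.
 */
static __always_inline bool
example_atomic64_claim_flag(atomic64_t *flags, s64 flag)
{
	/* True only for the caller that transitions the bit from 0 to 1. */
	return (raw_atomic64_fetch_or(flag, flags) & flag) == 0;
}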

/**
 * raw_atomic64_fetch_or_release() - atomic bitwise OR with release ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v | @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_or_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_or_release(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_or_release)
	return arch_atomic64_fetch_or_release(i, v);
#elif defined(arch_atomic64_fetch_or_relaxed)
	__atomic_release_fence();
	return arch_atomic64_fetch_or_relaxed(i, v);
#elif defined(arch_atomic64_fetch_or)
	return arch_atomic64_fetch_or(i, v);
#else
#error "Unable to define raw_atomic64_fetch_or_release"
#endif
}

/**
 * raw_atomic64_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v | @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_or_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_or_relaxed)
	return arch_atomic64_fetch_or_relaxed(i, v);
#elif defined(arch_atomic64_fetch_or)
	return arch_atomic64_fetch_or(i, v);
#else
#error "Unable to define raw_atomic64_fetch_or_relaxed"
#endif
}

/**
 * raw_atomic64_xor() - atomic bitwise XOR with relaxed ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v ^ @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_xor() elsewhere.
 *
 * Return: Nothing.
 */
static __always_inline void
raw_atomic64_xor(s64 i, atomic64_t *v)
{
	arch_atomic64_xor(i, v);
}

/**
 * raw_atomic64_fetch_xor() - atomic bitwise XOR with full ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v ^ @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_xor() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_xor(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_xor)
	return arch_atomic64_fetch_xor(i, v);
#elif defined(arch_atomic64_fetch_xor_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_fetch_xor_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
#error "Unable to define raw_atomic64_fetch_xor"
#endif
}
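
/*
 * Editor's illustrative sketch, not produced by the generator: XOR toggles
 * the bits in @mask, and the fetch form reports the state they were toggled
 * away from. Names are hypothetical.
 */
static __always_inline s64
example_atomic64_toggle_flags(atomic64_t *flags, s64 mask)
{
	/* Bits of the return value within @mask give the pre-toggle state. */
	return raw_atomic64_fetch_xor(mask, flags) & mask;
}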

/**
 * raw_atomic64_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v ^ @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_xor_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_xor_acquire)
	return arch_atomic64_fetch_xor_acquire(i, v);
#elif defined(arch_atomic64_fetch_xor_relaxed)
	s64 ret = arch_atomic64_fetch_xor_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_fetch_xor)
	return arch_atomic64_fetch_xor(i, v);
#else
#error "Unable to define raw_atomic64_fetch_xor_acquire"
#endif
}

/**
 * raw_atomic64_fetch_xor_release() - atomic bitwise XOR with release ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v ^ @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_xor_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_xor_release(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_xor_release)
	return arch_atomic64_fetch_xor_release(i, v);
#elif defined(arch_atomic64_fetch_xor_relaxed)
	__atomic_release_fence();
	return arch_atomic64_fetch_xor_relaxed(i, v);
#elif defined(arch_atomic64_fetch_xor)
	return arch_atomic64_fetch_xor(i, v);
#else
#error "Unable to define raw_atomic64_fetch_xor_release"
#endif
}

/**
 * raw_atomic64_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering
 * @i: s64 value
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v ^ @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_xor_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_fetch_xor_relaxed)
	return arch_atomic64_fetch_xor_relaxed(i, v);
#elif defined(arch_atomic64_fetch_xor)
	return arch_atomic64_fetch_xor(i, v);
#else
#error "Unable to define raw_atomic64_fetch_xor_relaxed"
#endif
}

/**
 * raw_atomic64_xchg() - atomic exchange with full ordering
 * @v: pointer to atomic64_t
 * @new: s64 value to assign
 *
 * Atomically updates @v to @new with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_xchg() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_xchg(atomic64_t *v, s64 new)
{
#if defined(arch_atomic64_xchg)
	return arch_atomic64_xchg(v, new);
#elif defined(arch_atomic64_xchg_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_xchg_relaxed(v, new);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_xchg(&v->counter, new);
#endif
}
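
/*
 * Editor's illustrative sketch, not produced by the generator: xchg is a
 * natural fit for draining a statistics counter, atomically reading the
 * accumulated value and resetting it in one step. Names are hypothetical.
 */
static __always_inline s64
example_atomic64_drain_counter(atomic64_t *counter)
{
	/* Returns everything accumulated so far; the counter restarts at 0. */
	return raw_atomic64_xchg(counter, 0);
}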

/**
 * raw_atomic64_xchg_acquire() - atomic exchange with acquire ordering
 * @v: pointer to atomic64_t
 * @new: s64 value to assign
 *
 * Atomically updates @v to @new with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_xchg_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_xchg_acquire(atomic64_t *v, s64 new)
{
#if defined(arch_atomic64_xchg_acquire)
	return arch_atomic64_xchg_acquire(v, new);
#elif defined(arch_atomic64_xchg_relaxed)
	s64 ret = arch_atomic64_xchg_relaxed(v, new);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_xchg)
	return arch_atomic64_xchg(v, new);
#else
	return raw_xchg_acquire(&v->counter, new);
#endif
}

/**
 * raw_atomic64_xchg_release() - atomic exchange with release ordering
 * @v: pointer to atomic64_t
 * @new: s64 value to assign
 *
 * Atomically updates @v to @new with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_xchg_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_xchg_release(atomic64_t *v, s64 new)
{
#if defined(arch_atomic64_xchg_release)
	return arch_atomic64_xchg_release(v, new);
#elif defined(arch_atomic64_xchg_relaxed)
	__atomic_release_fence();
	return arch_atomic64_xchg_relaxed(v, new);
#elif defined(arch_atomic64_xchg)
	return arch_atomic64_xchg(v, new);
#else
	return raw_xchg_release(&v->counter, new);
#endif
}

/**
 * raw_atomic64_xchg_relaxed() - atomic exchange with relaxed ordering
 * @v: pointer to atomic64_t
 * @new: s64 value to assign
 *
 * Atomically updates @v to @new with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_xchg_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_xchg_relaxed(atomic64_t *v, s64 new)
{
#if defined(arch_atomic64_xchg_relaxed)
	return arch_atomic64_xchg_relaxed(v, new);
#elif defined(arch_atomic64_xchg)
	return arch_atomic64_xchg(v, new);
#else
	return raw_xchg_relaxed(&v->counter, new);
#endif
}

/**
 * raw_atomic64_cmpxchg() - atomic compare and exchange with full ordering
 * @v: pointer to atomic64_t
 * @old: s64 value to compare with
 * @new: s64 value to assign
 *
 * If (@v == @old), atomically updates @v to @new with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_cmpxchg() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
#if defined(arch_atomic64_cmpxchg)
	return arch_atomic64_cmpxchg(v, old, new);
#elif defined(arch_atomic64_cmpxchg_relaxed)
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_cmpxchg(&v->counter, old, new);
#endif
}
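
/*
 * Editor's illustrative sketch, not produced by the generator: cmpxchg
 * returns the value it found, so success is detected by comparing that value
 * with the expected one. Names are hypothetical.
 */
static __always_inline bool
example_atomic64_set_once(atomic64_t *v, s64 new)
{
	/* Only the caller that finds the initial value of 0 succeeds. */
	return raw_atomic64_cmpxchg(v, 0, new) == 0;
}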

/**
 * raw_atomic64_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
 * @v: pointer to atomic64_t
 * @old: s64 value to compare with
 * @new: s64 value to assign
 *
 * If (@v == @old), atomically updates @v to @new with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_cmpxchg_acquire() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
{
#if defined(arch_atomic64_cmpxchg_acquire)
	return arch_atomic64_cmpxchg_acquire(v, old, new);
#elif defined(arch_atomic64_cmpxchg_relaxed)
	s64 ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_cmpxchg)
	return arch_atomic64_cmpxchg(v, old, new);
#else
	return raw_cmpxchg_acquire(&v->counter, old, new);
#endif
}

/**
 * raw_atomic64_cmpxchg_release() - atomic compare and exchange with release ordering
 * @v: pointer to atomic64_t
 * @old: s64 value to compare with
 * @new: s64 value to assign
 *
 * If (@v == @old), atomically updates @v to @new with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_cmpxchg_release() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
{
#if defined(arch_atomic64_cmpxchg_release)
	return arch_atomic64_cmpxchg_release(v, old, new);
#elif defined(arch_atomic64_cmpxchg_relaxed)
	__atomic_release_fence();
	return arch_atomic64_cmpxchg_relaxed(v, old, new);
#elif defined(arch_atomic64_cmpxchg)
	return arch_atomic64_cmpxchg(v, old, new);
#else
	return raw_cmpxchg_release(&v->counter, old, new);
#endif
}

/**
 * raw_atomic64_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
 * @v: pointer to atomic64_t
 * @old: s64 value to compare with
 * @new: s64 value to assign
 *
 * If (@v == @old), atomically updates @v to @new with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_cmpxchg_relaxed() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
{
#if defined(arch_atomic64_cmpxchg_relaxed)
	return arch_atomic64_cmpxchg_relaxed(v, old, new);
#elif defined(arch_atomic64_cmpxchg)
	return arch_atomic64_cmpxchg(v, old, new);
#else
	return raw_cmpxchg_relaxed(&v->counter, old, new);
#endif
}

/**
 * raw_atomic64_try_cmpxchg() - atomic compare and exchange with full ordering
 * @v: pointer to atomic64_t
 * @old: pointer to s64 value to compare with
 * @new: s64 value to assign
 *
 * If (@v == @old), atomically updates @v to @new with full ordering.
 * Otherwise, updates @old to the current value of @v.
 *
 * Safe to use in noinstr code; prefer atomic64_try_cmpxchg() elsewhere.
 *
 * Return: @true if the exchange occurred, @false otherwise.
 */
static __always_inline bool
raw_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
#if defined(arch_atomic64_try_cmpxchg)
	return arch_atomic64_try_cmpxchg(v, old, new);
#elif defined(arch_atomic64_try_cmpxchg_relaxed)
	bool ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
	__atomic_post_full_fence();
	return ret;
#else
	s64 r, o = *old;
	r = raw_atomic64_cmpxchg(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
#endif
}

/**
 * raw_atomic64_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
 * @v: pointer to atomic64_t
 * @old: pointer to s64 value to compare with
 * @new: s64 value to assign
 *
 * If (@v == @old), atomically updates @v to @new with acquire ordering.
 * Otherwise, updates @old to the current value of @v.
 *
 * Safe to use in noinstr code; prefer atomic64_try_cmpxchg_acquire() elsewhere.
 *
 * Return: @true if the exchange occurred, @false otherwise.
 */
static __always_inline bool
raw_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
{
#if defined(arch_atomic64_try_cmpxchg_acquire)
	return arch_atomic64_try_cmpxchg_acquire(v, old, new);
#elif defined(arch_atomic64_try_cmpxchg_relaxed)
	bool ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_try_cmpxchg)
	return arch_atomic64_try_cmpxchg(v, old, new);
#else
	s64 r, o = *old;
	r = raw_atomic64_cmpxchg_acquire(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
#endif
}

/**
 * raw_atomic64_try_cmpxchg_release() - atomic compare and exchange with release ordering
 * @v: pointer to atomic64_t
 * @old: pointer to s64 value to compare with
 * @new: s64 value to assign
 *
 * If (@v == @old), atomically updates @v to @new with release ordering.
 * Otherwise, updates @old to the current value of @v.
 *
 * Safe to use in noinstr code; prefer atomic64_try_cmpxchg_release() elsewhere.
 *
 * Return: @true if the exchange occurred, @false otherwise.
 */
static __always_inline bool
raw_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
{
#if defined(arch_atomic64_try_cmpxchg_release)
	return arch_atomic64_try_cmpxchg_release(v, old, new);
#elif defined(arch_atomic64_try_cmpxchg_relaxed)
	__atomic_release_fence();
	return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
#elif defined(arch_atomic64_try_cmpxchg)
	return arch_atomic64_try_cmpxchg(v, old, new);
#else
	s64 r, o = *old;
	r = raw_atomic64_cmpxchg_release(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
#endif
}
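
/*
 * Editor's illustrative sketch, not produced by the generator: because
 * raw_atomic64_try_cmpxchg() rewrites @old with the current value on failure,
 * a compare-and-swap loop needs no explicit re-read. This saturating add is
 * hypothetical example code.
 */
static __always_inline void
example_atomic64_add_capped(atomic64_t *v, s64 a, s64 cap)
{
	s64 new, old = raw_atomic64_read(v);

	do {
		if (old >= cap)
			return;
		new = old + a;
		if (new > cap)
			new = cap;
		/* On failure, @old now holds the current value; try again. */
	} while (!raw_atomic64_try_cmpxchg(v, &old, new));
}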

/**
 * raw_atomic64_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
 * @v: pointer to atomic64_t
 * @old: pointer to s64 value to compare with
 * @new: s64 value to assign
 *
 * If (@v == @old), atomically updates @v to @new with relaxed ordering.
 * Otherwise, updates @old to the current value of @v.
 *
 * Safe to use in noinstr code; prefer atomic64_try_cmpxchg_relaxed() elsewhere.
 *
 * Return: @true if the exchange occurred, @false otherwise.
 */
static __always_inline bool
raw_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
{
#if defined(arch_atomic64_try_cmpxchg_relaxed)
	return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
#elif defined(arch_atomic64_try_cmpxchg)
	return arch_atomic64_try_cmpxchg(v, old, new);
#else
	s64 r, o = *old;
	r = raw_atomic64_cmpxchg_relaxed(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
#endif
}

/**
 * raw_atomic64_sub_and_test() - atomic subtract and test if zero with full ordering
 * @i: s64 value to subtract
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_sub_and_test() elsewhere.
 *
 * Return: @true if the resulting value of @v is zero, @false otherwise.
 */
static __always_inline bool
raw_atomic64_sub_and_test(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_sub_and_test)
	return arch_atomic64_sub_and_test(i, v);
#else
	return raw_atomic64_sub_return(i, v) == 0;
#endif
}

/**
 * raw_atomic64_dec_and_test() - atomic decrement and test if zero with full ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v - 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_dec_and_test() elsewhere.
 *
 * Return: @true if the resulting value of @v is zero, @false otherwise.
 */
static __always_inline bool
raw_atomic64_dec_and_test(atomic64_t *v)
{
#if defined(arch_atomic64_dec_and_test)
	return arch_atomic64_dec_and_test(v);
#else
	return raw_atomic64_dec_return(v) == 0;
#endif
}

/**
 * raw_atomic64_inc_and_test() - atomic increment and test if zero with full ordering
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_inc_and_test() elsewhere.
 *
 * Return: @true if the resulting value of @v is zero, @false otherwise.
 */
static __always_inline bool
raw_atomic64_inc_and_test(atomic64_t *v)
{
#if defined(arch_atomic64_inc_and_test)
	return arch_atomic64_inc_and_test(v);
#else
	return raw_atomic64_inc_return(v) == 0;
#endif
}
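
/*
 * Editor's illustrative sketch, not produced by the generator: the *_and_test()
 * helpers are commonly used for reference counting, where the caller that
 * drops the count to zero performs the cleanup. Names are hypothetical.
 */
static __always_inline void
example_atomic64_put_ref(atomic64_t *refcount, void (*release)(void))
{
	/* Only the final put observes zero and runs the release callback. */
	if (raw_atomic64_dec_and_test(refcount))
		release();
}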

/**
 * raw_atomic64_add_negative() - atomic add and test if negative with full ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_add_negative() elsewhere.
 *
 * Return: @true if the resulting value of @v is negative, @false otherwise.
 */
static __always_inline bool
raw_atomic64_add_negative(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_add_negative)
	return arch_atomic64_add_negative(i, v);
#elif defined(arch_atomic64_add_negative_relaxed)
	bool ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_add_negative_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
#else
	return raw_atomic64_add_return(i, v) < 0;
#endif
}

/**
 * raw_atomic64_add_negative_acquire() - atomic add and test if negative with acquire ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with acquire ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_add_negative_acquire() elsewhere.
 *
 * Return: @true if the resulting value of @v is negative, @false otherwise.
 */
static __always_inline bool
raw_atomic64_add_negative_acquire(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_add_negative_acquire)
	return arch_atomic64_add_negative_acquire(i, v);
#elif defined(arch_atomic64_add_negative_relaxed)
	bool ret = arch_atomic64_add_negative_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
#elif defined(arch_atomic64_add_negative)
	return arch_atomic64_add_negative(i, v);
#else
	return raw_atomic64_add_return_acquire(i, v) < 0;
#endif
}

/**
 * raw_atomic64_add_negative_release() - atomic add and test if negative with release ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with release ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_add_negative_release() elsewhere.
 *
 * Return: @true if the resulting value of @v is negative, @false otherwise.
 */
static __always_inline bool
raw_atomic64_add_negative_release(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_add_negative_release)
	return arch_atomic64_add_negative_release(i, v);
#elif defined(arch_atomic64_add_negative_relaxed)
	__atomic_release_fence();
	return arch_atomic64_add_negative_relaxed(i, v);
#elif defined(arch_atomic64_add_negative)
	return arch_atomic64_add_negative(i, v);
#else
	return raw_atomic64_add_return_release(i, v) < 0;
#endif
}

/**
 * raw_atomic64_add_negative_relaxed() - atomic add and test if negative with relaxed ordering
 * @i: s64 value to add
 * @v: pointer to atomic64_t
 *
 * Atomically updates @v to (@v + @i) with relaxed ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_add_negative_relaxed() elsewhere.
 *
 * Return: @true if the resulting value of @v is negative, @false otherwise.
 */
static __always_inline bool
raw_atomic64_add_negative_relaxed(s64 i, atomic64_t *v)
{
#if defined(arch_atomic64_add_negative_relaxed)
	return arch_atomic64_add_negative_relaxed(i, v);
#elif defined(arch_atomic64_add_negative)
	return arch_atomic64_add_negative(i, v);
#else
	return raw_atomic64_add_return_relaxed(i, v) < 0;
#endif
}
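
/*
 * Editor's illustrative sketch, not produced by the generator: add_negative
 * folds the "did we drop below zero?" check into the atomic update, which
 * suits credit or budget style counters. Names are hypothetical.
 */
static __always_inline bool
example_atomic64_consume_budget(atomic64_t *budget, s64 amount)
{
	/* True when this subtraction overdrew the budget. */
	return raw_atomic64_add_negative(-amount, budget);
}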

/**
 * raw_atomic64_fetch_add_unless() - atomic add unless value with full ordering
 * @v: pointer to atomic64_t
 * @a: s64 value to add
 * @u: s64 value to compare with
 *
 * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_fetch_add_unless() elsewhere.
 *
 * Return: The original value of @v.
 */
static __always_inline s64
raw_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
#if defined(arch_atomic64_fetch_add_unless)
	return arch_atomic64_fetch_add_unless(v, a, u);
#else
	s64 c = raw_atomic64_read(v);

	do {
		if (unlikely(c == u))
			break;
	} while (!raw_atomic64_try_cmpxchg(v, &c, c + a));

	return c;
#endif
}

/**
 * raw_atomic64_add_unless() - atomic add unless value with full ordering
 * @v: pointer to atomic64_t
 * @a: s64 value to add
 * @u: s64 value to compare with
 *
 * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_add_unless() elsewhere.
 *
 * Return: @true if @v was updated, @false otherwise.
 */
static __always_inline bool
raw_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
{
#if defined(arch_atomic64_add_unless)
	return arch_atomic64_add_unless(v, a, u);
#else
	return raw_atomic64_fetch_add_unless(v, a, u) != u;
#endif
}

/**
 * raw_atomic64_inc_not_zero() - atomic increment unless zero with full ordering
 * @v: pointer to atomic64_t
 *
 * If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_inc_not_zero() elsewhere.
 *
 * Return: @true if @v was updated, @false otherwise.
 */
static __always_inline bool
raw_atomic64_inc_not_zero(atomic64_t *v)
{
#if defined(arch_atomic64_inc_not_zero)
	return arch_atomic64_inc_not_zero(v);
#else
	return raw_atomic64_add_unless(v, 1, 0);
#endif
}

/**
 * raw_atomic64_inc_unless_negative() - atomic increment unless negative with full ordering
 * @v: pointer to atomic64_t
 *
 * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_inc_unless_negative() elsewhere.
 *
 * Return: @true if @v was updated, @false otherwise.
 */
static __always_inline bool
raw_atomic64_inc_unless_negative(atomic64_t *v)
{
#if defined(arch_atomic64_inc_unless_negative)
	return arch_atomic64_inc_unless_negative(v);
#else
	s64 c = raw_atomic64_read(v);

	do {
		if (unlikely(c < 0))
			return false;
	} while (!raw_atomic64_try_cmpxchg(v, &c, c + 1));

	return true;
#endif
}
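
/*
 * Editor's illustrative sketch, not produced by the generator: in lookup
 * paths where an object may be torn down once its count hits zero,
 * inc_not_zero() either takes a reference or reports that the object is
 * already dying. Names are hypothetical.
 */
static __always_inline bool
example_atomic64_tryget_ref(atomic64_t *refcount)
{
	/* False means the object must not be used; its teardown has begun. */
	return raw_atomic64_inc_not_zero(refcount);
}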

/**
 * raw_atomic64_dec_unless_positive() - atomic decrement unless positive with full ordering
 * @v: pointer to atomic64_t
 *
 * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_dec_unless_positive() elsewhere.
 *
 * Return: @true if @v was updated, @false otherwise.
 */
static __always_inline bool
raw_atomic64_dec_unless_positive(atomic64_t *v)
{
#if defined(arch_atomic64_dec_unless_positive)
	return arch_atomic64_dec_unless_positive(v);
#else
	s64 c = raw_atomic64_read(v);

	do {
		if (unlikely(c > 0))
			return false;
	} while (!raw_atomic64_try_cmpxchg(v, &c, c - 1));

	return true;
#endif
}

/**
 * raw_atomic64_dec_if_positive() - atomic decrement if positive with full ordering
 * @v: pointer to atomic64_t
 *
 * If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
 *
 * Safe to use in noinstr code; prefer atomic64_dec_if_positive() elsewhere.
 *
 * Return: The old value of (@v - 1), regardless of whether @v was updated.
 */
static __always_inline s64
raw_atomic64_dec_if_positive(atomic64_t *v)
{
#if defined(arch_atomic64_dec_if_positive)
	return arch_atomic64_dec_if_positive(v);
#else
	s64 dec, c = raw_atomic64_read(v);

	do {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
	} while (!raw_atomic64_try_cmpxchg(v, &c, dec));

	return dec;
#endif
}

#endif /* _LINUX_ATOMIC_FALLBACK_H */

// 202b45c7db600ce36198eb1f1fc2c2d5268ace2d