/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

/*
 * To express interprocessor (as opposed to processor and device) memory
 * ordering constraints, use the atomic_*() functions with acquire and release
 * semantics rather than the *mb() functions.  An architecture's memory
 * ordering (or memory consistency) model governs the order in which a
 * program's accesses to different locations may be performed by an
 * implementation of that architecture.  In general, for memory regions
 * defined as writeback cacheable, the memory ordering implemented by amd64
 * processors preserves the program ordering of a load followed by a load, a
 * load followed by a store, and a store followed by a store.  Only a store
 * followed by a load to a different memory location may be reordered.
 * Therefore, except for special cases, like non-temporal memory accesses or
 * memory regions defined as write combining, the memory ordering effects
 * provided by the sfence instruction in the wmb() function and the lfence
 * instruction in the rmb() function are redundant.  In contrast, the
 * atomic_*() functions with acquire and release semantics do not perform
 * redundant instructions for ordinary cases of interprocessor memory
 * ordering on any architecture.
 */
#define	mb()	__asm __volatile("mfence;" : : : "memory")
#define	wmb()	__asm __volatile("sfence;" : : : "memory")
#define	rmb()	__asm __volatile("lfence;" : : : "memory")
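
/*
 * Illustrative sketch (hypothetical variables and helpers) of the
 * acquire/release usage recommended above; no *mb() is needed for
 * ordinary writeback memory:
 *
 *	// producer:
 *	data = compute();
 *	atomic_store_rel_int(&flag, 1);	// orders the data write before flag
 *
 *	// consumer:
 *	while (atomic_load_acq_int(&flag) == 0)
 *		;			// orders the flag read before data
 *	use(data);			// guaranteed to observe the new data
 */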

#ifdef _KERNEL
/*
 * OFFSETOF_MONITORBUF == __pcpu_offset(pc_monitorbuf).
 *
 * The open-coded number is used instead of the symbolic expression to
 * avoid a dependency on sys/pcpu.h in machine/atomic.h consumers.
 * An assertion in amd64/vm_machdep.c ensures that the value is correct.
 */
#define	OFFSETOF_MONITORBUF	0x100
#endif
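
/*
 * A sketch of the kind of compile-time assertion referenced above; the
 * real check lives in amd64/vm_machdep.c and its exact form may differ:
 *
 *	_Static_assert(OFFSETOF_MONITORBUF ==
 *	    offsetof(struct pcpu, pc_monitorbuf), "OFFSETOF_MONITORBUF");
 */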

#ifndef SAN_RUNTIME
#if defined(KASAN)
#define	ATOMIC_SAN_PREFIX	kasan
#elif defined(KCSAN)
#define	ATOMIC_SAN_PREFIX	kcsan
#endif
#endif

#ifdef ATOMIC_SAN_PREFIX
#include <sys/atomic_san.h>
#else
#include <sys/atomic_common.h>

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_swap_int(P, V)	(return (*(u_int *)(P)); *(u_int *)(P) = (V);)
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_swap_long(P, V)	(return (*(u_long *)(P)); *(u_long *)(P) = (V);)
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */
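
/*
 * For example, with a hypothetical flag word, a driver could set and
 * clear a busy bit atomically with respect to other CPUs and interrupts:
 *
 *	static volatile u_int sc_flags;
 *	#define	SC_BUSY	0x01
 *
 *	atomic_set_int(&sc_flags, SC_BUSY);	// sc_flags |= SC_BUSY
 *	atomic_clear_int(&sc_flags, SC_BUSY);	// sc_flags &= ~SC_BUSY
 */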

#if !defined(__GNUCLIKE_ASM)
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);	\
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int	atomic_cmpset_char(volatile u_char *dst, u_char expect, u_char src);
int	atomic_cmpset_short(volatile u_short *dst, u_short expect, u_short src);
int	atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src);
int	atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src);
int	atomic_fcmpset_char(volatile u_char *dst, u_char *expect, u_char src);
int	atomic_fcmpset_short(volatile u_short *dst, u_short *expect,
	    u_short src);
int	atomic_fcmpset_int(volatile u_int *dst, u_int *expect, u_int src);
int	atomic_fcmpset_long(volatile u_long *dst, u_long *expect, u_long src);
u_int	atomic_fetchadd_int(volatile u_int *p, u_int v);
u_long	atomic_fetchadd_long(volatile u_long *p, u_long v);
int	atomic_testandset_int(volatile u_int *p, u_int v);
int	atomic_testandset_long(volatile u_long *p, u_int v);
int	atomic_testandclear_int(volatile u_int *p, u_int v);
int	atomic_testandclear_long(volatile u_long *p, u_int v);
void	atomic_thread_fence_acq(void);
void	atomic_thread_fence_acq_rel(void);
void	atomic_thread_fence_rel(void);
void	atomic_thread_fence_seq_cst(void);

#define	ATOMIC_LOAD(TYPE)					\
u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p)
#define	ATOMIC_STORE(TYPE)					\
void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

#else /* !__GNUCLIKE_ASM */

/*
 * Always use lock prefixes.  The result is slightly less optimal for
 * UP systems, but it matters less now, and sometimes UP is emulated
 * over SMP.
 *
 * The assembly is volatilized to avoid code chunk removal by the
 * compiler.  GCC aggressively reorders operations and memory clobbering
 * is necessary in order to avoid that for memory barriers.
 */
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile("lock; " OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "cc");					\
}							\
							\
static __inline void					\
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile("lock; " OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "memory", "cc");				\
}							\
struct __hack

/*
 * Atomic compare and set, used by the mutex functions.
 *
 * cmpset:
 *	if (*dst == expect)
 *		*dst = src
 *
 * fcmpset:
 *	if (*dst == *expect)
 *		*dst = src
 *	else
 *		*expect = *dst
 *
 * Returns 0 on failure, non-zero on success.
 */
#define	ATOMIC_CMPSET(TYPE)					\
static __inline int						\
atomic_cmpset_##TYPE(volatile u_##TYPE *dst, u_##TYPE expect, u_##TYPE src) \
{								\
	u_char res;						\
								\
	__asm __volatile(					\
	"	lock; cmpxchg %3,%1 ;	"			\
	"# atomic_cmpset_" #TYPE "	"			\
	: "=@cce" (res),		/* 0 */			\
	  "+m" (*dst),			/* 1 */			\
	  "+a" (expect)			/* 2 */			\
	: "r" (src)			/* 3 */			\
	: "memory", "cc");					\
	return (res);						\
}								\
								\
static __inline int						\
atomic_fcmpset_##TYPE(volatile u_##TYPE *dst, u_##TYPE *expect, u_##TYPE src) \
{								\
	u_char res;						\
								\
	__asm __volatile(					\
	"	lock; cmpxchg %3,%1 ;	"			\
	"# atomic_fcmpset_" #TYPE "	"			\
	: "=@cce" (res),		/* 0 */			\
	  "+m" (*dst),			/* 1 */			\
	  "+a" (*expect)		/* 2 */			\
	: "r" (src)			/* 3 */			\
	: "memory", "cc");					\
	return (res);						\
}

ATOMIC_CMPSET(char);
ATOMIC_CMPSET(short);
ATOMIC_CMPSET(int);
ATOMIC_CMPSET(long);
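
/*
 * Usage sketch (hypothetical counter "cnt" with saturation): fcmpset
 * writes the observed value back through *expect on failure, so the
 * retry loop does not need to reload the target explicitly:
 *
 *	u_int old, new;
 *
 *	old = atomic_load_acq_int(&cnt);
 *	do {
 *		if (old == UINT_MAX)
 *			break;			// saturated; give up
 *		new = old + 1;
 *	} while (atomic_fcmpset_int(&cnt, &old, new) == 0);
 */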

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	lock; xaddl %0,%1 ;	"
	"# atomic_fetchadd_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "cc");
	return (v);
}
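
/*
 * Example (hypothetical ticket counter): xadd returns the previous
 * value, so each caller obtains a unique, monotonically increasing
 * ticket:
 *
 *	static volatile u_int next_ticket;
 *
 *	u_int my_ticket = atomic_fetchadd_int(&next_ticket, 1);
 */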

/*
 * Atomically add the value of v to the long integer pointed to by p and
 * return the previous value of *p.
 */
static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	__asm __volatile(
	"	lock; xaddq %0,%1 ;	"
	"# atomic_fetchadd_long"
	: "+r" (v),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "cc");
	return (v);
}

static __inline int
atomic_testandset_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	lock; btsl %2,%1 ;	"
	"# atomic_testandset_int"
	: "=@ccc" (res),		/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandset_long(volatile u_long *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	lock; btsq %2,%1 ;	"
	"# atomic_testandset_long"
	: "=@ccc" (res),		/* 0 */
	  "+m" (*p)			/* 1 */
	: "Jr" ((u_long)(v & 0x3f))	/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandclear_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	lock; btrl %2,%1 ;	"
	"# atomic_testandclear_int"
	: "=@ccc" (res),		/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandclear_long(volatile u_long *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	lock; btrq %2,%1 ;	"
	"# atomic_testandclear_long"
	: "=@ccc" (res),		/* 0 */
	  "+m" (*p)			/* 1 */
	: "Jr" ((u_long)(v & 0x3f))	/* 2 */
	: "cc");
	return (res);
}
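
/*
 * Usage sketch (hypothetical spin bit lock "lockword"): testandset
 * returns the previous value of the bit, so a zero result means the
 * caller observed the bit clear and now owns it:
 *
 *	while (atomic_testandset_int(&lockword, 0) != 0)
 *		cpu_spinwait();			// bit 0 was already set
 *	...					// critical section
 *	atomic_clear_rel_int(&lockword, 0x01);	// release bit 0
 */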

/*
 * We assume that a = b will do atomic loads and stores.  Due to the
 * IA32 memory model, a simple store guarantees release semantics.
 *
 * However, a load may pass a store if they are performed on distinct
 * addresses, so we need a Store/Load barrier for sequentially
 * consistent fences in SMP kernels.  We use "lock addl $0,mem" for a
 * Store/Load barrier, as recommended by the AMD Software Optimization
 * Guide, and not mfence.  To avoid false data dependencies, we use a
 * special address for "mem".  In the kernel, we use a private per-cpu
 * cache line.  In user space, we use a word in the stack's red zone
 * (-8(%rsp)).
 */

static __inline void
__storeload_barrier(void)
{
#if defined(_KERNEL)
	__asm __volatile("lock; addl $0,%%gs:%0"
	    : "+m" (*(u_int *)OFFSETOF_MONITORBUF) : : "memory", "cc");
#else /* !_KERNEL */
	__asm __volatile("lock; addl $0,-8(%%rsp)" : : : "memory", "cc");
#endif /* _KERNEL */
}
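
/*
 * A sketch of why the Store/Load barrier matters (Dekker-style mutual
 * exclusion over hypothetical flags f0 and f1):
 *
 *	// CPU 0				// CPU 1
 *	atomic_store_rel_int(&f0, 1);		atomic_store_rel_int(&f1, 1);
 *	atomic_thread_fence_seq_cst();		atomic_thread_fence_seq_cst();
 *	r0 = atomic_load_acq_int(&f1);		r1 = atomic_load_acq_int(&f0);
 *
 * Without the fences, each load may be performed before the preceding
 * store on the same CPU, and both CPUs can read 0 (r0 == r1 == 0).
 */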

#define	ATOMIC_LOAD(TYPE)					\
static __inline u_##TYPE					\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)			\
{								\
	u_##TYPE res;						\
								\
	res = *p;						\
	__compiler_membar();					\
	return (res);						\
}								\
struct __hack

#define	ATOMIC_STORE(TYPE)					\
static __inline void						\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)	\
{								\
								\
	__compiler_membar();					\
	*p = v;							\
}								\
struct __hack

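/*
 * As noted at the top of the file, amd64 does not reorder a load with
 * another load, a store with another store, or a store with an older
 * load for writeback memory, so the acquire, release, and
 * acquire-release fences below only need to constrain the compiler;
 * only the sequentially consistent fence needs a real Store/Load
 * barrier.
 */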
static __inline void
atomic_thread_fence_acq(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_acq_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_seq_cst(void)
{

	__storeload_barrier();
}

#endif /* !__GNUCLIKE_ASM */

ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq", v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq", v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq", v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir", v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir", v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir", v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir", v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir", v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir", v);

ATOMIC_ASM(set,	     long,  "orq %1,%0",   "er", v);
ATOMIC_ASM(clear,    long,  "andq %1,%0",  "er", ~v);
ATOMIC_ASM(add,	     long,  "addq %1,%0",  "er", v);
ATOMIC_ASM(subtract, long,  "subq %1,%0",  "er", v);
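
/*
 * For illustration, the instantiation ATOMIC_ASM(add, int, "addl %1,%0",
 * "ir", v) above expands (in the non-barrier variant) to roughly:
 *
 *	static __inline void
 *	atomic_add_int(volatile u_int *p, u_int v)
 *	{
 *		__asm __volatile("lock; addl %1,%0"
 *		    : "+m" (*p) : "ir" (v) : "cc");
 *	}
 */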

#define	ATOMIC_LOADSTORE(TYPE)					\
	ATOMIC_LOAD(TYPE);					\
	ATOMIC_STORE(TYPE)

ATOMIC_LOADSTORE(char);
ATOMIC_LOADSTORE(short);
ATOMIC_LOADSTORE(int);
ATOMIC_LOADSTORE(long);

#undef ATOMIC_ASM
#undef ATOMIC_LOAD
#undef ATOMIC_STORE
#undef ATOMIC_LOADSTORE
#ifndef WANT_FUNCTIONS

/* Read the current value and store a new value in the destination. */
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_swap_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	xchgl %1,%0 ;		"
	"# atomic_swap_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p));			/* 1 */
	return (v);
}

static __inline u_long
atomic_swap_long(volatile u_long *p, u_long v)
{

	__asm __volatile(
	"	xchgq %1,%0 ;		"
	"# atomic_swap_long"
	: "+r" (v),			/* 0 */
	  "+m" (*p));			/* 1 */
	return (v);
}
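
/*
 * No explicit "lock" prefix appears above: xchg with a memory operand
 * implicitly asserts the bus lock, so the exchange is atomic and acts
 * as a full barrier.  This is also why atomic_readandclear_*() below
 * can be expressed as a swap with zero, e.g. (hypothetical "word"):
 *
 *	u_int old = atomic_swap_int(&word, 0);	// word is now 0
 */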

#else /* !__GNUCLIKE_ASM */

u_int	atomic_swap_int(volatile u_int *p, u_int v);
u_long	atomic_swap_long(volatile u_long *p, u_long v);

#endif /* __GNUCLIKE_ASM */

#define	atomic_set_acq_char		atomic_set_barr_char
#define	atomic_set_rel_char		atomic_set_barr_char
#define	atomic_clear_acq_char		atomic_clear_barr_char
#define	atomic_clear_rel_char		atomic_clear_barr_char
#define	atomic_add_acq_char		atomic_add_barr_char
#define	atomic_add_rel_char		atomic_add_barr_char
#define	atomic_subtract_acq_char	atomic_subtract_barr_char
#define	atomic_subtract_rel_char	atomic_subtract_barr_char
#define	atomic_cmpset_acq_char		atomic_cmpset_char
#define	atomic_cmpset_rel_char		atomic_cmpset_char
#define	atomic_fcmpset_acq_char		atomic_fcmpset_char
#define	atomic_fcmpset_rel_char		atomic_fcmpset_char

#define	atomic_set_acq_short		atomic_set_barr_short
#define	atomic_set_rel_short		atomic_set_barr_short
#define	atomic_clear_acq_short		atomic_clear_barr_short
#define	atomic_clear_rel_short		atomic_clear_barr_short
#define	atomic_add_acq_short		atomic_add_barr_short
#define	atomic_add_rel_short		atomic_add_barr_short
#define	atomic_subtract_acq_short	atomic_subtract_barr_short
#define	atomic_subtract_rel_short	atomic_subtract_barr_short
#define	atomic_cmpset_acq_short		atomic_cmpset_short
#define	atomic_cmpset_rel_short		atomic_cmpset_short
#define	atomic_fcmpset_acq_short	atomic_fcmpset_short
#define	atomic_fcmpset_rel_short	atomic_fcmpset_short

#define	atomic_set_acq_int		atomic_set_barr_int
#define	atomic_set_rel_int		atomic_set_barr_int
#define	atomic_clear_acq_int		atomic_clear_barr_int
#define	atomic_clear_rel_int		atomic_clear_barr_int
#define	atomic_add_acq_int		atomic_add_barr_int
#define	atomic_add_rel_int		atomic_add_barr_int
#define	atomic_subtract_acq_int		atomic_subtract_barr_int
#define	atomic_subtract_rel_int		atomic_subtract_barr_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int
#define	atomic_fcmpset_acq_int		atomic_fcmpset_int
#define	atomic_fcmpset_rel_int		atomic_fcmpset_int

#define	atomic_set_acq_long		atomic_set_barr_long
#define	atomic_set_rel_long		atomic_set_barr_long
#define	atomic_clear_acq_long		atomic_clear_barr_long
#define	atomic_clear_rel_long		atomic_clear_barr_long
#define	atomic_add_acq_long		atomic_add_barr_long
#define	atomic_add_rel_long		atomic_add_barr_long
#define	atomic_subtract_acq_long	atomic_subtract_barr_long
#define	atomic_subtract_rel_long	atomic_subtract_barr_long
#define	atomic_cmpset_acq_long		atomic_cmpset_long
#define	atomic_cmpset_rel_long		atomic_cmpset_long
#define	atomic_fcmpset_acq_long		atomic_fcmpset_long
#define	atomic_fcmpset_rel_long		atomic_fcmpset_long

#define	atomic_readandclear_int(p)	atomic_swap_int(p, 0)
#define	atomic_readandclear_long(p)	atomic_swap_long(p, 0)
#define	atomic_testandset_acq_long	atomic_testandset_long

/* Operations on 8-bit bytes. */
#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char
#define	atomic_cmpset_8		atomic_cmpset_char
#define	atomic_cmpset_acq_8	atomic_cmpset_acq_char
#define	atomic_cmpset_rel_8	atomic_cmpset_rel_char
#define	atomic_fcmpset_8	atomic_fcmpset_char
#define	atomic_fcmpset_acq_8	atomic_fcmpset_acq_char
#define	atomic_fcmpset_rel_8	atomic_fcmpset_rel_char

/* Operations on 16-bit words. */
#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short
#define	atomic_cmpset_16	atomic_cmpset_short
#define	atomic_cmpset_acq_16	atomic_cmpset_acq_short
#define	atomic_cmpset_rel_16	atomic_cmpset_rel_short
#define	atomic_fcmpset_16	atomic_fcmpset_short
#define	atomic_fcmpset_acq_16	atomic_fcmpset_acq_short
#define	atomic_fcmpset_rel_16	atomic_fcmpset_rel_short

/* Operations on 32-bit double words. */
#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_fcmpset_32	atomic_fcmpset_int
#define	atomic_fcmpset_acq_32	atomic_fcmpset_acq_int
#define	atomic_fcmpset_rel_32	atomic_fcmpset_rel_int
#define	atomic_swap_32		atomic_swap_int
#define	atomic_readandclear_32	atomic_readandclear_int
#define	atomic_fetchadd_32	atomic_fetchadd_int
#define	atomic_testandset_32	atomic_testandset_int
#define	atomic_testandclear_32	atomic_testandclear_int

/* Operations on 64-bit quad words. */
#define	atomic_set_64		atomic_set_long
#define	atomic_set_acq_64	atomic_set_acq_long
#define	atomic_set_rel_64	atomic_set_rel_long
#define	atomic_clear_64		atomic_clear_long
#define	atomic_clear_acq_64	atomic_clear_acq_long
#define	atomic_clear_rel_64	atomic_clear_rel_long
#define	atomic_add_64		atomic_add_long
#define	atomic_add_acq_64	atomic_add_acq_long
#define	atomic_add_rel_64	atomic_add_rel_long
#define	atomic_subtract_64	atomic_subtract_long
#define	atomic_subtract_acq_64	atomic_subtract_acq_long
#define	atomic_subtract_rel_64	atomic_subtract_rel_long
#define	atomic_load_acq_64	atomic_load_acq_long
#define	atomic_store_rel_64	atomic_store_rel_long
#define	atomic_cmpset_64	atomic_cmpset_long
#define	atomic_cmpset_acq_64	atomic_cmpset_acq_long
#define	atomic_cmpset_rel_64	atomic_cmpset_rel_long
#define	atomic_fcmpset_64	atomic_fcmpset_long
#define	atomic_fcmpset_acq_64	atomic_fcmpset_acq_long
#define	atomic_fcmpset_rel_64	atomic_fcmpset_rel_long
#define	atomic_swap_64		atomic_swap_long
#define	atomic_readandclear_64	atomic_readandclear_long
#define	atomic_fetchadd_64	atomic_fetchadd_long
#define	atomic_testandset_64	atomic_testandset_long
#define	atomic_testandclear_64	atomic_testandclear_long

/* Operations on pointers. */
#define	atomic_set_ptr		atomic_set_long
#define	atomic_set_acq_ptr	atomic_set_acq_long
#define	atomic_set_rel_ptr	atomic_set_rel_long
#define	atomic_clear_ptr	atomic_clear_long
#define	atomic_clear_acq_ptr	atomic_clear_acq_long
#define	atomic_clear_rel_ptr	atomic_clear_rel_long
#define	atomic_add_ptr		atomic_add_long
#define	atomic_add_acq_ptr	atomic_add_acq_long
#define	atomic_add_rel_ptr	atomic_add_rel_long
#define	atomic_subtract_ptr	atomic_subtract_long
#define	atomic_subtract_acq_ptr	atomic_subtract_acq_long
#define	atomic_subtract_rel_ptr	atomic_subtract_rel_long
#define	atomic_load_acq_ptr	atomic_load_acq_long
#define	atomic_store_rel_ptr	atomic_store_rel_long
#define	atomic_cmpset_ptr	atomic_cmpset_long
#define	atomic_cmpset_acq_ptr	atomic_cmpset_acq_long
#define	atomic_cmpset_rel_ptr	atomic_cmpset_rel_long
#define	atomic_fcmpset_ptr	atomic_fcmpset_long
#define	atomic_fcmpset_acq_ptr	atomic_fcmpset_acq_long
#define	atomic_fcmpset_rel_ptr	atomic_fcmpset_rel_long
#define	atomic_swap_ptr		atomic_swap_long
#define	atomic_readandclear_ptr	atomic_readandclear_long

#endif /* !WANT_FUNCTIONS */

#endif /* !ATOMIC_SAN_PREFIX */

#endif /* !_MACHINE_ATOMIC_H_ */