/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

/*
 * To express interprocessor (as opposed to processor and device) memory
 * ordering constraints, use the atomic_*() functions with acquire and release
 * semantics rather than the *mb() functions.  An architecture's memory
 * ordering (or memory consistency) model governs the order in which a
 * program's accesses to different locations may be performed by an
 * implementation of that architecture.  In general, for memory regions
 * defined as writeback cacheable, the memory ordering implemented by amd64
 * processors preserves the program ordering of a load followed by a load, a
 * load followed by a store, and a store followed by a store.  Only a store
 * followed by a load to a different memory location may be reordered.
 * Therefore, except for special cases, like non-temporal memory accesses or
 * memory regions defined as write combining, the memory ordering effects
 * provided by the sfence instruction in the wmb() function and the lfence
 * instruction in the rmb() function are redundant.  In contrast, the
 * atomic_*() functions with acquire and release semantics do not perform
 * redundant instructions for ordinary cases of interprocessor memory
 * ordering on any architecture.
 */
#define	mb()	__asm __volatile("mfence;" : : : "memory")
#define	wmb()	__asm __volatile("sfence;" : : : "memory")
#define	rmb()	__asm __volatile("lfence;" : : : "memory")
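
/*
 * Example (a sketch, not part of this header): a producer/consumer
 * handoff expressed with acquire and release semantics instead of the
 * *mb() fences.  The variables "data" and "ready" and the functions
 * compute() and use() are hypothetical.
 *
 *	static volatile u_int data, ready;
 *
 *	producer:
 *		data = compute();
 *		atomic_store_rel_int(&ready, 1);
 *
 *	consumer:
 *		while (atomic_load_acq_int(&ready) == 0)
 *			cpu_spinwait();
 *		use(data);
 *
 * The release store orders the store to data before the store to ready;
 * the acquire load orders the load of ready before the load of data.
 * No sfence or lfence is required for this pattern on amd64.
 */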

#ifdef _KERNEL
/*
 * OFFSETOF_MONITORBUF == __pcpu_offset(pc_monitorbuf).
 *
 * The open-coded number is used instead of the symbolic expression to
 * avoid a dependency on sys/pcpu.h in machine/atomic.h consumers.
 * An assertion in amd64/vm_machdep.c ensures that the value is correct.
 */
#define	OFFSETOF_MONITORBUF	0x100
#endif
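
/*
 * Conceptually, the check in amd64/vm_machdep.c amounts to the
 * following (a sketch; the actual assertion may be spelled
 * differently):
 *
 *	CTASSERT(OFFSETOF_MONITORBUF == offsetof(struct pcpu, pc_monitorbuf));
 */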

#if defined(KCSAN) && !defined(KCSAN_RUNTIME)
#include <sys/_cscan_atomic.h>
#else
#include <sys/atomic_common.h>

/*
 * Various simple operations on memory, each of which is atomic in the
 * presence of interrupts and multiple processors.
 *
 * atomic_set_char(P, V)	(*(u_char *)(P) |= (V))
 * atomic_clear_char(P, V)	(*(u_char *)(P) &= ~(V))
 * atomic_add_char(P, V)	(*(u_char *)(P) += (V))
 * atomic_subtract_char(P, V)	(*(u_char *)(P) -= (V))
 *
 * atomic_set_short(P, V)	(*(u_short *)(P) |= (V))
 * atomic_clear_short(P, V)	(*(u_short *)(P) &= ~(V))
 * atomic_add_short(P, V)	(*(u_short *)(P) += (V))
 * atomic_subtract_short(P, V)	(*(u_short *)(P) -= (V))
 *
 * atomic_set_int(P, V)		(*(u_int *)(P) |= (V))
 * atomic_clear_int(P, V)	(*(u_int *)(P) &= ~(V))
 * atomic_add_int(P, V)		(*(u_int *)(P) += (V))
 * atomic_subtract_int(P, V)	(*(u_int *)(P) -= (V))
 * atomic_swap_int(P, V)	(return (*(u_int *)(P)); *(u_int *)(P) = (V);)
 * atomic_readandclear_int(P)	(return (*(u_int *)(P)); *(u_int *)(P) = 0;)
 *
 * atomic_set_long(P, V)	(*(u_long *)(P) |= (V))
 * atomic_clear_long(P, V)	(*(u_long *)(P) &= ~(V))
 * atomic_add_long(P, V)	(*(u_long *)(P) += (V))
 * atomic_subtract_long(P, V)	(*(u_long *)(P) -= (V))
 * atomic_swap_long(P, V)	(return (*(u_long *)(P)); *(u_long *)(P) = (V);)
 * atomic_readandclear_long(P)	(return (*(u_long *)(P)); *(u_long *)(P) = 0;)
 */
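
/*
 * Usage sketch: a shared counter and flag word manipulated with the
 * operations above ("refs" and "flags" are hypothetical variables):
 *
 *	static volatile u_int refs, flags;
 *
 *	atomic_add_int(&refs, 1);		take a reference
 *	atomic_subtract_int(&refs, 1);		drop a reference
 *	atomic_set_int(&flags, 0x01);		set bit 0
 *	atomic_clear_int(&flags, 0x01);		clear bit 0
 */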

/*
 * The above functions are expanded inline in the statically-linked
 * kernel.  Lock prefixes are generated if an SMP kernel is being
 * built.
 *
 * Kernel modules are always built with lock prefixes (see MPLOCKED
 * below), so a single module binary is portable between UP and SMP
 * systems.
 */
#if !defined(__GNUCLIKE_ASM)
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)			\
void atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v);	\
void atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

int	atomic_cmpset_char(volatile u_char *dst, u_char expect, u_char src);
int	atomic_cmpset_short(volatile u_short *dst, u_short expect, u_short src);
int	atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src);
int	atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src);
int	atomic_fcmpset_char(volatile u_char *dst, u_char *expect, u_char src);
int	atomic_fcmpset_short(volatile u_short *dst, u_short *expect,
	    u_short src);
int	atomic_fcmpset_int(volatile u_int *dst, u_int *expect, u_int src);
int	atomic_fcmpset_long(volatile u_long *dst, u_long *expect, u_long src);
u_int	atomic_fetchadd_int(volatile u_int *p, u_int v);
u_long	atomic_fetchadd_long(volatile u_long *p, u_long v);
int	atomic_testandset_int(volatile u_int *p, u_int v);
int	atomic_testandset_long(volatile u_long *p, u_int v);
int	atomic_testandclear_int(volatile u_int *p, u_int v);
int	atomic_testandclear_long(volatile u_long *p, u_int v);
void	atomic_thread_fence_acq(void);
void	atomic_thread_fence_acq_rel(void);
void	atomic_thread_fence_rel(void);
void	atomic_thread_fence_seq_cst(void);

#define	ATOMIC_LOAD(TYPE)					\
u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p)
#define	ATOMIC_STORE(TYPE)					\
void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)

#else /* __GNUCLIKE_ASM */

/*
 * For userland and kernel modules, always use lock prefixes so that the
 * binaries will run on both SMP and !SMP systems.
 */
#if defined(SMP) || !defined(_KERNEL) || defined(KLD_MODULE)
#define	MPLOCKED	"lock ; "
#else
#define	MPLOCKED
#endif

/*
 * The assembly is volatilized to avoid code chunk removal by the compiler.
 * GCC aggressively reorders operations and memory clobbering is necessary
 * in order to avoid that for memory barriers.
 */
#define	ATOMIC_ASM(NAME, TYPE, OP, CONS, V)		\
static __inline void					\
atomic_##NAME##_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "cc");					\
}							\
							\
static __inline void					\
atomic_##NAME##_barr_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
{							\
	__asm __volatile(MPLOCKED OP			\
	: "+m" (*p)					\
	: CONS (V)					\
	: "memory", "cc");				\
}							\
struct __hack
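
/*
 * For illustration, ATOMIC_ASM(add, int, "addl %1,%0", "ir", v)
 * expands to roughly:
 *
 *	static __inline void
 *	atomic_add_int(volatile u_int *p, u_int v)
 *	{
 *		__asm __volatile(MPLOCKED "addl %1,%0"
 *		: "+m" (*p)
 *		: "ir" (v)
 *		: "cc");
 *	}
 *
 * plus an atomic_add_barr_int() variant whose asm additionally clobbers
 * "memory", making it a compiler barrier as well.
 */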

/*
 * Atomic compare and set, used by the mutex functions.
 *
 * cmpset:
 *	if (*dst == expect)
 *		*dst = src
 *
 * fcmpset:
 *	if (*dst == *expect)
 *		*dst = src
 *	else
 *		*expect = *dst
 *
 * Returns 0 on failure, non-zero on success.
 */
#define	ATOMIC_CMPSET(TYPE)				\
static __inline int					\
atomic_cmpset_##TYPE(volatile u_##TYPE *dst, u_##TYPE expect, u_##TYPE src) \
{							\
	u_char res;					\
							\
	__asm __volatile(				\
	"	" MPLOCKED "		"		\
	"	cmpxchg	%3,%1 ;	"			\
	"# atomic_cmpset_" #TYPE "	"		\
	: "=@cce" (res),		/* 0 */		\
	  "+m" (*dst),			/* 1 */		\
	  "+a" (expect)			/* 2 */		\
	: "r" (src)			/* 3 */		\
	: "memory", "cc");				\
	return (res);					\
}							\
							\
static __inline int					\
atomic_fcmpset_##TYPE(volatile u_##TYPE *dst, u_##TYPE *expect, u_##TYPE src) \
{							\
	u_char res;					\
							\
	__asm __volatile(				\
	"	" MPLOCKED "		"		\
	"	cmpxchg	%3,%1 ;	"			\
	"# atomic_fcmpset_" #TYPE "	"		\
	: "=@cce" (res),		/* 0 */		\
	  "+m" (*dst),			/* 1 */		\
	  "+a" (*expect)		/* 2 */		\
	: "r" (src)			/* 3 */		\
	: "memory", "cc");				\
	return (res);					\
}

ATOMIC_CMPSET(char);
ATOMIC_CMPSET(short);
ATOMIC_CMPSET(int);
ATOMIC_CMPSET(long);
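
/*
 * Typical fcmpset usage is a read-modify-write retry loop.  On failure
 * fcmpset refreshes *expect from *dst, so the loop need not re-read the
 * target itself.  A sketch (the function name is hypothetical):
 *
 *	static __inline void
 *	example_saturating_inc(volatile u_int *p)
 *	{
 *		u_int old;
 *
 *		old = *p;
 *		do {
 *			if (old == UINT_MAX)
 *				return;
 *		} while (!atomic_fcmpset_int(p, &old, old + 1));
 *	}
 */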

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddl	%0,%1 ;		"
	"# atomic_fetchadd_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "cc");
	return (v);
}
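
/*
 * Because the previous value is returned, fetchadd can hand out unique
 * tickets, e.g. for a ticket lock.  A sketch ("next_ticket" is a
 * hypothetical variable):
 *
 *	static volatile u_int next_ticket;
 *
 *	u_int my_ticket = atomic_fetchadd_int(&next_ticket, 1);
 */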

/*
 * Atomically add the value of v to the long integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_long
atomic_fetchadd_long(volatile u_long *p, u_long v)
{

	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddq	%0,%1 ;		"
	"# atomic_fetchadd_long"
	: "+r" (v),			/* 0 */
	  "+m" (*p)			/* 1 */
	: : "cc");
	return (v);
}

static __inline int
atomic_testandset_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btsl	%2,%1 ;		"
	"# atomic_testandset_int"
	: "=@ccc" (res),		/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandset_long(volatile u_long *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btsq	%2,%1 ;		"
	"# atomic_testandset_long"
	: "=@ccc" (res),		/* 0 */
	  "+m" (*p)			/* 1 */
	: "Jr" ((u_long)(v & 0x3f))	/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandclear_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btrl	%2,%1 ;		"
	"# atomic_testandclear_int"
	: "=@ccc" (res),		/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}

static __inline int
atomic_testandclear_long(volatile u_long *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	" MPLOCKED "		"
	"	btrq	%2,%1 ;		"
	"# atomic_testandclear_long"
	: "=@ccc" (res),		/* 0 */
	  "+m" (*p)			/* 1 */
	: "Jr" ((u_long)(v & 0x3f))	/* 2 */
	: "cc");
	return (res);
}
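
/*
 * The returned value is the prior state of the tested bit, so
 * testandset can implement a simple try-lock.  A sketch ("lockword" is
 * a hypothetical variable; bit 0 is the lock bit):
 *
 *	static volatile u_long lockword;
 *
 *	if (atomic_testandset_long(&lockword, 0) == 0) {
 *		bit 0 was clear and is now set: lock acquired
 *		...
 *		atomic_testandclear_long(&lockword, 0);
 *	}
 */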

/*
 * We assume that a = b will do atomic loads and stores.  Due to the
 * IA32 memory model, a simple store guarantees release semantics.
 *
 * However, a load may pass a store if they are performed on distinct
 * addresses, so we need a Store/Load barrier for sequentially
 * consistent fences in SMP kernels.  We use "lock addl $0,mem" for a
 * Store/Load barrier, as recommended by the AMD Software Optimization
 * Guide, and not mfence.  To avoid false data dependencies, we use a
 * special address for "mem".  In the kernel, we use a private per-cpu
 * cache line.  In user space, we use a word in the stack's red zone
 * (-8(%rsp)).
 *
 * For UP kernels, however, the memory of the single processor is
 * always consistent, so we only need to stop the compiler from
 * reordering accesses in a way that violates the semantics of acquire
 * and release.
 */
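
/*
 * Sketch of why the Store/Load barrier matters: in Dekker-style
 * synchronization each CPU stores to its own flag and then loads the
 * other CPU's flag.  Without a sequentially consistent fence, each
 * load may be satisfied before the other CPU's store becomes visible,
 * and both CPUs can enter the critical section ("flag0" and "flag1"
 * are hypothetical):
 *
 *	CPU 0:					CPU 1:
 *	atomic_store_rel_int(&flag0, 1);	atomic_store_rel_int(&flag1, 1);
 *	atomic_thread_fence_seq_cst();		atomic_thread_fence_seq_cst();
 *	if (atomic_load_acq_int(&flag1) == 0)	if (atomic_load_acq_int(&flag0) == 0)
 *		enter critical section;		enter critical section;
 */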

#if defined(_KERNEL)

#if defined(SMP) || defined(KLD_MODULE)
static __inline void
__storeload_barrier(void)
{

	__asm __volatile("lock; addl $0,%%gs:%0"
	    : "+m" (*(u_int *)OFFSETOF_MONITORBUF) : : "memory", "cc");
}
#else /* _KERNEL && UP */
static __inline void
__storeload_barrier(void)
{

	__compiler_membar();
}
#endif /* SMP */
#else /* !_KERNEL */
static __inline void
__storeload_barrier(void)
{

	__asm __volatile("lock; addl $0,-8(%%rsp)" : : : "memory", "cc");
}
#endif /* _KERNEL */

#define	ATOMIC_LOAD(TYPE)					\
static __inline u_##TYPE					\
atomic_load_acq_##TYPE(volatile u_##TYPE *p)			\
{								\
	u_##TYPE res;						\
								\
	res = *p;						\
	__compiler_membar();					\
	return (res);						\
}								\
struct __hack

#define	ATOMIC_STORE(TYPE)					\
static __inline void						\
atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)	\
{								\
								\
	__compiler_membar();					\
	*p = v;							\
}								\
struct __hack

static __inline void
atomic_thread_fence_acq(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_acq_rel(void)
{

	__compiler_membar();
}

static __inline void
atomic_thread_fence_seq_cst(void)
{

	__storeload_barrier();
}

#endif /* !__GNUCLIKE_ASM */

ATOMIC_ASM(set,	     char,  "orb %b1,%0",  "iq",  v);
ATOMIC_ASM(clear,    char,  "andb %b1,%0", "iq", ~v);
ATOMIC_ASM(add,	     char,  "addb %b1,%0", "iq",  v);
ATOMIC_ASM(subtract, char,  "subb %b1,%0", "iq",  v);

ATOMIC_ASM(set,	     short, "orw %w1,%0",  "ir",  v);
ATOMIC_ASM(clear,    short, "andw %w1,%0", "ir", ~v);
ATOMIC_ASM(add,	     short, "addw %w1,%0", "ir",  v);
ATOMIC_ASM(subtract, short, "subw %w1,%0", "ir",  v);

ATOMIC_ASM(set,	     int,   "orl %1,%0",   "ir",  v);
ATOMIC_ASM(clear,    int,   "andl %1,%0",  "ir", ~v);
ATOMIC_ASM(add,	     int,   "addl %1,%0",  "ir",  v);
ATOMIC_ASM(subtract, int,   "subl %1,%0",  "ir",  v);

ATOMIC_ASM(set,	     long,  "orq %1,%0",   "er",  v);
ATOMIC_ASM(clear,    long,  "andq %1,%0",  "er", ~v);
ATOMIC_ASM(add,	     long,  "addq %1,%0",  "er",  v);
ATOMIC_ASM(subtract, long,  "subq %1,%0",  "er",  v);

#define	ATOMIC_LOADSTORE(TYPE)					\
	ATOMIC_LOAD(TYPE);					\
	ATOMIC_STORE(TYPE)

ATOMIC_LOADSTORE(char);
ATOMIC_LOADSTORE(short);
ATOMIC_LOADSTORE(int);
ATOMIC_LOADSTORE(long);

#undef ATOMIC_ASM
#undef ATOMIC_LOAD
#undef ATOMIC_STORE
#undef ATOMIC_LOADSTORE
#ifndef WANT_FUNCTIONS

/* Read the current value and store a new value in the destination. */
#ifdef __GNUCLIKE_ASM

static __inline u_int
atomic_swap_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_swap_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p));			/* 1 */
	return (v);
}

static __inline u_long
atomic_swap_long(volatile u_long *p, u_long v)
{

	__asm __volatile(
	"	xchgq	%1,%0 ;		"
	"# atomic_swap_long"
	: "+r" (v),			/* 0 */
	  "+m" (*p));			/* 1 */
	return (v);
}

#else /* !__GNUCLIKE_ASM */

u_int	atomic_swap_int(volatile u_int *p, u_int v);
u_long	atomic_swap_long(volatile u_long *p, u_long v);

#endif /* __GNUCLIKE_ASM */

#define	atomic_set_acq_char		atomic_set_barr_char
#define	atomic_set_rel_char		atomic_set_barr_char
#define	atomic_clear_acq_char		atomic_clear_barr_char
#define	atomic_clear_rel_char		atomic_clear_barr_char
#define	atomic_add_acq_char		atomic_add_barr_char
#define	atomic_add_rel_char		atomic_add_barr_char
#define	atomic_subtract_acq_char	atomic_subtract_barr_char
#define	atomic_subtract_rel_char	atomic_subtract_barr_char
#define	atomic_cmpset_acq_char		atomic_cmpset_char
#define	atomic_cmpset_rel_char		atomic_cmpset_char
#define	atomic_fcmpset_acq_char		atomic_fcmpset_char
#define	atomic_fcmpset_rel_char		atomic_fcmpset_char

#define	atomic_set_acq_short		atomic_set_barr_short
#define	atomic_set_rel_short		atomic_set_barr_short
#define	atomic_clear_acq_short		atomic_clear_barr_short
#define	atomic_clear_rel_short		atomic_clear_barr_short
#define	atomic_add_acq_short		atomic_add_barr_short
#define	atomic_add_rel_short		atomic_add_barr_short
#define	atomic_subtract_acq_short	atomic_subtract_barr_short
#define	atomic_subtract_rel_short	atomic_subtract_barr_short
#define	atomic_cmpset_acq_short		atomic_cmpset_short
#define	atomic_cmpset_rel_short		atomic_cmpset_short
#define	atomic_fcmpset_acq_short	atomic_fcmpset_short
#define	atomic_fcmpset_rel_short	atomic_fcmpset_short

#define	atomic_set_acq_int		atomic_set_barr_int
#define	atomic_set_rel_int		atomic_set_barr_int
#define	atomic_clear_acq_int		atomic_clear_barr_int
#define	atomic_clear_rel_int		atomic_clear_barr_int
#define	atomic_add_acq_int		atomic_add_barr_int
#define	atomic_add_rel_int		atomic_add_barr_int
#define	atomic_subtract_acq_int		atomic_subtract_barr_int
#define	atomic_subtract_rel_int		atomic_subtract_barr_int
#define	atomic_cmpset_acq_int		atomic_cmpset_int
#define	atomic_cmpset_rel_int		atomic_cmpset_int
#define	atomic_fcmpset_acq_int		atomic_fcmpset_int
#define	atomic_fcmpset_rel_int		atomic_fcmpset_int

#define	atomic_set_acq_long		atomic_set_barr_long
#define	atomic_set_rel_long		atomic_set_barr_long
#define	atomic_clear_acq_long		atomic_clear_barr_long
#define	atomic_clear_rel_long		atomic_clear_barr_long
#define	atomic_add_acq_long		atomic_add_barr_long
#define	atomic_add_rel_long		atomic_add_barr_long
#define	atomic_subtract_acq_long	atomic_subtract_barr_long
#define	atomic_subtract_rel_long	atomic_subtract_barr_long
#define	atomic_cmpset_acq_long		atomic_cmpset_long
#define	atomic_cmpset_rel_long		atomic_cmpset_long
#define	atomic_fcmpset_acq_long		atomic_fcmpset_long
#define	atomic_fcmpset_rel_long		atomic_fcmpset_long

#define	atomic_readandclear_int(p)	atomic_swap_int(p, 0)
#define	atomic_readandclear_long(p)	atomic_swap_long(p, 0)
#define	atomic_testandset_acq_long	atomic_testandset_long

/* Operations on 8-bit bytes. */
#define	atomic_set_8		atomic_set_char
#define	atomic_set_acq_8	atomic_set_acq_char
#define	atomic_set_rel_8	atomic_set_rel_char
#define	atomic_clear_8		atomic_clear_char
#define	atomic_clear_acq_8	atomic_clear_acq_char
#define	atomic_clear_rel_8	atomic_clear_rel_char
#define	atomic_add_8		atomic_add_char
#define	atomic_add_acq_8	atomic_add_acq_char
#define	atomic_add_rel_8	atomic_add_rel_char
#define	atomic_subtract_8	atomic_subtract_char
#define	atomic_subtract_acq_8	atomic_subtract_acq_char
#define	atomic_subtract_rel_8	atomic_subtract_rel_char
#define	atomic_load_acq_8	atomic_load_acq_char
#define	atomic_store_rel_8	atomic_store_rel_char
#define	atomic_cmpset_8		atomic_cmpset_char
#define	atomic_cmpset_acq_8	atomic_cmpset_acq_char
#define	atomic_cmpset_rel_8	atomic_cmpset_rel_char
#define	atomic_fcmpset_8	atomic_fcmpset_char
#define	atomic_fcmpset_acq_8	atomic_fcmpset_acq_char
#define	atomic_fcmpset_rel_8	atomic_fcmpset_rel_char

/* Operations on 16-bit words. */
#define	atomic_set_16		atomic_set_short
#define	atomic_set_acq_16	atomic_set_acq_short
#define	atomic_set_rel_16	atomic_set_rel_short
#define	atomic_clear_16		atomic_clear_short
#define	atomic_clear_acq_16	atomic_clear_acq_short
#define	atomic_clear_rel_16	atomic_clear_rel_short
#define	atomic_add_16		atomic_add_short
#define	atomic_add_acq_16	atomic_add_acq_short
#define	atomic_add_rel_16	atomic_add_rel_short
#define	atomic_subtract_16	atomic_subtract_short
#define	atomic_subtract_acq_16	atomic_subtract_acq_short
#define	atomic_subtract_rel_16	atomic_subtract_rel_short
#define	atomic_load_acq_16	atomic_load_acq_short
#define	atomic_store_rel_16	atomic_store_rel_short
#define	atomic_cmpset_16	atomic_cmpset_short
#define	atomic_cmpset_acq_16	atomic_cmpset_acq_short
#define	atomic_cmpset_rel_16	atomic_cmpset_rel_short
#define	atomic_fcmpset_16	atomic_fcmpset_short
#define	atomic_fcmpset_acq_16	atomic_fcmpset_acq_short
#define	atomic_fcmpset_rel_16	atomic_fcmpset_rel_short

/* Operations on 32-bit double words. */
#define	atomic_set_32		atomic_set_int
#define	atomic_set_acq_32	atomic_set_acq_int
#define	atomic_set_rel_32	atomic_set_rel_int
#define	atomic_clear_32		atomic_clear_int
#define	atomic_clear_acq_32	atomic_clear_acq_int
#define	atomic_clear_rel_32	atomic_clear_rel_int
#define	atomic_add_32		atomic_add_int
#define	atomic_add_acq_32	atomic_add_acq_int
#define	atomic_add_rel_32	atomic_add_rel_int
#define	atomic_subtract_32	atomic_subtract_int
#define	atomic_subtract_acq_32	atomic_subtract_acq_int
#define	atomic_subtract_rel_32	atomic_subtract_rel_int
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int
#define	atomic_cmpset_acq_32	atomic_cmpset_acq_int
#define	atomic_cmpset_rel_32	atomic_cmpset_rel_int
#define	atomic_fcmpset_32	atomic_fcmpset_int
#define	atomic_fcmpset_acq_32	atomic_fcmpset_acq_int
#define	atomic_fcmpset_rel_32	atomic_fcmpset_rel_int
#define	atomic_swap_32		atomic_swap_int
#define	atomic_readandclear_32	atomic_readandclear_int
#define	atomic_fetchadd_32	atomic_fetchadd_int
#define	atomic_testandset_32	atomic_testandset_int
#define	atomic_testandclear_32	atomic_testandclear_int

/* Operations on 64-bit quad words. */
#define	atomic_set_64		atomic_set_long
#define	atomic_set_acq_64	atomic_set_acq_long
#define	atomic_set_rel_64	atomic_set_rel_long
#define	atomic_clear_64		atomic_clear_long
#define	atomic_clear_acq_64	atomic_clear_acq_long
#define	atomic_clear_rel_64	atomic_clear_rel_long
#define	atomic_add_64		atomic_add_long
#define	atomic_add_acq_64	atomic_add_acq_long
#define	atomic_add_rel_64	atomic_add_rel_long
#define	atomic_subtract_64	atomic_subtract_long
#define	atomic_subtract_acq_64	atomic_subtract_acq_long
#define	atomic_subtract_rel_64	atomic_subtract_rel_long
#define	atomic_load_acq_64	atomic_load_acq_long
#define	atomic_store_rel_64	atomic_store_rel_long
#define	atomic_cmpset_64	atomic_cmpset_long
#define	atomic_cmpset_acq_64	atomic_cmpset_acq_long
#define	atomic_cmpset_rel_64	atomic_cmpset_rel_long
#define	atomic_fcmpset_64	atomic_fcmpset_long
#define	atomic_fcmpset_acq_64	atomic_fcmpset_acq_long
#define	atomic_fcmpset_rel_64	atomic_fcmpset_rel_long
#define	atomic_swap_64		atomic_swap_long
#define	atomic_readandclear_64	atomic_readandclear_long
#define	atomic_fetchadd_64	atomic_fetchadd_long
#define	atomic_testandset_64	atomic_testandset_long
#define	atomic_testandclear_64	atomic_testandclear_long

/* Operations on pointers. */
#define	atomic_set_ptr			atomic_set_long
#define	atomic_set_acq_ptr		atomic_set_acq_long
#define	atomic_set_rel_ptr		atomic_set_rel_long
#define	atomic_clear_ptr		atomic_clear_long
#define	atomic_clear_acq_ptr		atomic_clear_acq_long
#define	atomic_clear_rel_ptr		atomic_clear_rel_long
#define	atomic_add_ptr			atomic_add_long
#define	atomic_add_acq_ptr		atomic_add_acq_long
#define	atomic_add_rel_ptr		atomic_add_rel_long
#define	atomic_subtract_ptr		atomic_subtract_long
#define	atomic_subtract_acq_ptr		atomic_subtract_acq_long
#define	atomic_subtract_rel_ptr		atomic_subtract_rel_long
#define	atomic_load_acq_ptr		atomic_load_acq_long
#define	atomic_store_rel_ptr		atomic_store_rel_long
#define	atomic_cmpset_ptr		atomic_cmpset_long
#define	atomic_cmpset_acq_ptr		atomic_cmpset_acq_long
#define	atomic_cmpset_rel_ptr		atomic_cmpset_rel_long
#define	atomic_fcmpset_ptr		atomic_fcmpset_long
#define	atomic_fcmpset_acq_ptr		atomic_fcmpset_acq_long
#define	atomic_fcmpset_rel_ptr		atomic_fcmpset_rel_long
#define	atomic_swap_ptr			atomic_swap_long
#define	atomic_readandclear_ptr		atomic_readandclear_long

#endif /* !WANT_FUNCTIONS */

#endif /* KCSAN && !KCSAN_RUNTIME */

#endif /* !_MACHINE_ATOMIC_H_ */