/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2017 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Konstantin Belousov <[email protected]>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef _SYS_ATOMIC_COMMON_H_
#define	_SYS_ATOMIC_COMMON_H_

#ifndef _MACHINE_ATOMIC_H_
#error do not include this header, use machine/atomic.h
#endif

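/*
 * Plain (relaxed) atomic loads and stores.  The volatile qualifier only
 * forces the compiler to emit each access exactly once; atomicity relies
 * on the operand being naturally aligned and no wider than the machine
 * word, and no memory ordering against other locations is implied.
 */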
#define	atomic_load_char(p)	(*(volatile u_char *)(p))
#define	atomic_load_short(p)	(*(volatile u_short *)(p))
#define	atomic_load_int(p)	(*(volatile u_int *)(p))
#define	atomic_load_long(p)	(*(volatile u_long *)(p))
#define	atomic_load_ptr(p)	(*(volatile __typeof(*p) *)(p))
#define	atomic_load_8(p)	(*(volatile uint8_t *)(p))
#define	atomic_load_16(p)	(*(volatile uint16_t *)(p))
#define	atomic_load_32(p)	(*(volatile uint32_t *)(p))
#ifdef _LP64
#define	atomic_load_64(p)	(*(volatile uint64_t *)(p))
#endif

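/*
 * Example (illustrative, not part of the interface): a reader polling a
 * hypothetical flag that is stored to elsewhere.  The load is atomic but
 * unordered, so it must not be relied on by itself to synchronize access
 * to other data.
 *
 *	static u_int stop_requested;
 *
 *	while (atomic_load_int(&stop_requested) == 0)
 *		cpu_spinwait();
 */
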
#define	atomic_store_char(p, v)		\
    (*(volatile u_char *)(p) = (u_char)(v))
#define	atomic_store_short(p, v)		\
    (*(volatile u_short *)(p) = (u_short)(v))
#define	atomic_store_int(p, v)		\
    (*(volatile u_int *)(p) = (u_int)(v))
#define	atomic_store_long(p, v)		\
    (*(volatile u_long *)(p) = (u_long)(v))
#define	atomic_store_ptr(p, v)		\
    (*(volatile __typeof(*p) *)(p) = (v))
#define	atomic_store_8(p, v)		\
    (*(volatile uint8_t *)(p) = (uint8_t)(v))
#define	atomic_store_16(p, v)		\
    (*(volatile uint16_t *)(p) = (uint16_t)(v))
#define	atomic_store_32(p, v)		\
    (*(volatile uint32_t *)(p) = (uint32_t)(v))
#ifdef _LP64
#define	atomic_store_64(p, v)		\
    (*(volatile uint64_t *)(p) = (uint64_t)(v))
#endif

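/*
 * Example (illustrative): the writer side of the hypothetical flag above.
 * Because the store is unordered, publishing data through a flag or
 * pointer requires a release-ordered variant such as
 * atomic_store_rel_int() instead.
 *
 *	atomic_store_int(&stop_requested, 1);
 */
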
/*
 * All architectures currently provide acquire and release fences of their
 * own, but none provides a consume fence.  The kludge below lets the
 * relevant code stop openly resorting to the stronger acquire fence until
 * proper consume support is sorted out.
 */
#define	atomic_load_consume_ptr(p)	\
    ((__typeof(*p)) atomic_load_acq_ptr((uintptr_t *)p))

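/*
 * Example (illustrative): dependency-ordered pointer chasing, as in an
 * RCU-style reader.  The hypothetical 'head' is assumed to be published
 * elsewhere with a release store; the consume load orders the subsequent
 * dereference after the pointer load.
 *
 *	struct node *n;
 *
 *	n = atomic_load_consume_ptr(&head);
 *	if (n != NULL)
 *		use(n->value);
 */
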
#define	atomic_interrupt_fence()	__compiler_membar()

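/*
 * Example (illustrative): ordering against an interrupt handler running
 * on the same CPU needs only this compiler barrier, since a CPU observes
 * its own accesses in program order.  'busy' is a hypothetical flag also
 * inspected by the handler.
 *
 *	busy = 1;
 *	atomic_interrupt_fence();
 *	... section the handler must observe as busy ...
 *	atomic_interrupt_fence();
 *	busy = 0;
 */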
#endif