/*-
 * Copyright (c) 2016-2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef	_LINUXKPI_ASM_ATOMIC64_H_
#define	_LINUXKPI_ASM_ATOMIC64_H_

#include <linux/compiler.h>
#include <sys/types.h>
#include <machine/atomic.h>

typedef struct {
	volatile int64_t counter;
} atomic64_t;
#define	ATOMIC64_INIT(x)	{ .counter = (x) }

/*------------------------------------------------------------------------*
 * 64-bit atomic operations
 *------------------------------------------------------------------------*/

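/*
 * Operations that are simple combinations of the inline functions below
 * are provided as macros.
 */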
#define	atomic64_add(i, v)		atomic64_add_return((i), (v))
#define	atomic64_sub(i, v)		atomic64_sub_return((i), (v))
#define	atomic64_inc_return(v)		atomic64_add_return(1, (v))
#define	atomic64_add_negative(i, v)	(atomic64_add_return((i), (v)) < 0)
#define	atomic64_add_and_test(i, v)	(atomic64_add_return((i), (v)) == 0)
#define	atomic64_sub_and_test(i, v)	(atomic64_sub_return((i), (v)) == 0)
#define	atomic64_dec_and_test(v)	(atomic64_sub_return(1, (v)) == 0)
#define	atomic64_inc_and_test(v)	(atomic64_add_return(1, (v)) == 0)
#define	atomic64_dec_return(v)		atomic64_sub_return(1, (v))
#define	atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)

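/* Atomically add "i" to the counter and return its previous value. */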
static inline int64_t
atomic64_fetch_add(int64_t i, atomic64_t *v)
{
	return (atomic_fetchadd_64(&v->counter, i));
}

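/* Atomically add "i" to the counter and return the resulting value. */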
static inline int64_t
atomic64_add_return(int64_t i, atomic64_t *v)
{
	return i + atomic_fetchadd_64(&v->counter, i);
}

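/* Atomically subtract "i" from the counter and return the resulting value. */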
static inline int64_t
atomic64_sub_return(int64_t i, atomic64_t *v)
{
	return atomic_fetchadd_64(&v->counter, -i) - i;
}

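/* Store "i" into the counter with release semantics. */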
static inline void
atomic64_set(atomic64_t *v, int64_t i)
{
	atomic_store_rel_64(&v->counter, i);
}

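/* Read the counter as a single access; no memory ordering is implied. */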
static inline int64_t
atomic64_read(atomic64_t *v)
{
	return READ_ONCE(v->counter);
}

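/* Atomically increment the counter and return the resulting value. */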
static inline int64_t
atomic64_inc(atomic64_t *v)
{
	return atomic_fetchadd_64(&v->counter, 1) + 1;
}

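/* Atomically decrement the counter and return the resulting value. */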
static inline int64_t
atomic64_dec(atomic64_t *v)
{
	return atomic_fetchadd_64(&v->counter, -1) - 1;
}

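/*
 * Atomically add "a" to the counter unless its current value is "u".
 * Returns non-zero if the addition was performed.
 */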
static inline int64_t
atomic64_add_unless(atomic64_t *v, int64_t a, int64_t u)
{
	int64_t c = atomic64_read(v);

	for (;;) {
		if (unlikely(c == u))
			break;
		if (likely(atomic_fcmpset_64(&v->counter, &c, c + a)))
			break;
	}
	return (c != u);
}

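/*
 * Same as atomic64_add_unless(), except the value observed before the
 * (possibly skipped) addition is returned.
 */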
static inline int64_t
atomic64_fetch_add_unless(atomic64_t *v, int64_t a, int64_t u)
{
	int64_t c = atomic64_read(v);

	for (;;) {
		if (unlikely(c == u))
			break;
		if (likely(atomic_fcmpset_64(&v->counter, &c, c + a)))
			break;
	}
	return (c);
}

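/*
 * Atomically exchange the counter with "i" and return its previous value.
 * Platforms lacking a native 64-bit swap (o32 MIPS and 32-bit PowerPC)
 * fall back to a compare-and-set loop.
 */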
static inline int64_t
atomic64_xchg(atomic64_t *v, int64_t i)
{
#if !((defined(__mips__) && !(defined(__mips_n32) || defined(__mips_n64))) || \
    (defined(__powerpc__) && !defined(__powerpc64__)))
	return (atomic_swap_64(&v->counter, i));
#else
	int64_t ret = atomic64_read(v);

	while (!atomic_fcmpset_64(&v->counter, &ret, i))
		;
	return (ret);
#endif
}

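/*
 * If the counter equals "old", replace it with "new".  Returns the value
 * observed before the operation, i.e. "old" on success.
 */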
static inline int64_t
atomic64_cmpxchg(atomic64_t *v, int64_t old, int64_t new)
{
	int64_t ret = old;

	for (;;) {
		if (atomic_fcmpset_64(&v->counter, &ret, new))
			break;
		if (ret != old)
			break;
	}
	return (ret);
}

#endif /* _LINUXKPI_ASM_ATOMIC64_H_ */