1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
3 */
4
5 #ifndef _RTE_SPINLOCK_H_
6 #define _RTE_SPINLOCK_H_
7
8 /**
9 * @file
10 *
11 * RTE Spinlocks
12 *
 * This file defines an API for spinlocks, which are implemented
14 * in an architecture-specific way. This kind of lock simply waits in
15 * a loop repeatedly checking until the lock becomes available.
16 *
17 * All locks must be initialised before use, and only initialised once.
18 *
19 */
20
21 #include <rte_lcore.h>
22 #ifdef RTE_FORCE_INTRINSICS
23 #include <rte_common.h>
24 #endif
25 #include <rte_pause.h>
26
27 /**
28 * The rte_spinlock_t type.
29 */
30 typedef struct {
31 volatile int locked; /**< lock status 0 = unlocked, 1 = locked */
32 } rte_spinlock_t;
33
34 /**
35 * A static spinlock initializer.
36 */
37 #define RTE_SPINLOCK_INITIALIZER { 0 }
38
39 /**
40 * Initialize the spinlock to an unlocked state.
41 *
42 * @param sl
43 * A pointer to the spinlock.
44 */
static inline void
rte_spinlock_init(rte_spinlock_t *sl)
{
	/* Plain (non-atomic) store is sufficient: initialization must not
	 * race with lock/unlock on the same object by contract ("all locks
	 * must be initialised before use").
	 */
	sl->locked = 0;
}
50
51 /**
52 * Take the spinlock.
53 *
54 * @param sl
55 * A pointer to the spinlock.
56 */
57 static inline void
58 rte_spinlock_lock(rte_spinlock_t *sl);
59
60 #ifdef RTE_FORCE_INTRINSICS
61 static inline void
rte_spinlock_lock(rte_spinlock_t * sl)62 rte_spinlock_lock(rte_spinlock_t *sl)
63 {
64 int exp = 0;
65
66 while (!__atomic_compare_exchange_n(&sl->locked, &exp, 1, 0,
67 __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
68 rte_wait_until_equal_32((volatile uint32_t *)&sl->locked,
69 0, __ATOMIC_RELAXED);
70 exp = 0;
71 }
72 }
73 #endif
74
75 /**
76 * Release the spinlock.
77 *
78 * @param sl
79 * A pointer to the spinlock.
80 */
81 static inline void
82 rte_spinlock_unlock (rte_spinlock_t *sl);
83
84 #ifdef RTE_FORCE_INTRINSICS
85 static inline void
rte_spinlock_unlock(rte_spinlock_t * sl)86 rte_spinlock_unlock (rte_spinlock_t *sl)
87 {
88 __atomic_store_n(&sl->locked, 0, __ATOMIC_RELEASE);
89 }
90 #endif
91
92 /**
93 * Try to take the lock.
94 *
95 * @param sl
96 * A pointer to the spinlock.
97 * @return
98 * 1 if the lock is successfully taken; 0 otherwise.
99 */
100 __rte_warn_unused_result
101 static inline int
102 rte_spinlock_trylock (rte_spinlock_t *sl);
103
104 #ifdef RTE_FORCE_INTRINSICS
105 static inline int
rte_spinlock_trylock(rte_spinlock_t * sl)106 rte_spinlock_trylock (rte_spinlock_t *sl)
107 {
108 int exp = 0;
109 return __atomic_compare_exchange_n(&sl->locked, &exp, 1,
110 0, /* disallow spurious failure */
111 __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
112 }
113 #endif
114
115 /**
116 * Test if the lock is taken.
117 *
118 * @param sl
119 * A pointer to the spinlock.
120 * @return
121 * 1 if the lock is currently taken; 0 otherwise.
122 */
static inline int rte_spinlock_is_locked(rte_spinlock_t *sl)
{
	/* Acquire load keeps subsequent reads from being reordered before
	 * the lock-state observation.
	 */
	int state = __atomic_load_n(&sl->locked, __ATOMIC_ACQUIRE);

	return state;
}
127
128 /**
129 * Test if hardware transactional memory (lock elision) is supported
130 *
131 * @return
132 * 1 if the hardware transactional memory is supported; 0 otherwise.
133 */
134 static inline int rte_tm_supported(void);
135
136 /**
137 * Try to execute critical section in a hardware memory transaction,
138 * if it fails or not available take the spinlock.
139 *
140 * NOTE: An attempt to perform a HW I/O operation inside a hardware memory
141 * transaction always aborts the transaction since the CPU is not able to
142 * roll-back should the transaction fail. Therefore, hardware transactional
143 * locks are not advised to be used around rte_eth_rx_burst() and
144 * rte_eth_tx_burst() calls.
145 *
146 * @param sl
147 * A pointer to the spinlock.
148 */
149 static inline void
150 rte_spinlock_lock_tm(rte_spinlock_t *sl);
151
152 /**
153 * Commit hardware memory transaction or release the spinlock if
154 * the spinlock is used as a fall-back
155 *
156 * @param sl
157 * A pointer to the spinlock.
158 */
159 static inline void
160 rte_spinlock_unlock_tm(rte_spinlock_t *sl);
161
162 /**
163 * Try to execute critical section in a hardware memory transaction,
164 * if it fails or not available try to take the lock.
165 *
166 * NOTE: An attempt to perform a HW I/O operation inside a hardware memory
167 * transaction always aborts the transaction since the CPU is not able to
168 * roll-back should the transaction fail. Therefore, hardware transactional
169 * locks are not advised to be used around rte_eth_rx_burst() and
170 * rte_eth_tx_burst() calls.
171 *
172 * @param sl
173 * A pointer to the spinlock.
174 * @return
175 * 1 if the hardware memory transaction is successfully started
176 * or lock is successfully taken; 0 otherwise.
177 */
178 __rte_warn_unused_result
179 static inline int
180 rte_spinlock_trylock_tm(rte_spinlock_t *sl);
181
182 /**
183 * The rte_spinlock_recursive_t type.
184 */
185 typedef struct {
186 rte_spinlock_t sl; /**< the actual spinlock */
187 volatile int user; /**< core id using lock, -1 for unused */
188 volatile int count; /**< count of time this lock has been called */
189 } rte_spinlock_recursive_t;
190
191 /**
192 * A static recursive spinlock initializer.
193 */
194 #define RTE_SPINLOCK_RECURSIVE_INITIALIZER {RTE_SPINLOCK_INITIALIZER, -1, 0}
195
196 /**
197 * Initialize the recursive spinlock to an unlocked state.
198 *
199 * @param slr
200 * A pointer to the recursive spinlock.
201 */
static inline void rte_spinlock_recursive_init(rte_spinlock_recursive_t *slr)
{
	/* no recursion yet and no owner (-1 = unused), underlying lock free */
	slr->count = 0;
	slr->user = -1;
	rte_spinlock_init(&slr->sl);
}
208
209 /**
210 * Take the recursive spinlock.
211 *
212 * @param slr
213 * A pointer to the recursive spinlock.
214 */
static inline void rte_spinlock_recursive_lock(rte_spinlock_recursive_t *slr)
{
	const int tid = rte_gettid();

	/* Only acquire the underlying lock on the first (outermost) entry;
	 * re-entry by the owning thread just bumps the nesting depth.
	 */
	if (slr->user != tid) {
		rte_spinlock_lock(&slr->sl);
		slr->user = tid;
	}
	slr->count++;
}
225 /**
226 * Release the recursive spinlock.
227 *
228 * @param slr
229 * A pointer to the recursive spinlock.
230 */
static inline void rte_spinlock_recursive_unlock(rte_spinlock_recursive_t *slr)
{
	/* Drop one nesting level; release the underlying lock only when the
	 * outermost unlock balances the outermost lock.
	 */
	slr->count--;
	if (slr->count == 0) {
		slr->user = -1;
		rte_spinlock_unlock(&slr->sl);
	}
}
239
240 /**
241 * Try to take the recursive lock.
242 *
243 * @param slr
244 * A pointer to the recursive spinlock.
245 * @return
246 * 1 if the lock is successfully taken; 0 otherwise.
247 */
248 __rte_warn_unused_result
rte_spinlock_recursive_trylock(rte_spinlock_recursive_t * slr)249 static inline int rte_spinlock_recursive_trylock(rte_spinlock_recursive_t *slr)
250 {
251 int id = rte_gettid();
252
253 if (slr->user != id) {
254 if (rte_spinlock_trylock(&slr->sl) == 0)
255 return 0;
256 slr->user = id;
257 }
258 slr->count++;
259 return 1;
260 }
261
262
263 /**
264 * Try to execute critical section in a hardware memory transaction,
265 * if it fails or not available take the recursive spinlocks
266 *
267 * NOTE: An attempt to perform a HW I/O operation inside a hardware memory
268 * transaction always aborts the transaction since the CPU is not able to
269 * roll-back should the transaction fail. Therefore, hardware transactional
270 * locks are not advised to be used around rte_eth_rx_burst() and
271 * rte_eth_tx_burst() calls.
272 *
273 * @param slr
274 * A pointer to the recursive spinlock.
275 */
276 static inline void rte_spinlock_recursive_lock_tm(
277 rte_spinlock_recursive_t *slr);
278
279 /**
280 * Commit hardware memory transaction or release the recursive spinlock
281 * if the recursive spinlock is used as a fall-back
282 *
283 * @param slr
284 * A pointer to the recursive spinlock.
285 */
286 static inline void rte_spinlock_recursive_unlock_tm(
287 rte_spinlock_recursive_t *slr);
288
289 /**
290 * Try to execute critical section in a hardware memory transaction,
291 * if it fails or not available try to take the recursive lock
292 *
293 * NOTE: An attempt to perform a HW I/O operation inside a hardware memory
294 * transaction always aborts the transaction since the CPU is not able to
295 * roll-back should the transaction fail. Therefore, hardware transactional
296 * locks are not advised to be used around rte_eth_rx_burst() and
297 * rte_eth_tx_burst() calls.
298 *
299 * @param slr
300 * A pointer to the recursive spinlock.
301 * @return
302 * 1 if the hardware memory transaction is successfully started
303 * or lock is successfully taken; 0 otherwise.
304 */
305 __rte_warn_unused_result
306 static inline int rte_spinlock_recursive_trylock_tm(
307 rte_spinlock_recursive_t *slr);
308
309 #endif /* _RTE_SPINLOCK_H_ */
310