/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#ifndef _RTE_SPINLOCK_X86_64_H_
#define _RTE_SPINLOCK_X86_64_H_

#ifdef __cplusplus
extern "C" {
#endif

#include "generic/rte_spinlock.h"
#include "rte_rtm.h"
#include "rte_cpuflags.h"
#include "rte_branch_prediction.h"
#include "rte_common.h"
#include "rte_pause.h"
#include "rte_cycles.h"

#define RTE_RTM_MAX_RETRIES (20)
#define RTE_XABORT_LOCK_BUSY (0xff)

#ifndef RTE_FORCE_INTRINSICS
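/*
 * Inline-assembly spinlock implementation, used when RTE_FORCE_INTRINSICS
 * is not enabled. The lock word is acquired with an atomic xchg; while it
 * is held by another thread the waiter spins on a plain read with "pause",
 * and only retries the xchg once the lock appears free.
 */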
static inline void
rte_spinlock_lock(rte_spinlock_t *sl)
{
	int lock_val = 1;
	asm volatile (
			"1:\n"
			"xchg %[locked], %[lv]\n"
			"test %[lv], %[lv]\n"
			"jz 3f\n"
			"2:\n"
			"pause\n"
			"cmpl $0, %[locked]\n"
			"jnz 2b\n"
			"jmp 1b\n"
			"3:\n"
			: [locked] "=m" (sl->locked), [lv] "=q" (lock_val)
			: "[lv]" (lock_val)
			: "memory");
}

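/* Release the lock by atomically writing 0 to the lock word. */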
static inline void
rte_spinlock_unlock(rte_spinlock_t *sl)
{
	int unlock_val = 0;
	asm volatile (
			"xchg %[locked], %[ulv]\n"
			: [locked] "=m" (sl->locked), [ulv] "=q" (unlock_val)
			: "[ulv]" (unlock_val)
			: "memory");
}

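/*
 * Single xchg attempt, never spins: returns 1 if the lock was free
 * (previous value 0), 0 if it was already held.
 */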
static inline int
rte_spinlock_trylock(rte_spinlock_t *sl)
{
	int lockval = 1;

	asm volatile (
			"xchg %[locked], %[lockval]"
			: [locked] "=m" (sl->locked), [lockval] "=q" (lockval)
			: "[lockval]" (lockval)
			: "memory");

	return lockval == 0;
}
#endif

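/* Non-zero when the CPU supports Intel TSX/RTM; probed once at startup. */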
extern uint8_t rte_rtm_supported;

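/* Report whether RTM-based lock elision can be used on this CPU. */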
static inline int rte_tm_supported(void)
{
	return rte_rtm_supported;
}

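/*
 * Try to execute the critical section as an RTM transaction that elides
 * the given lock. Reading the lock word inside the transaction puts it in
 * the transaction's read set, so a thread that later takes the lock for
 * real will abort us. Returns 1 if a transaction was started with the lock
 * observed free; returns 0 if RTM is unavailable or if every one of the
 * RTE_RTM_MAX_RETRIES attempts aborted, in which case the caller must
 * acquire the lock conventionally. Aborts caused by a conflict or by the
 * lock being busy are retried after a randomized, exponentially growing
 * back-off; non-retryable aborts give up immediately.
 */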
static inline int
rte_try_tm(volatile int *lock)
{
	int i, retries;

	if (!rte_rtm_supported)
		return 0;

	retries = RTE_RTM_MAX_RETRIES;

	while (likely(retries--)) {

		unsigned int status = rte_xbegin();

		if (likely(RTE_XBEGIN_STARTED == status)) {
			if (unlikely(*lock))
				rte_xabort(RTE_XABORT_LOCK_BUSY);
			else
				return 1;
		}
		while (*lock)
			rte_pause();

		if ((status & RTE_XABORT_CONFLICT) ||
		   ((status & RTE_XABORT_EXPLICIT) &&
		    (RTE_XABORT_CODE(status) == RTE_XABORT_LOCK_BUSY))) {
			/* add a small delay before retrying, basing the
			 * delay on the number of times we've already tried,
			 * to give back-off type behaviour. The pause count
			 * is randomized by taking low bits of the TSC.
			 */
			int try_count = RTE_RTM_MAX_RETRIES - retries;
			int pause_count = (rte_rdtsc() & 0x7) | 1;
			pause_count <<= try_count;
			for (i = 0; i < pause_count; i++)
				rte_pause();
			continue;
		}

		if ((status & RTE_XABORT_RETRY) == 0) /* do not retry */
			break;
	}
	return 0;
}

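/*
 * Lock with hardware lock elision: first try to run the critical section
 * as an RTM transaction, and only take the spinlock for real if the
 * transaction cannot be started or keeps aborting.
 *
 * A minimal usage sketch (the lock/unlock pair must both use the _tm
 * variants; RTE_SPINLOCK_INITIALIZER comes from generic/rte_spinlock.h):
 *
 *	static rte_spinlock_t sl = RTE_SPINLOCK_INITIALIZER;
 *
 *	rte_spinlock_lock_tm(&sl);
 *	... critical section ...
 *	rte_spinlock_unlock_tm(&sl);
 */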
static inline void
rte_spinlock_lock_tm(rte_spinlock_t *sl)
{
	if (likely(rte_try_tm(&sl->locked)))
		return;

	rte_spinlock_lock(sl); /* fall-back */
}

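/*
 * Try-lock with elision: returns 1 if either a transaction was started or
 * the spinlock itself could be taken without blocking, 0 otherwise.
 */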
static inline int
rte_spinlock_trylock_tm(rte_spinlock_t *sl)
{
	if (likely(rte_try_tm(&sl->locked)))
		return 1;

	return rte_spinlock_trylock(sl);
}

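/*
 * Matching unlock for the _tm lock/trylock calls: if the lock word is set
 * we really own the lock and release it, otherwise we are still inside an
 * elided transaction and commit it with xend.
 */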
static inline void
rte_spinlock_unlock_tm(rte_spinlock_t *sl)
{
	if (unlikely(sl->locked))
		rte_spinlock_unlock(sl);
	else
		rte_xend();
}

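/*
 * Elision variants of the recursive spinlock: the inner spinlock's lock
 * word is elided just as above, with the regular recursive calls as the
 * fall-back path.
 */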
static inline void
rte_spinlock_recursive_lock_tm(rte_spinlock_recursive_t *slr)
{
	if (likely(rte_try_tm(&slr->sl.locked)))
		return;

	rte_spinlock_recursive_lock(slr); /* fall-back */
}

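/* Commit the transaction, or release the recursive lock if it was really taken. */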
static inline void
rte_spinlock_recursive_unlock_tm(rte_spinlock_recursive_t *slr)
{
	if (unlikely(slr->sl.locked))
		rte_spinlock_recursive_unlock(slr);
	else
		rte_xend();
}

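/*
 * Try-lock with elision for the recursive spinlock: returns 1 on success,
 * 0 if neither the transaction nor the recursive try-lock succeeded.
 */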
static inline int
rte_spinlock_recursive_trylock_tm(rte_spinlock_recursive_t *slr)
{
	if (likely(rte_try_tm(&slr->sl.locked)))
		return 1;

	return rte_spinlock_recursive_trylock(slr);
}

#ifdef __cplusplus
}
#endif

#endif /* _RTE_SPINLOCK_X86_64_H_ */