/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Arm Limited
 */

#ifndef _RTE_TICKETLOCK_H_
#define _RTE_TICKETLOCK_H_

/**
 * @file
 *
 * RTE ticket locks
 *
 * This file defines an API for ticket locks, which give each waiting
 * thread a ticket and grant the lock in ticket order: first come,
 * first served.
 *
 * All locks must be initialised before use, and only initialised once.
 *
 */

#ifdef __cplusplus
extern "C" {
#endif

#include <rte_common.h>
#include <rte_lcore.h>
#include <rte_pause.h>

/**
 * The rte_ticketlock_t type.
 */
typedef union {
	uint32_t tickets;
	struct {
		uint16_t current;
		uint16_t next;
	} s;
} rte_ticketlock_t;
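
/*
 * Illustrative note (not part of the upstream documentation): the two 16-bit
 * halves of the union implement the ticket scheme. "next" is the ticket
 * handed to the next locker, "current" is the ticket currently being served;
 * the lock is free when they are equal. For example, starting from the
 * initialised state {current = 0, next = 0}, a first locker takes ticket 0
 * and bumps next to 1; a second locker takes ticket 1 and waits until the
 * first unlock advances current to 1.
 */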

/**
 * A static ticketlock initializer.
 */
#define RTE_TICKETLOCK_INITIALIZER { 0 }

/**
 * Initialize the ticketlock to an unlocked state.
 *
 * @param tl
 *   A pointer to the ticketlock.
 */
static inline void
rte_ticketlock_init(rte_ticketlock_t *tl)
{
	__atomic_store_n(&tl->tickets, 0, __ATOMIC_RELAXED);
}

/**
 * Take the ticketlock.
 *
 * @param tl
 *   A pointer to the ticketlock.
 */
static inline void
rte_ticketlock_lock(rte_ticketlock_t *tl)
{
	uint16_t me = __atomic_fetch_add(&tl->s.next, 1, __ATOMIC_RELAXED);
	rte_wait_until_equal_16(&tl->s.current, me, __ATOMIC_ACQUIRE);
}

/**
 * Release the ticketlock.
 *
 * @param tl
 *   A pointer to the ticketlock.
 */
static inline void
rte_ticketlock_unlock(rte_ticketlock_t *tl)
{
	uint16_t i = __atomic_load_n(&tl->s.current, __ATOMIC_RELAXED);
	__atomic_store_n(&tl->s.current, i + 1, __ATOMIC_RELEASE);
}
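
/*
 * Minimal usage sketch (illustrative only, not part of the API; "counter"
 * and "counter_lock" are hypothetical application objects). The lock/unlock
 * pair brackets a critical section; waiters are served in FIFO order.
 *
 *	static rte_ticketlock_t counter_lock = RTE_TICKETLOCK_INITIALIZER;
 *	static uint64_t counter;
 *
 *	static void
 *	counter_inc(void)
 *	{
 *		rte_ticketlock_lock(&counter_lock);
 *		counter++;
 *		rte_ticketlock_unlock(&counter_lock);
 *	}
 */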

/**
 * Try to take the lock.
 *
 * @param tl
 *   A pointer to the ticketlock.
 * @return
 *   1 if the lock is successfully taken; 0 otherwise.
 */
static inline int
rte_ticketlock_trylock(rte_ticketlock_t *tl)
{
	rte_ticketlock_t oldl, newl;
	oldl.tickets = __atomic_load_n(&tl->tickets, __ATOMIC_RELAXED);
	newl.tickets = oldl.tickets;
	newl.s.next++;
	if (oldl.s.next == oldl.s.current) {
		if (__atomic_compare_exchange_n(&tl->tickets, &oldl.tickets,
		    newl.tickets, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
			return 1;
	}

	return 0;
}
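
/*
 * Illustrative trylock sketch (hypothetical helper, not part of the API):
 * fall back to other work instead of spinning when the lock is contended.
 *
 *	static int
 *	counter_try_inc(rte_ticketlock_t *lk, uint64_t *cnt)
 *	{
 *		if (rte_ticketlock_trylock(lk) == 0)
 *			return 0;
 *		(*cnt)++;
 *		rte_ticketlock_unlock(lk);
 *		return 1;
 *	}
 */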

/**
 * Test if the lock is taken.
 *
 * @param tl
 *   A pointer to the ticketlock.
 * @return
 *   1 if the lock is currently taken; 0 otherwise.
 */
static inline int
rte_ticketlock_is_locked(rte_ticketlock_t *tl)
{
	rte_ticketlock_t tic;
	tic.tickets = __atomic_load_n(&tl->tickets, __ATOMIC_ACQUIRE);
	return (tic.s.current != tic.s.next);
}

/**
 * The rte_ticketlock_recursive_t type.
 */
#define TICKET_LOCK_INVALID_ID -1

typedef struct {
	rte_ticketlock_t tl; /**< the actual ticketlock */
	int user; /**< thread id of the owner, TICKET_LOCK_INVALID_ID if unused */
	unsigned int count; /**< number of times the owner has taken the lock */
} rte_ticketlock_recursive_t;

/**
 * A static recursive ticketlock initializer.
 */
#define RTE_TICKETLOCK_RECURSIVE_INITIALIZER {RTE_TICKETLOCK_INITIALIZER, \
					      TICKET_LOCK_INVALID_ID, 0}

/**
 * Initialize the recursive ticketlock to an unlocked state.
 *
 * @param tlr
 *   A pointer to the recursive ticketlock.
 */
static inline void
rte_ticketlock_recursive_init(rte_ticketlock_recursive_t *tlr)
{
	rte_ticketlock_init(&tlr->tl);
	__atomic_store_n(&tlr->user, TICKET_LOCK_INVALID_ID, __ATOMIC_RELAXED);
	tlr->count = 0;
}

/**
 * Take the recursive ticketlock.
 *
 * @param tlr
 *   A pointer to the recursive ticketlock.
 */
static inline void
rte_ticketlock_recursive_lock(rte_ticketlock_recursive_t *tlr)
{
	int id = rte_gettid();

	if (__atomic_load_n(&tlr->user, __ATOMIC_RELAXED) != id) {
		rte_ticketlock_lock(&tlr->tl);
		__atomic_store_n(&tlr->user, id, __ATOMIC_RELAXED);
	}
	tlr->count++;
}

/**
 * Release the recursive ticketlock.
 *
 * @param tlr
 *   A pointer to the recursive ticketlock.
 */
static inline void
rte_ticketlock_recursive_unlock(rte_ticketlock_recursive_t *tlr)
{
	if (--(tlr->count) == 0) {
		__atomic_store_n(&tlr->user, TICKET_LOCK_INVALID_ID,
				 __ATOMIC_RELAXED);
		rte_ticketlock_unlock(&tlr->tl);
	}
}

/**
 * Try to take the recursive lock.
 *
 * @param tlr
 *   A pointer to the recursive ticketlock.
 * @return
 *   1 if the lock is successfully taken; 0 otherwise.
 */
static inline int
rte_ticketlock_recursive_trylock(rte_ticketlock_recursive_t *tlr)
{
	int id = rte_gettid();

	if (__atomic_load_n(&tlr->user, __ATOMIC_RELAXED) != id) {
		if (rte_ticketlock_trylock(&tlr->tl) == 0)
			return 0;
		__atomic_store_n(&tlr->user, id, __ATOMIC_RELAXED);
	}
	tlr->count++;
	return 1;
}
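
/*
 * Illustrative recursive-lock sketch (hypothetical helpers, not part of the
 * API): the same thread may re-enter the lock; only the outermost unlock
 * releases it to other threads.
 *
 *	static rte_ticketlock_recursive_t rlock =
 *			RTE_TICKETLOCK_RECURSIVE_INITIALIZER;
 *
 *	static void
 *	inner(void)
 *	{
 *		rte_ticketlock_recursive_lock(&rlock);
 *		rte_ticketlock_recursive_unlock(&rlock);
 *	}
 *
 *	static void
 *	outer(void)
 *	{
 *		rte_ticketlock_recursive_lock(&rlock);
 *		inner();
 *		rte_ticketlock_recursive_unlock(&rlock);
 *	}
 */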

#ifdef __cplusplus
}
#endif

#endif /* _RTE_TICKETLOCK_H_ */