/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Netronome Systems, Inc.
 * All rights reserved.
 */

#include <errno.h>

#include <malloc.h>
#include <time.h>
#include <sched.h>

#include "nfp_cpp.h"
#include "nfp6000/nfp6000.h"

#define MUTEX_LOCKED(interface)  ((((uint32_t)(interface)) << 16) | 0x000f)
#define MUTEX_UNLOCK(interface)  (0                               | 0x0000)

#define MUTEX_IS_LOCKED(value)   (((value) & 0xffff) == 0x000f)
#define MUTEX_IS_UNLOCKED(value) (((value) & 0xffff) == 0x0000)
#define MUTEX_INTERFACE(value)   (((value) >> 16) & 0xffff)

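/*
 * Layout of the 64-bit mutex word in NFP memory, as used below: the 32-bit
 * value at 'address' holds the lock state, with the owner's CPP interface ID
 * in bits 31:16 and 0x000f in bits 15:0 when locked (0x0000 when unlocked);
 * the 32-bit word at 'address + 4' holds the key that identifies the mutex.
 */
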
/*
 * If you need more than 65536 recursive locks, please
 * rethink your code.
 */
#define MUTEX_DEPTH_MAX         0xffff

struct nfp_cpp_mutex {
	struct nfp_cpp *cpp;
	uint8_t target;
	uint16_t depth;
	unsigned long long address;
	uint32_t key;
	unsigned int usage;
	struct nfp_cpp_mutex *prev, *next;
};

static int
_nfp_cpp_mutex_validate(uint32_t model, int *target, unsigned long long address)
{
	/* Address must be 64-bit aligned */
	if (address & 7)
		return NFP_ERRNO(EINVAL);

	if (NFP_CPP_MODEL_IS_6000(model)) {
		if (*target != NFP_CPP_TARGET_MU)
			return NFP_ERRNO(EINVAL);
	} else {
		return NFP_ERRNO(EINVAL);
	}

	return 0;
}

/*
 * Initialize a mutex location
 *
 * The CPP target:address must point to a 64-bit aligned location, and
 * this function will initialize 64 bits of data at that location.
 *
 * This creates the initial mutex state, as locked by this
 * nfp_cpp_interface().
 *
 * This function should only be called when setting up
 * the initial lock state upon boot-up of the system.
 *
 * @param cpp       NFP CPP handle
 * @param target    NFP CPP target ID (i.e. NFP_CPP_TARGET_CLS or
 *		    NFP_CPP_TARGET_MU)
 * @param address   Offset into the address space of the NFP CPP target ID
 * @param key       Unique 32-bit value for this mutex
 *
 * @return 0 on success, or -1 on failure (and set errno accordingly).
 */
int
nfp_cpp_mutex_init(struct nfp_cpp *cpp, int target, unsigned long long address,
		   uint32_t key)
{
	uint32_t model = nfp_cpp_model(cpp);
	uint32_t muw = NFP_CPP_ID(target, 4, 0);	/* atomic_write */
	int err;

	err = _nfp_cpp_mutex_validate(model, &target, address);
	if (err < 0)
		return err;

	err = nfp_cpp_writel(cpp, muw, address + 4, key);
	if (err < 0)
		return err;

	err = nfp_cpp_writel(cpp, muw, address + 0,
			     MUTEX_LOCKED(nfp_cpp_interface(cpp)));
	if (err < 0)
		return err;

	return 0;
}

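/*
 * Usage sketch (illustrative only, not part of the original driver): how a
 * boot-time component might initialize the mutex word before anything else
 * attaches to it.  The offset and key below are hypothetical placeholders.
 */
static inline int
example_nfp_mutex_boot_init(struct nfp_cpp *cpp)
{
	/* Placeholder location within the MU target reserved for the lock */
	const unsigned long long example_offset = 0x4000;
	const uint32_t example_key = 0x4c4f434b;	/* arbitrary key value */

	/*
	 * Writes the key at offset + 4 and marks the lock word as held by
	 * this nfp_cpp_interface(); returns 0 or -1 with errno set.
	 */
	return nfp_cpp_mutex_init(cpp, NFP_CPP_TARGET_MU, example_offset,
				  example_key);
}
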
/*
 * Create a mutex handle from an address controlled by a MU Atomic engine
 *
 * The CPP target:address must point to a 64-bit aligned location, and
 * reserve 64 bits of data at the location for use by the handle.
 *
 * Only target/address pairs that point to entities that support the
 * MU Atomic Engine are supported.
 *
 * @param cpp       NFP CPP handle
 * @param target    NFP CPP target ID (i.e. NFP_CPP_TARGET_CLS or
 *		    NFP_CPP_TARGET_MU)
 * @param address   Offset into the address space of the NFP CPP target ID
 * @param key       32-bit unique key (must match the key at this location)
 *
 * @return      A non-NULL struct nfp_cpp_mutex * on success, NULL on failure.
 */
struct nfp_cpp_mutex *
nfp_cpp_mutex_alloc(struct nfp_cpp *cpp, int target,
		    unsigned long long address, uint32_t key)
{
	uint32_t model = nfp_cpp_model(cpp);
	struct nfp_cpp_mutex *mutex;
	uint32_t mur = NFP_CPP_ID(target, 3, 0);	/* atomic_read */
	int err;
	uint32_t tmp;

	/* Look for a cached mutex */
	for (mutex = cpp->mutex_cache; mutex; mutex = mutex->next) {
		if (mutex->target == target && mutex->address == address)
			break;
	}

	if (mutex) {
		if (mutex->key == key) {
			mutex->usage++;
			return mutex;
		}

		/* If the key doesn't match... */
		return NFP_ERRPTR(EEXIST);
	}

	err = _nfp_cpp_mutex_validate(model, &target, address);
	if (err < 0)
		return NULL;

	err = nfp_cpp_readl(cpp, mur, address + 4, &tmp);
	if (err < 0)
		return NULL;

	if (tmp != key)
		return NFP_ERRPTR(EEXIST);

	mutex = calloc(1, sizeof(*mutex));
	if (!mutex)
		return NFP_ERRPTR(ENOMEM);

	mutex->cpp = cpp;
	mutex->target = target;
	mutex->address = address;
	mutex->key = key;
	mutex->depth = 0;
	mutex->usage = 1;

	/* Add mutex to the cache */
	if (cpp->mutex_cache) {
		cpp->mutex_cache->prev = mutex;
		mutex->next = cpp->mutex_cache;
		cpp->mutex_cache = mutex;
	} else {
		cpp->mutex_cache = mutex;
	}

	return mutex;
}

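/*
 * Usage sketch (illustrative only, not part of the original driver): attach
 * to an already initialized mutex word and drop the reference again.  The
 * offset and key are caller-supplied and must match what
 * nfp_cpp_mutex_init() wrote at that location.
 */
static inline int
example_nfp_mutex_attach(struct nfp_cpp *cpp, unsigned long long offset,
			 uint32_t key)
{
	struct nfp_cpp_mutex *mutex;

	/* Returns a cached handle if one already exists for target/offset */
	mutex = nfp_cpp_mutex_alloc(cpp, NFP_CPP_TARGET_MU, offset, key);
	if (mutex == NULL)
		return -1;	/* errno: EEXIST on key mismatch, EINVAL, ... */

	/* ... use nfp_cpp_mutex_lock()/nfp_cpp_mutex_unlock() here ... */

	/* Drops the usage count; the handle is freed when it reaches zero */
	nfp_cpp_mutex_free(mutex);
	return 0;
}
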
struct nfp_cpp *
nfp_cpp_mutex_cpp(struct nfp_cpp_mutex *mutex)
{
	return mutex->cpp;
}

uint32_t
nfp_cpp_mutex_key(struct nfp_cpp_mutex *mutex)
{
	return mutex->key;
}

uint16_t
nfp_cpp_mutex_owner(struct nfp_cpp_mutex *mutex)
{
	uint32_t mur = NFP_CPP_ID(mutex->target, 3, 0);	/* atomic_read */
	uint32_t value, key;
	int err;

	err = nfp_cpp_readl(mutex->cpp, mur, mutex->address, &value);
	if (err < 0)
		return err;

	err = nfp_cpp_readl(mutex->cpp, mur, mutex->address + 4, &key);
	if (err < 0)
		return err;

	if (key != mutex->key)
		return NFP_ERRNO(EPERM);

	if (!MUTEX_IS_LOCKED(value))
		return 0;

	return MUTEX_INTERFACE(value);
}

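/*
 * Usage sketch (illustrative only, not part of the original driver): check
 * whether the lock is currently held by this host's CPP interface.  Error
 * returns from nfp_cpp_mutex_owner() are not handled here.
 */
static inline int
example_nfp_mutex_held_by_us(struct nfp_cpp_mutex *mutex)
{
	uint16_t owner = nfp_cpp_mutex_owner(mutex);

	/* 0 means unlocked; otherwise the owner's interface ID is returned */
	return owner != 0 &&
	       owner == nfp_cpp_interface(nfp_cpp_mutex_cpp(mutex));
}
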
int
nfp_cpp_mutex_target(struct nfp_cpp_mutex *mutex)
{
	return mutex->target;
}

uint64_t
nfp_cpp_mutex_address(struct nfp_cpp_mutex *mutex)
{
	return mutex->address;
}

/*
 * Free a mutex handle - does not alter the lock state
 *
 * @param mutex     NFP CPP Mutex handle
 */
void
nfp_cpp_mutex_free(struct nfp_cpp_mutex *mutex)
{
	mutex->usage--;
	if (mutex->usage > 0)
		return;

	/* Remove mutex from the cache */
	if (mutex->next)
		mutex->next->prev = mutex->prev;
	if (mutex->prev)
		mutex->prev->next = mutex->next;

	/* If mutex->cpp == NULL, something broke */
	if (mutex->cpp && mutex == mutex->cpp->mutex_cache)
		mutex->cpp->mutex_cache = mutex->next;

	free(mutex);
}

/*
 * Lock a mutex handle, using the NFP MU Atomic Engine
 *
 * @param mutex     NFP CPP Mutex handle
 *
 * @return 0 on success, or -1 on failure (and set errno accordingly).
 */
int
nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex)
{
	int err;
	time_t warn_at = time(NULL) + 15;

	while ((err = nfp_cpp_mutex_trylock(mutex)) != 0) {
		/* If errno != EBUSY, then the lock was damaged */
		if (err < 0 && errno != EBUSY)
			return err;
		if (time(NULL) >= warn_at) {
			printf("Warning: waiting for NFP mutex\n");
			printf("\tusage:%u\n", mutex->usage);
			printf("\tdepth:%hu\n", mutex->depth);
			printf("\ttarget:%d\n", mutex->target);
			printf("\taddr:%llx\n", mutex->address);
			printf("\tkey:%08x\n", mutex->key);
			warn_at = time(NULL) + 60;
		}
		sched_yield();
	}
	return 0;
}

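/*
 * Usage sketch (illustrative only, not part of the original driver): take the
 * mutex around a critical section.  nfp_cpp_mutex_lock() spins on
 * nfp_cpp_mutex_trylock() with sched_yield(), so it can block indefinitely
 * while another interface holds the lock.
 */
static inline int
example_nfp_mutex_critical_section(struct nfp_cpp_mutex *mutex)
{
	int err;

	err = nfp_cpp_mutex_lock(mutex);
	if (err < 0)
		return err;	/* errno != EBUSY means the lock is damaged */

	/* ... access the resource protected by this mutex ... */

	return nfp_cpp_mutex_unlock(mutex);
}
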
/*
 * Unlock a mutex handle, using the NFP MU Atomic Engine
 *
 * @param mutex     NFP CPP Mutex handle
 *
 * @return 0 on success, or -1 on failure (and set errno accordingly).
 */
int
nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex)
{
	uint32_t muw = NFP_CPP_ID(mutex->target, 4, 0);	/* atomic_write */
	uint32_t mur = NFP_CPP_ID(mutex->target, 3, 0);	/* atomic_read */
	struct nfp_cpp *cpp = mutex->cpp;
	uint32_t key, value;
	uint16_t interface = nfp_cpp_interface(cpp);
	int err;

	if (mutex->depth > 1) {
		mutex->depth--;
		return 0;
	}

	err = nfp_cpp_readl(mutex->cpp, mur, mutex->address, &value);
	if (err < 0)
		goto exit;

	err = nfp_cpp_readl(mutex->cpp, mur, mutex->address + 4, &key);
	if (err < 0)
		goto exit;

	if (key != mutex->key) {
		err = NFP_ERRNO(EPERM);
		goto exit;
	}

	if (value != MUTEX_LOCKED(interface)) {
		err = NFP_ERRNO(EACCES);
		goto exit;
	}

	err = nfp_cpp_writel(cpp, muw, mutex->address, MUTEX_UNLOCK(interface));
	if (err < 0)
		goto exit;

	mutex->depth = 0;

exit:
	return err;
}

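/*
 * Usage sketch (illustrative only, not part of the original driver): the
 * handle is recursive, so nested lock/unlock pairs on the same handle only
 * touch the NFP on the outermost acquire and release; the inner calls just
 * adjust the depth counter kept in the handle.
 */
static inline int
example_nfp_mutex_recursive(struct nfp_cpp_mutex *mutex)
{
	int err;

	err = nfp_cpp_mutex_lock(mutex);	/* takes the hardware lock */
	if (err < 0)
		return err;

	err = nfp_cpp_mutex_lock(mutex);	/* only increments depth */
	if (err < 0) {
		nfp_cpp_mutex_unlock(mutex);
		return err;
	}

	nfp_cpp_mutex_unlock(mutex);		/* only decrements depth */
	return nfp_cpp_mutex_unlock(mutex);	/* releases the hardware lock */
}
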
/*
 * Attempt to lock a mutex handle, using the NFP MU Atomic Engine
 *
 * Valid lock states:
 *
 *      0x....0000      - Unlocked
 *      0x....000f      - Locked
 *
 * @param mutex     NFP CPP Mutex handle
 * @return      0 if the lock succeeded, -1 on failure (and errno set
 *		appropriately).
 */
int
nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex)
{
	uint32_t mur = NFP_CPP_ID(mutex->target, 3, 0);	/* atomic_read */
	uint32_t muw = NFP_CPP_ID(mutex->target, 4, 0);	/* atomic_write */
	uint32_t mus = NFP_CPP_ID(mutex->target, 5, 3);	/* test_set_imm */
	uint32_t key, value, tmp;
	struct nfp_cpp *cpp = mutex->cpp;
	int err;

	if (mutex->depth > 0) {
		if (mutex->depth == MUTEX_DEPTH_MAX)
			return NFP_ERRNO(E2BIG);

		mutex->depth++;
		return 0;
	}

	/* Verify that the lock marker is not damaged */
	err = nfp_cpp_readl(cpp, mur, mutex->address + 4, &key);
	if (err < 0)
		goto exit;

	if (key != mutex->key) {
		err = NFP_ERRNO(EPERM);
		goto exit;
	}

	/*
	 * Compare against the unlocked state, and if true,
	 * write the interface id into the top 16 bits, and
	 * mark as locked.
	 */
	value = MUTEX_LOCKED(nfp_cpp_interface(cpp));

	/*
	 * We use test_set_imm here, as it implies a read
	 * of the current state, and sets the bits in the
	 * bytemask of the command to 1s. Since the mutex
	 * is guaranteed to be 64-bit aligned, the bytemask
	 * of this 32-bit command is ensured to be 8'b00001111,
	 * which implies that the lower 4 bits will be set to
	 * ones regardless of the initial state.
	 *
	 * Since this is a 'Readback' operation, with no Pull
	 * data, we can treat this as a normal Push (read)
	 * atomic, which returns the original value.
	 */
	err = nfp_cpp_readl(cpp, mus, mutex->address, &tmp);
	if (err < 0)
		goto exit;

	/* Was it unlocked? */
	if (MUTEX_IS_UNLOCKED(tmp)) {
		/*
		 * The read value can only be 0x....0000 in the unlocked state.
		 * If there was another contender for this lock, then
		 * the lock state would be 0x....000f.
		 *
		 * Write our owner ID into the lock.
		 * While not strictly necessary, this helps with
		 * debug and bookkeeping.
		 */
		err = nfp_cpp_writel(cpp, muw, mutex->address, value);
		if (err < 0)
			goto exit;

		mutex->depth = 1;
		goto exit;
	}

	/* Already locked by us? Success! */
	if (tmp == value) {
		mutex->depth = 1;
		goto exit;
	}

	err = NFP_ERRNO(MUTEX_IS_LOCKED(tmp) ? EBUSY : EINVAL);

exit:
	return err;
}
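/*
 * Usage sketch (illustrative only, not part of the original driver):
 * non-blocking acquisition.  On contention, nfp_cpp_mutex_trylock() returns
 * -1 with errno set to EBUSY; any other errno means the lock word is damaged
 * or keyed differently.
 */
static inline int
example_nfp_mutex_try_once(struct nfp_cpp_mutex *mutex)
{
	if (nfp_cpp_mutex_trylock(mutex) == 0) {
		/* ... critical section ... */
		return nfp_cpp_mutex_unlock(mutex);
	}

	if (errno == EBUSY)
		return 0;	/* someone else holds the lock; retry later */

	return -1;		/* EPERM/EINVAL: lock word damaged or mismatched */
}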