/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <stddef.h>
#include <limits.h>
#include <inttypes.h>
#include <unistd.h>
#include <pthread.h>
#include <fcntl.h>
#include <sys/time.h>
#include <sys/mman.h>

#include <rte_per_lcore.h>
#include <rte_log.h>
#include <rte_spinlock.h>
#include <rte_common.h>

#include "lthread_api.h"
#include "lthread_int.h"
#include "lthread_mutex.h"
#include "lthread_sched.h"
#include "lthread_queue.h"
#include "lthread_objcache.h"
#include "lthread_diag.h"

/*
 * Create a mutex
 */
int
lthread_mutex_init(char *name, struct lthread_mutex **mutex,
		   __rte_unused const struct lthread_mutexattr *attr)
{
	struct lthread_mutex *m;

	if (mutex == NULL)
		return POSIX_ERRNO(EINVAL);

	m = _lthread_objcache_alloc((THIS_SCHED)->mutex_cache);
	if (m == NULL)
		return POSIX_ERRNO(EAGAIN);

	m->blocked = _lthread_queue_create("blocked queue");
	if (m->blocked == NULL) {
		_lthread_objcache_free((THIS_SCHED)->mutex_cache, m);
		return POSIX_ERRNO(EAGAIN);
	}

	if (name == NULL)
		strncpy(m->name, "no name", sizeof(m->name));
	else
		strncpy(m->name, name, sizeof(m->name));
	m->name[sizeof(m->name)-1] = 0;

	m->root_sched = THIS_SCHED;
	m->owner = NULL;

	rte_atomic64_init(&m->count);

	DIAG_CREATE_EVENT(m, LT_DIAG_MUTEX_CREATE);
	/* success */
	(*mutex) = m;
	return 0;
}
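
/*
 * Usage sketch (illustrative only, not part of this file's API): the
 * expected lifecycle is init -> lock -> unlock -> destroy, all called
 * from lthread context with a scheduler running on this core. The
 * function name below is hypothetical and error handling is abbreviated.
 *
 *	static void example_critical_section(void)
 *	{
 *		struct lthread_mutex *m;
 *
 *		if (lthread_mutex_init("example", &m, NULL) != 0)
 *			return;
 *		lthread_mutex_lock(m);
 *		... critical section ...
 *		lthread_mutex_unlock(m);
 *		lthread_mutex_destroy(m);
 *	}
 */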

/*
 * Destroy a mutex
 */
int lthread_mutex_destroy(struct lthread_mutex *m)
{
	if ((m == NULL) || (m->blocked == NULL)) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_DESTROY, m, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}

	if (m->owner == NULL) {
		/* try to delete the blocked queue */
		if (_lthread_queue_destroy(m->blocked) < 0) {
			DIAG_EVENT(m, LT_DIAG_MUTEX_DESTROY,
					m, POSIX_ERRNO(EBUSY));
			return POSIX_ERRNO(EBUSY);
		}

		/* free the mutex back to the cache */
		_lthread_objcache_free(m->root_sched->mutex_cache, m);
		DIAG_EVENT(m, LT_DIAG_MUTEX_DESTROY, m, 0);
		return 0;
	}
	/* can't destroy it, it is still in use */
	DIAG_EVENT(m, LT_DIAG_MUTEX_DESTROY, m, POSIX_ERRNO(EBUSY));
	return POSIX_ERRNO(EBUSY);
}

/*
 * Obtain a mutex, blocking until it is acquired
 */
int lthread_mutex_lock(struct lthread_mutex *m)
{
	struct lthread *lt = THIS_LTHREAD;

	if ((m == NULL) || (m->blocked == NULL)) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_LOCK, m, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}

	/* recursive locking is not permitted */
	if (m->owner == lt) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_LOCK, m, POSIX_ERRNO(EDEADLK));
		return POSIX_ERRNO(EDEADLK);
	}

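	/* Locking protocol, as implemented below: m->count counts the
	 * holder plus any lthreads competing for the lock, and ownership
	 * is claimed by a compare-and-set of m->owner from NULL.
	 * An illustrative interleaving with two lthreads A and B:
	 * A locks (count 1, owner A); B locks, count becomes 2 and the
	 * cmpset fails, so B queues itself and suspends; A unlocks, sees
	 * count > 0, dequeues and readies B; B resumes, increments count
	 * back to 1 and wins the cmpset.
	 */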
	for (;;) {
		rte_atomic64_inc(&m->count);
		do {
			if (rte_atomic64_cmpset
			    ((uint64_t *) &m->owner, 0, (uint64_t) lt)) {
				/* happy days, we got the lock */
				DIAG_EVENT(m, LT_DIAG_MUTEX_LOCK, m, 0);
				return 0;
			}
			/* spin due to race with unlock when
			 * nothing was blocked
			 */
		} while ((rte_atomic64_read(&m->count) == 1) &&
				(m->owner == NULL));

		/* Queue the current thread on the blocked queue. This is
		 * deferred until after we return to the scheduler, so that
		 * the current thread's context is saved before an unlock
		 * could dequeue and resume it.
		 */
		DIAG_EVENT(m, LT_DIAG_MUTEX_BLOCKED, m, lt);
		lt->pending_wr_queue = m->blocked;
		/* now relinquish cpu */
		_suspend();
		/* resumed, must loop and compete for the lock again */
	}
	return 0;
}

/*
 * Try to lock a mutex, but do not block
 */
int lthread_mutex_trylock(struct lthread_mutex *m)
{
	struct lthread *lt = THIS_LTHREAD;

	if ((m == NULL) || (m->blocked == NULL)) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_TRYLOCK, m, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}

	if (m->owner == lt) {
		/* recursive locking is not permitted */
		DIAG_EVENT(m, LT_DIAG_MUTEX_TRYLOCK, m, POSIX_ERRNO(EDEADLK));
		return POSIX_ERRNO(EDEADLK);
	}

	rte_atomic64_inc(&m->count);
	if (rte_atomic64_cmpset
	    ((uint64_t *) &m->owner, (uint64_t) NULL, (uint64_t) lt)) {
		/* got the lock */
		DIAG_EVENT(m, LT_DIAG_MUTEX_TRYLOCK, m, 0);
		return 0;
	}

	/* failed, so undo the count increment and return busy */
	rte_atomic64_dec(&m->count);
	DIAG_EVENT(m, LT_DIAG_MUTEX_TRYLOCK, m, POSIX_ERRNO(EBUSY));
	return POSIX_ERRNO(EBUSY);
}
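
/*
 * Usage sketch (illustrative only): trylock returns 0 on success and
 * POSIX_ERRNO(EBUSY) when the mutex is already held, so a caller can do
 * useful work instead of blocking. The surrounding function is
 * hypothetical.
 *
 *	static void example_poll(struct lthread_mutex *m)
 *	{
 *		if (lthread_mutex_trylock(m) == 0) {
 *			... critical section ...
 *			lthread_mutex_unlock(m);
 *		} else {
 *			... do other work and retry later ...
 *		}
 *	}
 */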

/*
 * Unlock a mutex
 */
int lthread_mutex_unlock(struct lthread_mutex *m)
{
	struct lthread *lt = THIS_LTHREAD;
	struct lthread *unblocked;

	if ((m == NULL) || (m->blocked == NULL)) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_UNLOCKED, m, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}

	/* fail if the caller does not own it */
	if (m->owner != lt || m->owner == NULL) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_UNLOCKED, m, POSIX_ERRNO(EPERM));
		return POSIX_ERRNO(EPERM);
	}

	rte_atomic64_dec(&m->count);
	/* if there are blocked threads then make one ready */
	while (rte_atomic64_read(&m->count) > 0) {
		unblocked = _lthread_queue_remove(m->blocked);

		if (unblocked != NULL) {
			rte_atomic64_dec(&m->count);
			DIAG_EVENT(m, LT_DIAG_MUTEX_UNLOCKED, m, unblocked);
			RTE_ASSERT(unblocked->sched != NULL);
			_ready_queue_insert((struct lthread_sched *)
					    unblocked->sched, unblocked);
			break;
		}
	}
	/* release the lock */
	m->owner = NULL;
	return 0;
}
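
/*
 * Note on the handoff above (explanatory, matching the lock loop): a
 * count still greater than zero after the owner's decrement means at
 * least one lthread is competing, so unlock spins until the waiter has
 * appeared on the blocked queue, then dequeues and readies it. The woken
 * lthread does not inherit ownership; it loops in lthread_mutex_lock()
 * and competes for the owner field again.
 */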

/*
 * Return the diagnostic reference value stored in a mutex
 */
uint64_t
lthread_mutex_diag_ref(struct lthread_mutex *m)
{
	if (m == NULL)
		return 0;
	return m->diag_ref;
}