/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015 Intel Corporation
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <stddef.h>
#include <limits.h>
#include <inttypes.h>
#include <unistd.h>
#include <pthread.h>
#include <fcntl.h>
#include <sys/time.h>
#include <sys/mman.h>

#include <rte_per_lcore.h>
#include <rte_log.h>
#include <rte_spinlock.h>
#include <rte_common.h>

#include "lthread_api.h"
#include "lthread_int.h"
#include "lthread_mutex.h"
#include "lthread_sched.h"
#include "lthread_queue.h"
#include "lthread_objcache.h"
#include "lthread_diag.h"

/*
 * Create a mutex
 */
int
lthread_mutex_init(char *name, struct lthread_mutex **mutex,
		   __rte_unused const struct lthread_mutexattr *attr)
{
	struct lthread_mutex *m;

	if (mutex == NULL)
		return POSIX_ERRNO(EINVAL);

	m = _lthread_objcache_alloc((THIS_SCHED)->mutex_cache);
	if (m == NULL)
		return POSIX_ERRNO(EAGAIN);

	m->blocked = _lthread_queue_create("blocked queue");
	if (m->blocked == NULL) {
		_lthread_objcache_free((THIS_SCHED)->mutex_cache, m);
		return POSIX_ERRNO(EAGAIN);
	}

	if (name == NULL)
		strncpy(m->name, "no name", sizeof(m->name));
	else
		strncpy(m->name, name, sizeof(m->name));
	m->name[sizeof(m->name)-1] = 0;

	m->root_sched = THIS_SCHED;
	m->owner = NULL;

	rte_atomic64_init(&m->count);

	DIAG_CREATE_EVENT(m, LT_DIAG_MUTEX_CREATE);
	/* success */
	(*mutex) = m;
	return 0;
}
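
/*
 * A minimal usage sketch (illustrative only; it assumes the caller is
 * an lthread running on a started scheduler, since the mutex is
 * allocated from the current scheduler's object cache):
 *
 *	struct lthread_mutex *m;
 *
 *	if (lthread_mutex_init("example", &m, NULL) != 0)
 *		return;                    // EINVAL or EAGAIN
 *	lthread_mutex_lock(m);             // may suspend this lthread
 *	// ... touch shared state ...
 *	lthread_mutex_unlock(m);
 *	lthread_mutex_destroy(m);          // EBUSY while still owned
 */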

/*
 * Destroy a mutex
 */
int lthread_mutex_destroy(struct lthread_mutex *m)
{
	if ((m == NULL) || (m->blocked == NULL)) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_DESTROY, m, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}

	if (m->owner == NULL) {
		/* try to delete the blocked queue */
		if (_lthread_queue_destroy(m->blocked) < 0) {
			DIAG_EVENT(m, LT_DIAG_MUTEX_DESTROY,
				   m, POSIX_ERRNO(EBUSY));
			return POSIX_ERRNO(EBUSY);
		}

		/* free the mutex to cache */
		_lthread_objcache_free(m->root_sched->mutex_cache, m);
		DIAG_EVENT(m, LT_DIAG_MUTEX_DESTROY, m, 0);
		return 0;
	}
	/* can't destroy it, it is still in use */
	DIAG_EVENT(m, LT_DIAG_MUTEX_DESTROY, m, POSIX_ERRNO(EBUSY));
	return POSIX_ERRNO(EBUSY);
}

/*
 * Try to obtain a mutex
 */
int lthread_mutex_lock(struct lthread_mutex *m)
{
	struct lthread *lt = THIS_LTHREAD;

	if ((m == NULL) || (m->blocked == NULL)) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_LOCK, m, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}

	/* recursion is not allowed */
	if (m->owner == lt) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_LOCK, m, POSIX_ERRNO(EDEADLK));
		return POSIX_ERRNO(EDEADLK);
	}

	for (;;) {
		rte_atomic64_inc(&m->count);
		do {
			if (rte_atomic64_cmpset
			    ((uint64_t *) &m->owner, 0, (uint64_t) lt)) {
				/* happy days, we got the lock */
				DIAG_EVENT(m, LT_DIAG_MUTEX_LOCK, m, 0);
				return 0;
			}
			/* spin due to race with unlock when
			 * nothing was blocked
			 */
		} while ((rte_atomic64_read(&m->count) == 1) &&
			 (m->owner == NULL));

		/* Queue the current thread in the blocked queue.
		 * We defer the enqueue until after we return to the
		 * scheduler, so that the current thread's context is
		 * saved before an unlock could dequeue and resume it.
		 */
		DIAG_EVENT(m, LT_DIAG_MUTEX_BLOCKED, m, lt);
		lt->pending_wr_queue = m->blocked;
		/* now relinquish cpu */
		_suspend();
		/* resumed, must loop and compete for the lock again */
	}
	return 0;
}
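
/*
 * A sketch of the race the inner spin loop above closes. count acts
 * as "held + number of waiters", and owner is cleared only at the end
 * of lthread_mutex_unlock():
 *
 *	lthread A (lock)                 lthread B (unlock, owner)
 *	                                 dec count          -> 0
 *	                                 count == 0, skip wake-up loop
 *	inc count           -> 1
 *	cmpset(owner) fails              (owner still B)
 *	                                 owner = NULL
 *	count == 1 && owner == NULL,
 *	so spin and retry cmpset,
 *	which now succeeds
 *
 * Had A queued itself instead of spinning, B would already be past
 * the wake-up loop and A would never be resumed.
 */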

/* try to lock a mutex but don't block */
int lthread_mutex_trylock(struct lthread_mutex *m)
{
	struct lthread *lt = THIS_LTHREAD;

	if ((m == NULL) || (m->blocked == NULL)) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_TRYLOCK, m, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}

	if (m->owner == lt) {
		/* no recursion */
		DIAG_EVENT(m, LT_DIAG_MUTEX_TRYLOCK, m, POSIX_ERRNO(EDEADLK));
		return POSIX_ERRNO(EDEADLK);
	}

	rte_atomic64_inc(&m->count);
	if (rte_atomic64_cmpset
	    ((uint64_t *) &m->owner, (uint64_t) NULL, (uint64_t) lt)) {
		/* got the lock */
		DIAG_EVENT(m, LT_DIAG_MUTEX_TRYLOCK, m, 0);
		return 0;
	}

	/* failed so return busy */
	rte_atomic64_dec(&m->count);
	DIAG_EVENT(m, LT_DIAG_MUTEX_TRYLOCK, m, POSIX_ERRNO(EBUSY));
	return POSIX_ERRNO(EBUSY);
}
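
/*
 * Typical non-blocking pattern (an illustrative sketch, not part of
 * the API itself): do other work instead of suspending when the
 * mutex is contended.
 *
 *	if (lthread_mutex_trylock(m) == 0) {
 *		// ... critical section ...
 *		lthread_mutex_unlock(m);
 *	} else {
 *		// EBUSY (or EDEADLK if we already own m);
 *		// retry on a later scheduling pass
 *	}
 */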

/*
 * Unlock a mutex
 */
int lthread_mutex_unlock(struct lthread_mutex *m)
{
	struct lthread *lt = THIS_LTHREAD;
	struct lthread *unblocked;

	if ((m == NULL) || (m->blocked == NULL)) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_UNLOCKED, m, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}

	/* fail if the caller does not own the mutex */
	if (m->owner != lt || m->owner == NULL) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_UNLOCKED, m, POSIX_ERRNO(EPERM));
		return POSIX_ERRNO(EPERM);
	}

	rte_atomic64_dec(&m->count);
	/* if there are blocked threads then make one ready */
	while (rte_atomic64_read(&m->count) > 0) {
		unblocked = _lthread_queue_remove(m->blocked);

		if (unblocked != NULL) {
			rte_atomic64_dec(&m->count);
			DIAG_EVENT(m, LT_DIAG_MUTEX_UNLOCKED, m, unblocked);
			RTE_ASSERT(unblocked->sched != NULL);
			_ready_queue_insert((struct lthread_sched *)
					    unblocked->sched, unblocked);
			break;
		}
	}
	/* release the lock */
	m->owner = NULL;
	return 0;
}
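
/*
 * Note on the hand-off order above: count is decremented once for the
 * releasing owner and once more for the single waiter dequeued, and
 * owner is cleared only after that waiter has been made ready. The
 * woken lthread still has to win the cmpset in lthread_mutex_lock(),
 * so the wake-up is a hint rather than a direct ownership transfer.
 */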

/*
 * return the diagnostic ref value stored in a mutex
 */
uint64_t
lthread_mutex_diag_ref(struct lthread_mutex *m)
{
	if (m == NULL)
		return 0;
	return m->diag_ref;
}