/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <stddef.h>
#include <limits.h>
#include <inttypes.h>
#include <unistd.h>
#include <pthread.h>
#include <fcntl.h>
#include <sys/time.h>
#include <sys/mman.h>

#include <rte_per_lcore.h>
#include <rte_log.h>
#include <rte_spinlock.h>
#include <rte_common.h>

#include "lthread_api.h"
#include "lthread_int.h"
#include "lthread_mutex.h"
#include "lthread_sched.h"
#include "lthread_queue.h"
#include "lthread_objcache.h"
#include "lthread_diag.h"

/*
 * Create a mutex
 */
int
lthread_mutex_init(char *name, struct lthread_mutex **mutex,
		   __rte_unused const struct lthread_mutexattr *attr)
{
	struct lthread_mutex *m;

	if (mutex == NULL)
		return POSIX_ERRNO(EINVAL);

	m = _lthread_objcache_alloc((THIS_SCHED)->mutex_cache);
	if (m == NULL)
		return POSIX_ERRNO(EAGAIN);

	m->blocked = _lthread_queue_create("blocked queue");
	if (m->blocked == NULL) {
		_lthread_objcache_free((THIS_SCHED)->mutex_cache, m);
		return POSIX_ERRNO(EAGAIN);
	}

	if (name == NULL)
		strncpy(m->name, "no name", sizeof(m->name));
	else
		strncpy(m->name, name, sizeof(m->name));
	m->name[sizeof(m->name)-1] = 0;

	m->root_sched = THIS_SCHED;
	m->owner = NULL;

	rte_atomic64_init(&m->count);

	DIAG_CREATE_EVENT(m, LT_DIAG_MUTEX_CREATE);
	/* success */
	(*mutex) = m;
	return 0;
}

/*
 * Destroy a mutex
 */
int lthread_mutex_destroy(struct lthread_mutex *m)
{
	if ((m == NULL) || (m->blocked == NULL)) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_DESTROY, m, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}

	if (m->owner == NULL) {
		/* try to delete the blocked queue */
		if (_lthread_queue_destroy(m->blocked) < 0) {
			DIAG_EVENT(m, LT_DIAG_MUTEX_DESTROY,
				   m, POSIX_ERRNO(EBUSY));
			return POSIX_ERRNO(EBUSY);
		}

		/* free the mutex to cache */
		_lthread_objcache_free(m->root_sched->mutex_cache, m);
		DIAG_EVENT(m, LT_DIAG_MUTEX_DESTROY, m, 0);
		return 0;
	}
	/* can't destroy it, the mutex is still in use */
	DIAG_EVENT(m, LT_DIAG_MUTEX_DESTROY, m, POSIX_ERRNO(EBUSY));
	return POSIX_ERRNO(EBUSY);
}
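/*
 * Usage sketch for the create/destroy pair above. This is illustrative
 * only and is not part of the original file: the function name and the
 * LTHREAD_MUTEX_USAGE_EXAMPLE guard are hypothetical, and the guard keeps
 * the sketch out of the build. It assumes the caller runs inside an
 * lthread on a started scheduler, since init allocates from the
 * per-scheduler object cache via THIS_SCHED.
 */
#ifdef LTHREAD_MUTEX_USAGE_EXAMPLE
static void
example_mutex_lifecycle(void)
{
	struct lthread_mutex *m;

	/* attr is unused by this implementation, so NULL is fine */
	if (lthread_mutex_init("example", &m, NULL) != 0)
		return;

	/* ... lock/unlock as needed ... */

	/* destroy returns EBUSY while the mutex is still owned */
	lthread_mutex_destroy(m);
}
#endif /* LTHREAD_MUTEX_USAGE_EXAMPLE */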
/*
 * Try to obtain a mutex
 */
int lthread_mutex_lock(struct lthread_mutex *m)
{
	struct lthread *lt = THIS_LTHREAD;

	if ((m == NULL) || (m->blocked == NULL)) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_LOCK, m, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}

	/* allow no recursion */
	if (m->owner == lt) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_LOCK, m, POSIX_ERRNO(EDEADLK));
		return POSIX_ERRNO(EDEADLK);
	}

	for (;;) {
		rte_atomic64_inc(&m->count);
		do {
			if (rte_atomic64_cmpset
			    ((uint64_t *) &m->owner, 0, (uint64_t) lt)) {
				/* happy days, we got the lock */
				DIAG_EVENT(m, LT_DIAG_MUTEX_LOCK, m, 0);
				return 0;
			}
			/* spin due to race with unlock when
			 * nothing was blocked
			 */
		} while ((rte_atomic64_read(&m->count) == 1) &&
			 (m->owner == NULL));

		/* queue the current thread in the blocked queue;
		 * we defer this until after we return to the scheduler
		 * to ensure that the current thread context is saved
		 * before unlock could result in it being dequeued and
		 * resumed
		 */
		DIAG_EVENT(m, LT_DIAG_MUTEX_BLOCKED, m, lt);
		lt->pending_wr_queue = m->blocked;
		/* now relinquish cpu */
		_suspend();
		/* resumed, must loop and compete for the lock again */
	}
	return 0;
}

/* try to lock a mutex but don't block */
int lthread_mutex_trylock(struct lthread_mutex *m)
{
	struct lthread *lt = THIS_LTHREAD;

	if ((m == NULL) || (m->blocked == NULL)) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_TRYLOCK, m, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}

	if (m->owner == lt) {
		/* no recursion */
		DIAG_EVENT(m, LT_DIAG_MUTEX_TRYLOCK, m, POSIX_ERRNO(EDEADLK));
		return POSIX_ERRNO(EDEADLK);
	}

	rte_atomic64_inc(&m->count);
	if (rte_atomic64_cmpset
	    ((uint64_t *) &m->owner, (uint64_t) NULL, (uint64_t) lt)) {
		/* got the lock */
		DIAG_EVENT(m, LT_DIAG_MUTEX_TRYLOCK, m, 0);
		return 0;
	}

	/* failed so return busy */
	rte_atomic64_dec(&m->count);
	DIAG_EVENT(m, LT_DIAG_MUTEX_TRYLOCK, m, POSIX_ERRNO(EBUSY));
	return POSIX_ERRNO(EBUSY);
}

/*
 * Unlock a mutex
 */
int lthread_mutex_unlock(struct lthread_mutex *m)
{
	struct lthread *lt = THIS_LTHREAD;
	struct lthread *unblocked;

	if ((m == NULL) || (m->blocked == NULL)) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_UNLOCKED, m, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}

	/* fail if the calling thread doesn't own it */
	if (m->owner != lt || m->owner == NULL) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_UNLOCKED, m, POSIX_ERRNO(EPERM));
		return POSIX_ERRNO(EPERM);
	}

	rte_atomic64_dec(&m->count);
	/* if there are blocked threads then make one ready */
	while (rte_atomic64_read(&m->count) > 0) {
		unblocked = _lthread_queue_remove(m->blocked);

		if (unblocked != NULL) {
			rte_atomic64_dec(&m->count);
			DIAG_EVENT(m, LT_DIAG_MUTEX_UNLOCKED, m, unblocked);
			RTE_ASSERT(unblocked->sched != NULL);
			_ready_queue_insert((struct lthread_sched *)
					    unblocked->sched, unblocked);
			break;
		}
	}
	/* release the lock */
	m->owner = NULL;
	return 0;
}
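/*
 * Sketch of the lock/trylock/unlock protocol implemented above,
 * illustrative only and compiled out behind the same hypothetical
 * LTHREAD_MUTEX_USAGE_EXAMPLE guard. m->count tracks lock attempts:
 * lock() increments it before competing for ownership, so unlock()
 * sees count > 0 whenever there may be a blocked waiter to wake. A
 * caller that can make progress elsewhere can use trylock() and back
 * off instead of suspending.
 */
#ifdef LTHREAD_MUTEX_USAGE_EXAMPLE
static void
example_mutex_contention(struct lthread_mutex *m)
{
	/* non-blocking attempt first; EBUSY means another owner holds it */
	if (lthread_mutex_trylock(m) != 0) {
		/* fall back to a blocking lock; on contention the calling
		 * lthread is suspended and later re-queued by unlock()
		 */
		if (lthread_mutex_lock(m) != 0)
			return;
	}

	/* ... critical section ... */

	/* only the owner may unlock; any other thread gets EPERM */
	lthread_mutex_unlock(m);
}
#endif /* LTHREAD_MUTEX_USAGE_EXAMPLE */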
/*
 * return the diagnostic ref val stored in a mutex
 */
uint64_t
lthread_mutex_diag_ref(struct lthread_mutex *m)
{
	if (m == NULL)
		return 0;
	return m->diag_ref;
}
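/*
 * Illustrative use of the diagnostic reference accessor above, compiled
 * out behind the hypothetical LTHREAD_MUTEX_USAGE_EXAMPLE guard. The
 * ref value is only populated when the lthread_diag hooks are active;
 * for a NULL mutex pointer the accessor returns 0.
 */
#ifdef LTHREAD_MUTEX_USAGE_EXAMPLE
static void
example_mutex_diag(struct lthread_mutex *m)
{
	uint64_t ref = lthread_mutex_diag_ref(m);

	if (ref != 0)
		printf("mutex %s diag ref: %" PRIu64 "\n", m->name, ref);
}
#endif /* LTHREAD_MUTEX_USAGE_EXAMPLE */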