/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Some portions of this software are derived from
 * https://github.com/halayli/lthread, which carries the following license.
 *
 * Copyright (C) 2012, Hasan Alayli <[email protected]>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#define RTE_MEM 1

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <stddef.h>
#include <limits.h>
#include <inttypes.h>
#include <unistd.h>
#include <pthread.h>
#include <fcntl.h>
#include <sys/time.h>
#include <sys/mman.h>

#include <rte_log.h>
#include <ctx.h>

#include "lthread_api.h"
#include "lthread.h"
#include "lthread_timer.h"
#include "lthread_tls.h"
#include "lthread_objcache.h"
#include "lthread_diag.h"


/*
 * This function gets called after an lthread function has returned.
 */
void _lthread_exit_handler(struct lthread *lt)
{

	lt->state |= BIT(ST_LT_EXITED);

	if (!(lt->state & BIT(ST_LT_DETACH))) {
		/* if the thread is not explicitly detached
		 * it must be joinable, so we call lthread_exit().
		 */
		lthread_exit(NULL);
	}

	/* if we get here the thread is detached so we can reschedule it,
	 * allowing the scheduler to free it
	 */
	_reschedule();
}
/*
 * Free resources allocated to an lthread
 */
void _lthread_free(struct lthread *lt)
{

	DIAG_EVENT(lt, LT_DIAG_LTHREAD_FREE, lt, 0);

	/* invoke any user TLS destructor functions */
	_lthread_tls_destroy(lt);

	/* free memory allocated for TLS defined using RTE_PER_LTHREAD macros */
	if (sizeof(void *) < (uint64_t)RTE_PER_LTHREAD_SECTION_SIZE)
		_lthread_objcache_free(lt->tls->root_sched->per_lthread_cache,
					lt->per_lthread_data);

	/* free pthread style TLS memory */
	_lthread_objcache_free(lt->tls->root_sched->tls_cache, lt->tls);

	/* free the stack */
	_lthread_objcache_free(lt->stack_container->root_sched->stack_cache,
				lt->stack_container);

	/* now free the thread */
	_lthread_objcache_free(lt->root_sched->lthread_cache, lt);

}

/*
 * Allocate a stack and maintain a cache of stacks
 */
struct lthread_stack *_stack_alloc(void)
{
	struct lthread_stack *s;

	s = _lthread_objcache_alloc((THIS_SCHED)->stack_cache);
	RTE_ASSERT(s != NULL);

	s->root_sched = THIS_SCHED;
	s->stack_size = LTHREAD_MAX_STACK_SIZE;
	return s;
}

/*
 * Execute a ctx by invoking the start function
 * On return call an exit handler if the user has provided one
 */
static void _lthread_exec(void *arg)
{
	struct lthread *lt = (struct lthread *)arg;

	/* invoke the context's function */
	lt->fun(lt->arg);
	/* do exit handling */
	if (lt->exit_handler != NULL)
		lt->exit_handler(lt);
}

/*
 * Initialize an lthread
 * Set its function, args, and exit handler
 */
void
_lthread_init(struct lthread *lt,
	lthread_func_t fun, void *arg, lthread_exit_func exit_handler)
{

	/* set ctx func and args */
	lt->fun = fun;
	lt->arg = arg;
	lt->exit_handler = exit_handler;

	/* set initial state */
	lt->birth = _sched_now();
	lt->state = BIT(ST_LT_INIT);
	lt->join = LT_JOIN_INITIAL;
}
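/*
 * Layout of the initial context set up by _lthread_set_stack() below.
 * This is a descriptive sketch only; exactly how these slots are consumed
 * is determined by the context switch code in ctx.h and is not asserted
 * here.
 *
 *	stack + stack_size ->	+---------------------+
 *			s[-1]	| (unused)            |
 *			s[-2]	| struct lthread *lt  |
 *			s[-3]	| NULL                |   <- ctx.rbp
 *			s[-4]	|                     |   <- ctx.rsp
 *				+---------------------+
 *
 * ctx.rip is pointed at _lthread_exec(), so the first ctx_switch() into the
 * new lthread starts executing _lthread_exec() on this stack.
 */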
/*
 * set the lthread stack
 */
void _lthread_set_stack(struct lthread *lt, void *stack, size_t stack_size)
{
	char *stack_top = (char *)stack + stack_size;
	void **s = (void **)stack_top;

	/* set stack */
	lt->stack = stack;
	lt->stack_size = stack_size;

	/* set initial context */
	s[-3] = NULL;
	s[-2] = (void *)lt;
	lt->ctx.rsp = (void *)(stack_top - (4 * sizeof(void *)));
	lt->ctx.rbp = (void *)(stack_top - (3 * sizeof(void *)));
	lt->ctx.rip = (void *)_lthread_exec;
}

/*
 * Create an lthread on the current scheduler
 * If there is no current scheduler on this pthread then first create one
 */
int
lthread_create(struct lthread **new_lt, int lcore_id,
		lthread_func_t fun, void *arg)
{
	if ((new_lt == NULL) || (fun == NULL))
		return POSIX_ERRNO(EINVAL);

	if (lcore_id < 0)
		lcore_id = rte_lcore_id();
	else if (lcore_id > LTHREAD_MAX_LCORES)
		return POSIX_ERRNO(EINVAL);

	struct lthread *lt = NULL;

	if (THIS_SCHED == NULL) {
		THIS_SCHED = _lthread_sched_create(0);
		if (THIS_SCHED == NULL) {
			perror("Failed to create scheduler");
			return POSIX_ERRNO(EAGAIN);
		}
	}

	/* allocate a thread structure */
	lt = _lthread_objcache_alloc((THIS_SCHED)->lthread_cache);
	if (lt == NULL)
		return POSIX_ERRNO(EAGAIN);

	bzero(lt, sizeof(struct lthread));
	lt->root_sched = THIS_SCHED;

	/* set the function args and exit handler */
	_lthread_init(lt, fun, arg, _lthread_exit_handler);

	/* put it in the ready queue */
	*new_lt = lt;

	if (lcore_id < 0)
		lcore_id = rte_lcore_id();

	DIAG_CREATE_EVENT(lt, LT_DIAG_LTHREAD_CREATE);

	rte_wmb();
	_ready_queue_insert(_lthread_sched_get(lcore_id), lt);
	return 0;
}
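/*
 * Usage sketch for lthread_create() (illustrative only, not part of the
 * library). It assumes the scheduler entry point lthread_run() declared in
 * lthread_api.h and a worker whose signature matches lthread_func_t; the
 * names hello() and spawner() are hypothetical.
 *
 *	static void hello(void *arg)
 *	{
 *		printf("hello from lthread, arg=%p\n", arg);
 *		// returning lands in _lthread_exit_handler(), which calls
 *		// lthread_exit(NULL) for a joinable (non-detached) thread
 *	}
 *
 *	static void spawner(void *arg)
 *	{
 *		struct lthread *lt;
 *
 *		// lcore_id < 0 means "place on the current lcore's scheduler"
 *		lthread_create(&lt, -1, hello, NULL);
 *		lthread_join(lt, NULL);
 *	}
 *
 * On each participating pthread the first lthread is typically created with
 * lthread_create(&lt, -1, spawner, NULL), followed by lthread_run() to enter
 * the scheduler loop.
 */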
/*
 * Schedules lthread to sleep for `nsecs`
 * setting the lthread state to ST_LT_SLEEPING.
 * lthread state is cleared upon resumption or expiry.
 */
static inline void _lthread_sched_sleep(struct lthread *lt, uint64_t nsecs)
{
	uint64_t state = lt->state;
	uint64_t clks = _ns_to_clks(nsecs);

	if (clks) {
		_timer_start(lt, clks);
		lt->state = state | BIT(ST_LT_SLEEPING);
	}
	DIAG_EVENT(lt, LT_DIAG_LTHREAD_SLEEP, clks, 0);
	_suspend();
}

/*
 * Cancels any running timer.
 * This can be called multiple times on the same lthread regardless if it was
 * sleeping or not.
 */
int _lthread_desched_sleep(struct lthread *lt)
{
	uint64_t state = lt->state;

	if (state & BIT(ST_LT_SLEEPING)) {
		_timer_stop(lt);
		state &= (CLEARBIT(ST_LT_SLEEPING) & CLEARBIT(ST_LT_EXPIRED));
		lt->state = state | BIT(ST_LT_READY);
		return 1;
	}
	return 0;
}

/*
 * set user data pointer in an lthread
 */
void lthread_set_data(void *data)
{
	if (sizeof(void *) == RTE_PER_LTHREAD_SECTION_SIZE)
		THIS_LTHREAD->per_lthread_data = data;
}

/*
 * Retrieve user data pointer from an lthread
 */
void *lthread_get_data(void)
{
	return THIS_LTHREAD->per_lthread_data;
}

/*
 * Return the current lthread handle
 */
struct lthread *lthread_current(void)
{
	struct lthread_sched *sched = THIS_SCHED;

	if (sched)
		return sched->current_lthread;
	return NULL;
}
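/*
 * Usage sketch for lthread_set_data()/lthread_get_data() (illustrative only).
 * As the code above shows, the pointer is only stored when the per-lthread
 * data section is exactly one pointer wide (sizeof(void *) ==
 * RTE_PER_LTHREAD_SECTION_SIZE); when the section is larger, per_lthread_data
 * instead holds the block used by the RTE_PER_LTHREAD macros (see
 * _lthread_free()). The struct my_ctx name here is hypothetical.
 *
 *	struct my_ctx ctx = { ... };
 *
 *	lthread_set_data(&ctx);                  // inside some lthread
 *	...
 *	struct my_ctx *p = lthread_get_data();   // later, in the same lthread
 */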
/*
 * Tasklet to cancel a thread
 */
static void
_cancel(void *arg)
{
	struct lthread *lt = (struct lthread *) arg;

	lt->state |= BIT(ST_LT_CANCELLED);
	lthread_detach();
}


/*
 * Mark the specified lthread as cancelled
 */
int lthread_cancel(struct lthread *cancel_lt)
{
	struct lthread *lt;

	if ((cancel_lt == NULL) || (cancel_lt == THIS_LTHREAD))
		return POSIX_ERRNO(EINVAL);

	DIAG_EVENT(cancel_lt, LT_DIAG_LTHREAD_CANCEL, cancel_lt, 0);

	if (cancel_lt->sched != THIS_SCHED) {

		/* spawn a tasklet on the target lcore to cancel the thread */
		lthread_create(&lt,
				cancel_lt->sched->lcore_id,
				_cancel,
				cancel_lt);
		return 0;
	}
	cancel_lt->state |= BIT(ST_LT_CANCELLED);
	return 0;
}

/*
 * Suspend the current lthread for specified time
 */
void lthread_sleep(uint64_t nsecs)
{
	struct lthread *lt = THIS_LTHREAD;

	_lthread_sched_sleep(lt, nsecs);

}

/*
 * Suspend the current lthread for the specified number of clock ticks
 */
void lthread_sleep_clks(uint64_t clks)
{
	struct lthread *lt = THIS_LTHREAD;
	uint64_t state = lt->state;

	if (clks) {
		_timer_start(lt, clks);
		lt->state = state | BIT(ST_LT_SLEEPING);
	}
	DIAG_EVENT(lt, LT_DIAG_LTHREAD_SLEEP, clks, 0);
	_suspend();
}

/*
 * Requeue the current thread to the back of the ready queue
 */
void lthread_yield(void)
{
	struct lthread *lt = THIS_LTHREAD;

	DIAG_EVENT(lt, LT_DIAG_LTHREAD_YIELD, 0, 0);

	_ready_queue_insert(THIS_SCHED, lt);
	ctx_switch(&(THIS_SCHED)->ctx, &lt->ctx);
}
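/*
 * Cooperative scheduling sketch (illustrative only): lthreads are not
 * preempted, so a long-running worker should yield or sleep to let other
 * lthreads on the same scheduler make progress. The poll_port() helper named
 * here is hypothetical.
 *
 *	static void poller(void *arg)
 *	{
 *		for (;;) {
 *			if (poll_port(arg) == 0)
 *				lthread_sleep(1000);   // idle: sleep ~1 usec
 *			else
 *				lthread_yield();       // stay ready, let others run
 *		}
 *	}
 */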
/*
 * Exit the current lthread
 * If a thread is joining pass the user pointer to it
 */
void lthread_exit(void *ptr)
{
	struct lthread *lt = THIS_LTHREAD;

	/* if thread is detached (this is not valid) just exit */
	if (lt->state & BIT(ST_LT_DETACH))
		return;

	/* There is a race between lthread_join() and lthread_exit()
	 *  - if exit before join then we suspend and resume on join
	 *  - if join before exit then we resume the joining thread
	 */
	if ((lt->join == LT_JOIN_INITIAL)
	    && rte_atomic64_cmpset(&lt->join, LT_JOIN_INITIAL,
				   LT_JOIN_EXITING)) {

		DIAG_EVENT(lt, LT_DIAG_LTHREAD_EXIT, 1, 0);
		_suspend();
		/* set the exit value */
		if ((ptr != NULL) && (lt->lt_join->lt_exit_ptr != NULL))
			*(lt->lt_join->lt_exit_ptr) = ptr;

		/* let the joining thread know we have set the exit value */
		lt->join = LT_JOIN_EXIT_VAL_SET;
	} else {

		DIAG_EVENT(lt, LT_DIAG_LTHREAD_EXIT, 0, 0);
		/* set the exit value */
		if ((ptr != NULL) && (lt->lt_join->lt_exit_ptr != NULL))
			*(lt->lt_join->lt_exit_ptr) = ptr;
		/* let the joining thread know we have set the exit value */
		lt->join = LT_JOIN_EXIT_VAL_SET;
		_ready_queue_insert(lt->lt_join->sched,
				    (struct lthread *)lt->lt_join);
	}

	/* wait until the joining thread has collected the exit value */
	while (lt->join != LT_JOIN_EXIT_VAL_READ)
		_reschedule();

	/* reset join state */
	lt->join = LT_JOIN_INITIAL;

	/* detach it so its resources can be released */
	lt->state |= (BIT(ST_LT_DETACH) | BIT(ST_LT_EXITED));
}

/*
 * Join an lthread
 * Suspend until the joined thread returns
 */
int lthread_join(struct lthread *lt, void **ptr)
{
	if (lt == NULL)
		return POSIX_ERRNO(EINVAL);

	struct lthread *current = THIS_LTHREAD;
	uint64_t lt_state = lt->state;

	/* invalid to join a detached thread, or a thread that is joined */
	if ((lt_state & BIT(ST_LT_DETACH)) || (lt->join == LT_JOIN_THREAD_SET))
		return POSIX_ERRNO(EINVAL);
	/* pointer to the joining thread and a pointer to return a value */
	lt->lt_join = current;
	current->lt_exit_ptr = ptr;
	/* There is a race between lthread_join() and lthread_exit()
	 *  - if join before exit we suspend and will resume when exit is called
	 *  - if exit before join we resume the exiting thread
	 */
	if ((lt->join == LT_JOIN_INITIAL)
	    && rte_atomic64_cmpset(&lt->join, LT_JOIN_INITIAL,
				   LT_JOIN_THREAD_SET)) {

		DIAG_EVENT(current, LT_DIAG_LTHREAD_JOIN, lt, 1);
		_suspend();
	} else {
		DIAG_EVENT(current, LT_DIAG_LTHREAD_JOIN, lt, 0);
		_ready_queue_insert(lt->sched, lt);
	}

	/* wait for exiting thread to set return value */
	while (lt->join != LT_JOIN_EXIT_VAL_SET)
		_reschedule();

	/* collect the return value */
	if (ptr != NULL)
		*ptr = *current->lt_exit_ptr;

	/* let the exiting thread proceed to exit */
	lt->join = LT_JOIN_EXIT_VAL_READ;
	return 0;
}
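/*
 * Usage sketch for the exit/join handshake (illustrative only). The worker
 * passes a pointer out through lthread_exit() and the joiner collects it;
 * whichever side arrives first suspends until the other catches up, as
 * described in the comments above. The names worker(), do_work() and result
 * are hypothetical; the object passed out must remain valid until the join
 * completes.
 *
 *	static int result;
 *
 *	static void worker(void *arg)
 *	{
 *		result = do_work(arg);
 *		lthread_exit(&result);
 *	}
 *
 *	// in the parent lthread:
 *	//	struct lthread *lt;
 *	//	void *ret;
 *	//	lthread_create(&lt, -1, worker, NULL);
 *	//	lthread_join(lt, &ret);        // *(int *)ret == result
 */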
/*
 * Detach current lthread
 * A detached thread cannot be joined
 */
void lthread_detach(void)
{
	struct lthread *lt = THIS_LTHREAD;

	DIAG_EVENT(lt, LT_DIAG_LTHREAD_DETACH, 0, 0);

	uint64_t state = lt->state;

	lt->state = state | BIT(ST_LT_DETACH);
}

/*
 * Set function name of an lthread
 * this is a debug aid
 */
void lthread_set_funcname(const char *f)
{
	struct lthread *lt = THIS_LTHREAD;

	strncpy(lt->funcname, f, sizeof(lt->funcname));
	lt->funcname[sizeof(lt->funcname)-1] = 0;
}