/*
 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 Intel Corporation.
 * Copyright 2012 Hasan Alayli <[email protected]>
 */

#define RTE_MEM 1

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <stddef.h>
#include <limits.h>
#include <inttypes.h>
#include <unistd.h>
#include <pthread.h>
#include <fcntl.h>
#include <sys/time.h>
#include <sys/mman.h>

#include <rte_log.h>
#include <ctx.h>
#include <stack.h>

#include "lthread_api.h"
#include "lthread.h"
#include "lthread_timer.h"
#include "lthread_tls.h"
#include "lthread_objcache.h"
#include "lthread_diag.h"

/*
 * This function gets called after an lthread function has returned.
 */
void _lthread_exit_handler(struct lthread *lt)
{

        lt->state |= BIT(ST_LT_EXITED);

        if (!(lt->state & BIT(ST_LT_DETACH))) {
                /* if the thread is not explicitly detached
                 * it must be joinable, so we call lthread_exit().
                 */
                lthread_exit(NULL);
        }

        /* if we get here the thread is detached so we can reschedule it,
         * allowing the scheduler to free it
         */
        _reschedule();
}


/*
 * Free resources allocated to an lthread
 */
void _lthread_free(struct lthread *lt)
{

        DIAG_EVENT(lt, LT_DIAG_LTHREAD_FREE, lt, 0);

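        /*
         * Note: every object released below is returned to the object cache
         * of the scheduler that originally allocated it (its root_sched),
         * so buffers always flow back to the cache they came from.
         */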
        /* invoke any user TLS destructor functions */
        _lthread_tls_destroy(lt);

        /* free memory allocated for TLS defined using RTE_PER_LTHREAD macros */
        if (sizeof(void *) < (uint64_t)RTE_PER_LTHREAD_SECTION_SIZE)
                _lthread_objcache_free(lt->tls->root_sched->per_lthread_cache,
                                        lt->per_lthread_data);

        /* free pthread style TLS memory */
        _lthread_objcache_free(lt->tls->root_sched->tls_cache, lt->tls);

        /* free the stack */
        _lthread_objcache_free(lt->stack_container->root_sched->stack_cache,
                                lt->stack_container);

        /* now free the thread */
        _lthread_objcache_free(lt->root_sched->lthread_cache, lt);

}

/*
 * Allocate a stack and maintain a cache of stacks
 */
struct lthread_stack *_stack_alloc(void)
{
        struct lthread_stack *s;

        s = _lthread_objcache_alloc((THIS_SCHED)->stack_cache);
        RTE_ASSERT(s != NULL);

        s->root_sched = THIS_SCHED;
        s->stack_size = LTHREAD_MAX_STACK_SIZE;
        return s;
}
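/*
 * Note: stacks allocated here are expected to be bound to an lthread with
 * _lthread_set_stack() and are eventually returned to the owning scheduler's
 * stack cache in _lthread_free().
 */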

/*
 * Execute a ctx by invoking the start function
 * On return call an exit handler if the user has provided one
 */
static void _lthread_exec(void *arg)
{
        struct lthread *lt = (struct lthread *)arg;

        /* invoke the context's start function */
        lt->fun(lt->arg);
        /* do exit handling */
        if (lt->exit_handler != NULL)
                lt->exit_handler(lt);
}

/*
 * Initialize an lthread
 * Set its function, args, and exit handler
 */
void
_lthread_init(struct lthread *lt,
        lthread_func_t fun, void *arg, lthread_exit_func exit_handler)
{

        /* set ctx func and args */
        lt->fun = fun;
        lt->arg = arg;
        lt->exit_handler = exit_handler;

        /* set initial state */
        lt->birth = _sched_now();
        lt->state = BIT(ST_LT_INIT);
        lt->join = LT_JOIN_INITIAL;
}

/*
 * set the lthread stack
 */
void _lthread_set_stack(struct lthread *lt, void *stack, size_t stack_size)
{
        /* set stack */
        lt->stack = stack;
        lt->stack_size = stack_size;

        arch_set_stack(lt, _lthread_exec);
}
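/*
 * Note: arch_set_stack() is expected to prepare the lthread's saved context
 * so that the first switch into this lthread begins executing
 * _lthread_exec(), which then calls the user function and the exit handler.
 */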

/*
 * Create an lthread on the current scheduler
 * If there is no current scheduler on this pthread then first create one
 */
int
lthread_create(struct lthread **new_lt, int lcore_id,
                lthread_func_t fun, void *arg)
{
        if ((new_lt == NULL) || (fun == NULL))
                return POSIX_ERRNO(EINVAL);

        if (lcore_id < 0)
                lcore_id = rte_lcore_id();
        else if (lcore_id > LTHREAD_MAX_LCORES)
                return POSIX_ERRNO(EINVAL);

        struct lthread *lt = NULL;

        if (THIS_SCHED == NULL) {
                THIS_SCHED = _lthread_sched_create(0);
                if (THIS_SCHED == NULL) {
                        perror("Failed to create scheduler");
                        return POSIX_ERRNO(EAGAIN);
                }
        }

        /* allocate a thread structure */
        lt = _lthread_objcache_alloc((THIS_SCHED)->lthread_cache);
        if (lt == NULL)
                return POSIX_ERRNO(EAGAIN);

        bzero(lt, sizeof(struct lthread));
        lt->root_sched = THIS_SCHED;

        /* set the function, args and exit handler */
        _lthread_init(lt, fun, arg, _lthread_exit_handler);

        /* return the new thread to the caller */
        *new_lt = lt;

        if (lcore_id < 0)
                lcore_id = rte_lcore_id();

        DIAG_CREATE_EVENT(lt, LT_DIAG_LTHREAD_CREATE);

        /* put it in the ready queue of the target scheduler */
        rte_wmb();
        _ready_queue_insert(_lthread_sched_get(lcore_id), lt);
        return 0;
}
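/*
 * Note: a negative lcore_id places the new lthread on the current lcore's
 * scheduler; the scheduler itself is created lazily the first time the
 * calling pthread creates an lthread.
 */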

/*
 * Schedule an lthread to sleep for `nsecs`,
 * setting the lthread state to ST_LT_SLEEPING.
 * The state is cleared upon resumption or expiry.
 */
static inline void _lthread_sched_sleep(struct lthread *lt, uint64_t nsecs)
{
        uint64_t state = lt->state;
        uint64_t clks = _ns_to_clks(nsecs);

        if (clks) {
                _timer_start(lt, clks);
                lt->state = state | BIT(ST_LT_SLEEPING);
        }
        DIAG_EVENT(lt, LT_DIAG_LTHREAD_SLEEP, clks, 0);
        _suspend();
}
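/*
 * Note: the timer (and the ST_LT_SLEEPING bit) is only armed for a non-zero
 * duration; in every case the lthread suspends and control returns to the
 * scheduler.
 */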


/*
 * Cancel any running timer.
 * This can be called multiple times on the same lthread, regardless of
 * whether it was sleeping or not.
 */
int _lthread_desched_sleep(struct lthread *lt)
{
        uint64_t state = lt->state;

        if (state & BIT(ST_LT_SLEEPING)) {
                _timer_stop(lt);
                state &= (CLEARBIT(ST_LT_SLEEPING) & CLEARBIT(ST_LT_EXPIRED));
                lt->state = state | BIT(ST_LT_READY);
                return 1;
        }
        return 0;
}
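/*
 * Note: the return value tells the caller whether a pending timer was
 * actually cancelled: 1 if the lthread was sleeping and has been made ready,
 * 0 if there was nothing to cancel.
 */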

/*
 * set user data pointer in an lthread
 */
void lthread_set_data(void *data)
{
        if (sizeof(void *) == RTE_PER_LTHREAD_SECTION_SIZE)
                THIS_LTHREAD->per_lthread_data = data;
}

/*
 * Retrieve user data pointer from an lthread
 */
void *lthread_get_data(void)
{
        return THIS_LTHREAD->per_lthread_data;
}

/*
 * Return the current lthread handle
 */
struct lthread *lthread_current(void)
{
        struct lthread_sched *sched = THIS_SCHED;

        if (sched)
                return sched->current_lthread;
        return NULL;
}


/*
 * Tasklet to cancel a thread
 */
static void *
_cancel(void *arg)
{
        struct lthread *lt = (struct lthread *) arg;

        lt->state |= BIT(ST_LT_CANCELLED);
        lthread_detach();
        return NULL;
}


/*
 * Mark the specified lthread as cancelled
 */
int lthread_cancel(struct lthread *cancel_lt)
{
        struct lthread *lt;

        if ((cancel_lt == NULL) || (cancel_lt == THIS_LTHREAD))
                return POSIX_ERRNO(EINVAL);

        DIAG_EVENT(cancel_lt, LT_DIAG_LTHREAD_CANCEL, cancel_lt, 0);

        if (cancel_lt->sched != THIS_SCHED) {

                /* spawn a tasklet to cancel the thread */
                lthread_create(&lt,
                                cancel_lt->sched->lcore_id,
                                _cancel,
                                cancel_lt);
                return 0;
        }
        cancel_lt->state |= BIT(ST_LT_CANCELLED);
        return 0;
}
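/*
 * Note: when the target lthread lives on a different scheduler, the
 * cancelled bit is not set directly; instead the _cancel() tasklet above is
 * spawned on the target's lcore, sets the bit there and detaches itself so
 * that its scheduler can reclaim it.
 */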

/*
 * Suspend the current lthread for the specified time
 */
void lthread_sleep(uint64_t nsecs)
{
        struct lthread *lt = THIS_LTHREAD;

        _lthread_sched_sleep(lt, nsecs);
}

/*
 * Suspend the current lthread for the specified number of clock ticks
 */
void lthread_sleep_clks(uint64_t clks)
{
        struct lthread *lt = THIS_LTHREAD;
        uint64_t state = lt->state;

        if (clks) {
                _timer_start(lt, clks);
                lt->state = state | BIT(ST_LT_SLEEPING);
        }
        DIAG_EVENT(lt, LT_DIAG_LTHREAD_SLEEP, clks, 0);
        _suspend();
}
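/*
 * Note: this mirrors _lthread_sched_sleep() but takes the duration directly
 * in clock ticks instead of converting from nanoseconds.
 */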

/*
 * Requeue the current thread to the back of the ready queue
 */
void lthread_yield(void)
{
        struct lthread *lt = THIS_LTHREAD;

        DIAG_EVENT(lt, LT_DIAG_LTHREAD_YIELD, 0, 0);

        _ready_queue_insert(THIS_SCHED, lt);
        ctx_switch(&(THIS_SCHED)->ctx, &lt->ctx);
}

/*
 * Exit the current lthread
 * If a thread is joining pass the user pointer to it
 */
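/*
 * Note on the join/exit handshake: lt->join moves from LT_JOIN_INITIAL to
 * either LT_JOIN_EXITING (exit won the race) or LT_JOIN_THREAD_SET (join won
 * the race), then to LT_JOIN_EXIT_VAL_SET once the exit value is published,
 * and finally to LT_JOIN_EXIT_VAL_READ when the joiner has collected it,
 * after which the exiting thread resets it to LT_JOIN_INITIAL.
 */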
void lthread_exit(void *ptr)
{
        struct lthread *lt = THIS_LTHREAD;

        /* if the thread is detached, calling lthread_exit() is not valid,
         * so just return
         */
        if (lt->state & BIT(ST_LT_DETACH))
                return;

        /* There is a race between lthread_join() and lthread_exit()
         * - if exit before join then we suspend and resume on join
         * - if join before exit then we resume the joining thread
         */
        if ((lt->join == LT_JOIN_INITIAL)
            && rte_atomic64_cmpset(&lt->join, LT_JOIN_INITIAL,
                                   LT_JOIN_EXITING)) {

                DIAG_EVENT(lt, LT_DIAG_LTHREAD_EXIT, 1, 0);
                _suspend();
                /* set the exit value */
                if ((ptr != NULL) && (lt->lt_join->lt_exit_ptr != NULL))
                        *(lt->lt_join->lt_exit_ptr) = ptr;

                /* let the joining thread know we have set the exit value */
                lt->join = LT_JOIN_EXIT_VAL_SET;
        } else {

                DIAG_EVENT(lt, LT_DIAG_LTHREAD_EXIT, 0, 0);
                /* set the exit value */
                if ((ptr != NULL) && (lt->lt_join->lt_exit_ptr != NULL))
                        *(lt->lt_join->lt_exit_ptr) = ptr;
                /* let the joining thread know we have set the exit value */
                lt->join = LT_JOIN_EXIT_VAL_SET;
                _ready_queue_insert(lt->lt_join->sched,
                                    (struct lthread *)lt->lt_join);
        }

        /* wait until the joining thread has collected the exit value */
        while (lt->join != LT_JOIN_EXIT_VAL_READ)
                _reschedule();

        /* reset join state */
        lt->join = LT_JOIN_INITIAL;

        /* detach it so its resources can be released */
        lt->state |= (BIT(ST_LT_DETACH) | BIT(ST_LT_EXITED));
}

/*
 * Join an lthread
 * Suspend until the joined thread returns
 */
int lthread_join(struct lthread *lt, void **ptr)
{
        if (lt == NULL)
                return POSIX_ERRNO(EINVAL);

        struct lthread *current = THIS_LTHREAD;
        uint64_t lt_state = lt->state;

        /* invalid to join a detached thread, or a thread that is already
         * being joined
         */
        if ((lt_state & BIT(ST_LT_DETACH)) || (lt->join == LT_JOIN_THREAD_SET))
                return POSIX_ERRNO(EINVAL);
        /* set a pointer to the joining thread and a pointer for the return
         * value
         */
        lt->lt_join = current;
        current->lt_exit_ptr = ptr;
        /* There is a race between lthread_join() and lthread_exit()
         * - if join before exit we suspend and will resume when exit is called
         * - if exit before join we resume the exiting thread
         */
        if ((lt->join == LT_JOIN_INITIAL)
            && rte_atomic64_cmpset(&lt->join, LT_JOIN_INITIAL,
                                   LT_JOIN_THREAD_SET)) {

                DIAG_EVENT(current, LT_DIAG_LTHREAD_JOIN, lt, 1);
                _suspend();
        } else {
                DIAG_EVENT(current, LT_DIAG_LTHREAD_JOIN, lt, 0);
                _ready_queue_insert(lt->sched, lt);
        }

        /* wait for the exiting thread to set the return value */
        while (lt->join != LT_JOIN_EXIT_VAL_SET)
                _reschedule();

        /* collect the return value */
        if (ptr != NULL)
                *ptr = *current->lt_exit_ptr;

        /* let the exiting thread proceed to exit */
        lt->join = LT_JOIN_EXIT_VAL_READ;
        return 0;
}
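/*
 * Usage sketch (illustrative only; "worker" and "arg" are hypothetical):
 *
 *      struct lthread *lt;
 *      void *retval;
 *
 *      lthread_create(&lt, -1, worker, arg);   // run on the current lcore
 *      ...
 *      lthread_join(lt, &retval);              // retval set by lthread_exit()
 */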


/*
 * Detach current lthread
 * A detached thread cannot be joined
 */
void lthread_detach(void)
{
        struct lthread *lt = THIS_LTHREAD;

        DIAG_EVENT(lt, LT_DIAG_LTHREAD_DETACH, 0, 0);

        uint64_t state = lt->state;

        lt->state = state | BIT(ST_LT_DETACH);
}
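/*
 * Note: once detached, the lthread is reclaimed by its scheduler after its
 * function returns (see _lthread_exit_handler()), so it must not be joined.
 */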

/*
 * Set function name of an lthread
 * this is a debug aid
 */
void lthread_set_funcname(const char *f)
{
        struct lthread *lt = THIS_LTHREAD;

        strncpy(lt->funcname, f, sizeof(lt->funcname));
        lt->funcname[sizeof(lt->funcname)-1] = 0;
}