/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Some portions of this software are derived from
 * https://github.com/halayli/lthread, which carries the following license.
 *
 * Copyright (C) 2012, Hasan Alayli <[email protected]>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#define RTE_MEM 1

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <stddef.h>
#include <limits.h>
#include <inttypes.h>
#include <unistd.h>
#include <pthread.h>
#include <fcntl.h>
#include <sys/time.h>
#include <sys/mman.h>

#include <rte_log.h>
#include <ctx.h>

#include "lthread_api.h"
#include "lthread.h"
#include "lthread_timer.h"
#include "lthread_tls.h"
#include "lthread_objcache.h"
#include "lthread_diag.h"


/*
 * This function gets called after an lthread function has returned.
 */
void _lthread_exit_handler(struct lthread *lt)
{

	lt->state |= BIT(ST_LT_EXITED);

	if (!(lt->state & BIT(ST_LT_DETACH))) {
		/* the thread is not explicitly detached,
		 * so it must be joinable; call lthread_exit().
		 */
		lthread_exit(NULL);
	}

	/* if we get here the thread is detached so we can reschedule it,
	 * allowing the scheduler to free it
	 */
	_reschedule();
}


/*
 * Free resources allocated to an lthread
 */
void _lthread_free(struct lthread *lt)
{

	DIAG_EVENT(lt, LT_DIAG_LTHREAD_FREE, lt, 0);

	/* invoke any user TLS destructor functions */
	_lthread_tls_destroy(lt);

	/* free memory allocated for TLS defined using RTE_PER_LTHREAD macros */
	if (sizeof(void *) < (uint64_t)RTE_PER_LTHREAD_SECTION_SIZE)
		_lthread_objcache_free(lt->tls->root_sched->per_lthread_cache,
					lt->per_lthread_data);

	/* free pthread style TLS memory */
	_lthread_objcache_free(lt->tls->root_sched->tls_cache, lt->tls);

	/* free the stack */
	_lthread_objcache_free(lt->stack_container->root_sched->stack_cache,
				lt->stack_container);

	/* now free the thread */
	_lthread_objcache_free(lt->root_sched->lthread_cache, lt);

}

/*
 * Allocate a stack and maintain a cache of stacks
 */
struct lthread_stack *_stack_alloc(void)
{
	struct lthread_stack *s;

	s = _lthread_objcache_alloc((THIS_SCHED)->stack_cache);
	RTE_ASSERT(s != NULL);

	s->root_sched = THIS_SCHED;
	s->stack_size = LTHREAD_MAX_STACK_SIZE;
	return s;
}

/*
 * Execute a ctx by invoking the start function.
 * On return, call the exit handler if the user has provided one.
 */
static void _lthread_exec(void *arg)
{
	struct lthread *lt = (struct lthread *)arg;

	/* invoke the context's start function */
	lt->fun(lt->arg);
	/* do exit handling */
	if (lt->exit_handler != NULL)
		lt->exit_handler(lt);
}

/*
 *	Initialize an lthread
 *	Set its function, args, and exit handler
 */
void
_lthread_init(struct lthread *lt,
	lthread_func_t fun, void *arg, lthread_exit_func exit_handler)
{

	/* set ctx func and args */
	lt->fun = fun;
	lt->arg = arg;
	lt->exit_handler = exit_handler;

	/* set initial state */
	lt->birth = _sched_now();
	lt->state = BIT(ST_LT_INIT);
	lt->join = LT_JOIN_INITIAL;
}

/*
 *	Set the lthread stack
 */
void _lthread_set_stack(struct lthread *lt, void *stack, size_t stack_size)
{
	char *stack_top = (char *)stack + stack_size;
	void **s = (void **)stack_top;

	/* set stack */
	lt->stack = stack;
	lt->stack_size = stack_size;

	/* set initial context */
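	/*
	 * The slots written below form the initial frame consumed by the
	 * context switch code: a NULL word and the lthread pointer near the
	 * top of the stack, with rsp/rbp primed into this frame and rip set
	 * to _lthread_exec, so the first switch into this thread begins
	 * executing _lthread_exec(). The lthread pointer is stored here so
	 * it can be handed to _lthread_exec() as its argument; the exact
	 * layout expected is dictated by the assembly in ctx.h.
	 */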
	s[-3] = NULL;
	s[-2] = (void *)lt;
	lt->ctx.rsp = (void *)(stack_top - (4 * sizeof(void *)));
	lt->ctx.rbp = (void *)(stack_top - (3 * sizeof(void *)));
	lt->ctx.rip = (void *)_lthread_exec;
}

/*
 * Create an lthread on the current scheduler
 * If there is no current scheduler on this pthread then first create one
 */
int
lthread_create(struct lthread **new_lt, int lcore_id,
		lthread_func_t fun, void *arg)
{
	if ((new_lt == NULL) || (fun == NULL))
		return POSIX_ERRNO(EINVAL);

	if (lcore_id < 0)
		lcore_id = rte_lcore_id();
	else if (lcore_id >= LTHREAD_MAX_LCORES)
		return POSIX_ERRNO(EINVAL);

	struct lthread *lt = NULL;

	if (THIS_SCHED == NULL) {
		THIS_SCHED = _lthread_sched_create(0);
		if (THIS_SCHED == NULL) {
			perror("Failed to create scheduler");
			return POSIX_ERRNO(EAGAIN);
		}
	}

	/* allocate a thread structure */
	lt = _lthread_objcache_alloc((THIS_SCHED)->lthread_cache);
	if (lt == NULL)
		return POSIX_ERRNO(EAGAIN);

	memset(lt, 0, sizeof(struct lthread));
	lt->root_sched = THIS_SCHED;

	/* set the function, args and exit handler */
	_lthread_init(lt, fun, arg, _lthread_exit_handler);

	/* return the new thread handle to the caller */
	*new_lt = lt;

	DIAG_CREATE_EVENT(lt, LT_DIAG_LTHREAD_CREATE);

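	/*
	 * Make sure all writes initialising the new lthread are visible
	 * before it is published on the target scheduler's ready queue,
	 * which may be polled by another lcore.
	 */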
	rte_wmb();
	_ready_queue_insert(_lthread_sched_get(lcore_id), lt);
	return 0;
}
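
/*
 * Example (illustrative only): creating an lthread from application code
 * on the current lcore. The hello() function and its argument are
 * hypothetical; a negative lcore_id selects the caller's lcore, as above.
 *
 *	static void hello(void *arg)
 *	{
 *		printf("hello from an lthread, arg=%p\n", arg);
 *	}
 *
 *	struct lthread *lt;
 *	if (lthread_create(&lt, -1, hello, NULL) != 0)
 *		rte_exit(EXIT_FAILURE, "lthread_create() failed\n");
 */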

/*
 * Schedule an lthread to sleep for `nsecs` nanoseconds,
 * setting the lthread state to ST_LT_SLEEPING.
 * The state is cleared when the lthread is resumed or the timer expires.
 */
static inline void _lthread_sched_sleep(struct lthread *lt, uint64_t nsecs)
{
	uint64_t state = lt->state;
	uint64_t clks = _ns_to_clks(nsecs);

	if (clks) {
		_timer_start(lt, clks);
		lt->state = state | BIT(ST_LT_SLEEPING);
	}
	DIAG_EVENT(lt, LT_DIAG_LTHREAD_SLEEP, clks, 0);
	_suspend();
}



/*
 * Cancel any running sleep timer and mark the lthread ready.
 * This can be called multiple times on the same lthread,
 * regardless of whether it was sleeping or not.
 */
int _lthread_desched_sleep(struct lthread *lt)
{
	uint64_t state = lt->state;

	if (state & BIT(ST_LT_SLEEPING)) {
		_timer_stop(lt);
		state &= (CLEARBIT(ST_LT_SLEEPING) & CLEARBIT(ST_LT_EXPIRED));
		lt->state = state | BIT(ST_LT_READY);
		return 1;
	}
	return 0;
}

/*
 * Set the user data pointer in the current lthread
 * (stored only when the per-lthread data section is exactly one pointer)
 */
void lthread_set_data(void *data)
{
	if (sizeof(void *) == RTE_PER_LTHREAD_SECTION_SIZE)
		THIS_LTHREAD->per_lthread_data = data;
}

/*
 * Retrieve the user data pointer from the current lthread
 */
void *lthread_get_data(void)
{
	return THIS_LTHREAD->per_lthread_data;
}
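
/*
 * Example (illustrative only): stashing per-lthread application state.
 * struct app_state is a hypothetical application type; note that
 * lthread_set_data() only takes effect when the per-lthread data section
 * is exactly one pointer wide (the size check above).
 *
 *	struct app_state *s = calloc(1, sizeof(*s));
 *
 *	lthread_set_data(s);
 *	...
 *	struct app_state *s2 = lthread_get_data();
 */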

/*
 * Return the current lthread handle
 */
struct lthread *lthread_current(void)
{
	struct lthread_sched *sched = THIS_SCHED;

	if (sched)
		return sched->current_lthread;
	return NULL;
}



/*
 * Tasklet, run on the target lthread's scheduler, to cancel a thread
 */
static void
_cancel(void *arg)
{
	struct lthread *lt = (struct lthread *) arg;

	lt->state |= BIT(ST_LT_CANCELLED);
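	/*
	 * Detach the tasklet itself so the scheduler frees it as soon as
	 * it returns; nothing ever joins it.
	 */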
	lthread_detach();
}


/*
 * Mark the specified lthread as cancelled
 */
int lthread_cancel(struct lthread *cancel_lt)
{
	struct lthread *lt;

	if ((cancel_lt == NULL) || (cancel_lt == THIS_LTHREAD))
		return POSIX_ERRNO(EINVAL);

	DIAG_EVENT(cancel_lt, LT_DIAG_LTHREAD_CANCEL, cancel_lt, 0);

	if (cancel_lt->sched != THIS_SCHED) {

		/* spawn a tasklet to cancel the thread on its own scheduler */
		lthread_create(&lt,
				cancel_lt->sched->lcore_id,
				_cancel,
				cancel_lt);
		return 0;
	}
	cancel_lt->state |= BIT(ST_LT_CANCELLED);
	return 0;
}

/*
 * Suspend the current lthread for the specified time (in nanoseconds)
 */
void lthread_sleep(uint64_t nsecs)
{
	struct lthread *lt = THIS_LTHREAD;

	_lthread_sched_sleep(lt, nsecs);

}
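
/*
 * Example (illustrative only): a cooperative polling loop that sleeps
 * for 100 microseconds between iterations instead of spinning, so the
 * scheduler can run other lthreads on this lcore. The done flag and
 * poll_once() helper are hypothetical.
 *
 *	while (!done) {
 *		poll_once();
 *		lthread_sleep(100 * 1000);
 *	}
 */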

/*
 * Suspend the current lthread for the specified number of clock ticks
 */
void lthread_sleep_clks(uint64_t clks)
{
	struct lthread *lt = THIS_LTHREAD;
	uint64_t state = lt->state;

	if (clks) {
		_timer_start(lt, clks);
		lt->state = state | BIT(ST_LT_SLEEPING);
	}
	DIAG_EVENT(lt, LT_DIAG_LTHREAD_SLEEP, clks, 0);
	_suspend();
}

/*
 * Requeue the current thread to the back of the ready queue
 */
void lthread_yield(void)
{
	struct lthread *lt = THIS_LTHREAD;

	DIAG_EVENT(lt, LT_DIAG_LTHREAD_YIELD, 0, 0);

	_ready_queue_insert(THIS_SCHED, lt);
	ctx_switch(&(THIS_SCHED)->ctx, &lt->ctx);
}

/*
 * Exit the current lthread.
 * If another thread is joining, pass the user pointer to it.
 */
void lthread_exit(void *ptr)
{
	struct lthread *lt = THIS_LTHREAD;

	/* if the thread is detached there can be no joiner, so just return */
	if (lt->state & BIT(ST_LT_DETACH))
		return;

	/* There is a race between lthread_join() and lthread_exit()
	 *  - if exit before join then we suspend and resume on join
	 *  - if join before exit then we resume the joining thread
	 */
	if ((lt->join == LT_JOIN_INITIAL)
	    && rte_atomic64_cmpset(&lt->join, LT_JOIN_INITIAL,
				   LT_JOIN_EXITING)) {

		DIAG_EVENT(lt, LT_DIAG_LTHREAD_EXIT, 1, 0);
		_suspend();
		/* set the exit value */
		if ((ptr != NULL) && (lt->lt_join->lt_exit_ptr != NULL))
			*(lt->lt_join->lt_exit_ptr) = ptr;

		/* let the joining thread know we have set the exit value */
		lt->join = LT_JOIN_EXIT_VAL_SET;
	} else {

		DIAG_EVENT(lt, LT_DIAG_LTHREAD_EXIT, 0, 0);
		/* set the exit value */
		if ((ptr != NULL) && (lt->lt_join->lt_exit_ptr != NULL))
			*(lt->lt_join->lt_exit_ptr) = ptr;
		/* let the joining thread know we have set the exit value */
		lt->join = LT_JOIN_EXIT_VAL_SET;
		_ready_queue_insert(lt->lt_join->sched,
				    (struct lthread *)lt->lt_join);
	}


	/* wait until the joining thread has collected the exit value */
	while (lt->join != LT_JOIN_EXIT_VAL_READ)
		_reschedule();

	/* reset join state */
	lt->join = LT_JOIN_INITIAL;

	/* detach it so its resources can be released */
	lt->state |= (BIT(ST_LT_DETACH) | BIT(ST_LT_EXITED));
}
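
/*
 * Summary of the join/exit handshake implemented above and in
 * lthread_join() below (a reading aid distilled from the code, not a
 * separate specification). lt->join moves through:
 *
 *	LT_JOIN_INITIAL -> LT_JOIN_EXITING        exit ran first, exiter suspends
 *	LT_JOIN_INITIAL -> LT_JOIN_THREAD_SET     join ran first, joiner suspends
 *	           ...  -> LT_JOIN_EXIT_VAL_SET   exiter has stored the value
 *	           ...  -> LT_JOIN_EXIT_VAL_READ  joiner has collected it
 *
 * after which the exiter resets lt->join to LT_JOIN_INITIAL and marks
 * itself detached so the scheduler can free it.
 */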

/*
 * Join an lthread
 * Suspend until the joined thread returns
 */
int lthread_join(struct lthread *lt, void **ptr)
{
	if (lt == NULL)
		return POSIX_ERRNO(EINVAL);

	struct lthread *current = THIS_LTHREAD;
	uint64_t lt_state = lt->state;

	/* invalid to join a detached thread or one already being joined */
	if ((lt_state & BIT(ST_LT_DETACH)) || (lt->join == LT_JOIN_THREAD_SET))
		return POSIX_ERRNO(EINVAL);
	/* record the joining thread and a pointer for returning a value */
	lt->lt_join = current;
	current->lt_exit_ptr = ptr;
	/* There is a race between lthread_join() and lthread_exit()
	 *  - if join before exit we suspend and will resume when exit is called
	 *  - if exit before join we resume the exiting thread
	 */
	if ((lt->join == LT_JOIN_INITIAL)
	    && rte_atomic64_cmpset(&lt->join, LT_JOIN_INITIAL,
				   LT_JOIN_THREAD_SET)) {

		DIAG_EVENT(current, LT_DIAG_LTHREAD_JOIN, lt, 1);
		_suspend();
	} else {
		DIAG_EVENT(current, LT_DIAG_LTHREAD_JOIN, lt, 0);
		_ready_queue_insert(lt->sched, lt);
	}

	/* wait for exiting thread to set return value */
	while (lt->join != LT_JOIN_EXIT_VAL_SET)
		_reschedule();

	/* collect the return value */
	if (ptr != NULL)
		*ptr = *current->lt_exit_ptr;

	/* let the exiting thread proceed to exit */
	lt->join = LT_JOIN_EXIT_VAL_READ;
	return 0;
}
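
/*
 * Example (illustrative only): collecting a value from a worker lthread.
 * worker() and compute_result() are hypothetical; the worker hands a
 * pointer back through lthread_exit() and the creator collects it with
 * lthread_join().
 *
 *	static void worker(void *arg)
 *	{
 *		void *result = compute_result(arg);
 *
 *		lthread_exit(result);
 *	}
 *
 *	struct lthread *lt;
 *	void *result = NULL;
 *
 *	lthread_create(&lt, -1, worker, NULL);
 *	lthread_join(lt, &result);
 */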


/*
 * Detach current lthread
 * A detached thread cannot be joined
 */
void lthread_detach(void)
{
	struct lthread *lt = THIS_LTHREAD;

	DIAG_EVENT(lt, LT_DIAG_LTHREAD_DETACH, 0, 0);

	uint64_t state = lt->state;

	lt->state = state | BIT(ST_LT_DETACH);
}
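
/*
 * Example (illustrative only): a fire-and-forget worker that detaches
 * itself so the scheduler frees it as soon as it returns, with no
 * lthread_join() required. background_task() and do_work() are
 * hypothetical.
 *
 *	static void background_task(void *arg)
 *	{
 *		lthread_detach();
 *		do_work(arg);
 *	}
 */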

/*
 * Set the function name of an lthread; this is a debug aid
 */
void lthread_set_funcname(const char *f)
{
	struct lthread *lt = THIS_LTHREAD;

	strncpy(lt->funcname, f, sizeof(lt->funcname));
	lt->funcname[sizeof(lt->funcname)-1] = 0;
}