/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015 Intel Corporation
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <limits.h>
#include <inttypes.h>
#include <unistd.h>
#include <pthread.h>
#include <fcntl.h>
#include <sys/time.h>
#include <sys/mman.h>
#include <sched.h>

#include <rte_malloc.h>
#include <rte_log.h>
#include <rte_ring.h>
#include <rte_atomic_64.h>

#include "lthread_tls.h"
#include "lthread_queue.h"
#include "lthread_objcache.h"
#include "lthread_sched.h"

static struct rte_ring *key_pool;
static uint64_t key_pool_init;

/* needed to cause section start and end to be defined */
RTE_DEFINE_PER_LTHREAD(void *, dummy);

static struct lthread_key key_table[LTHREAD_MAX_KEYS];

RTE_INIT(thread_tls_ctor)
{
	key_pool = NULL;
	key_pool_init = 0;
}

/*
 * Initialize a pool of keys.
 * These are unique tokens that can be obtained by threads
 * calling lthread_key_create().
 */
void _lthread_key_pool_init(void)
{
	static struct rte_ring *pool;
	struct lthread_key *new_key;
	char name[MAX_LTHREAD_NAME_SIZE];

	/* use memset() from <string.h>; bzero() would need <strings.h> */
	memset(key_table, 0, sizeof(key_table));

	/* only one lcore should do this */
	if (rte_atomic64_cmpset(&key_pool_init, 0, 1)) {

		snprintf(name,
			MAX_LTHREAD_NAME_SIZE,
			"lthread_key_pool_%d",
			getpid());

		pool = rte_ring_create(name,
					LTHREAD_MAX_KEYS, 0, 0);
		RTE_ASSERT(pool);

		int i;

		/* key 0 is reserved, hand out keys 1..LTHREAD_MAX_KEYS-1 */
		for (i = 1; i < LTHREAD_MAX_KEYS; i++) {
			new_key = &key_table[i];
			rte_ring_mp_enqueue((struct rte_ring *)pool,
						(void *)new_key);
		}
		key_pool = pool;
	}
	/* other lcores wait here till done */
	while (key_pool == NULL) {
		rte_compiler_barrier();
		sched_yield();
	}
}
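
/*
 * Illustrative call pattern (a sketch, not taken from this file): each
 * lcore that brings up a scheduler is expected to pass through this
 * function during startup; the cmpset winner builds the ring, the others
 * spin until key_pool is published.
 *
 *	// hypothetical per-lcore startup path
 *	_lthread_key_pool_init();
 *	// from here on lthread_key_create() can safely dequeue keys
 */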

/*
 * Create a key; this means getting a key from the pool.
 */
int lthread_key_create(unsigned int *key, tls_destructor_func destructor)
{
	if (key == NULL)
		return POSIX_ERRNO(EINVAL);

	struct lthread_key *new_key;

	if (rte_ring_mc_dequeue((struct rte_ring *)key_pool, (void **)&new_key)
	    == 0) {
		new_key->destructor = destructor;
		*key = (new_key - key_table);

		return 0;
	}
	return POSIX_ERRNO(EAGAIN);
}
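
/*
 * Illustrative usage sketch (hypothetical names "session_destructor" and
 * "session"): a typical create/set/delete cycle run from inside an
 * lthread, with lthread_key_delete() defined below.
 *
 *	static void session_destructor(void *p)
 *	{
 *		free(p);
 *	}
 *
 *	unsigned int key;
 *
 *	if (lthread_key_create(&key, session_destructor) == 0) {
 *		lthread_setspecific(key, session);
 *		...
 *		lthread_key_delete(key);
 *	}
 */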


/*
 * Delete a key
 */
int lthread_key_delete(unsigned int k)
{
	struct lthread_key *key;

	/* validate before indexing; valid keys run 1..LTHREAD_MAX_KEYS-1 */
	if (k == 0 || k >= LTHREAD_MAX_KEYS)
		return POSIX_ERRNO(EINVAL);

	key = &key_table[k];

	key->destructor = NULL;
	rte_ring_mp_enqueue((struct rte_ring *)key_pool,
					(void *)key);
	return 0;
}



/*
 * Break the association for all keys in use by this lthread and
 * invoke the destructor where one is set.
 * Since a destructor can itself create keys we could enter an
 * infinite loop, so we give up after LTHREAD_DESTRUCTOR_ITERATIONS;
 * the behavior is modeled on pthreads.
 */
void _lthread_tls_destroy(struct lthread *lt)
{
	int i, k;
	int nb_keys;
	void *data;

	for (i = 0; i < LTHREAD_DESTRUCTOR_ITERATIONS; i++) {

		for (k = 1; k < LTHREAD_MAX_KEYS; k++) {

			/* no keys in use ? */
			nb_keys = lt->tls->nb_keys_inuse;
			if (nb_keys == 0)
				return;

			/* this key not in use ? */
			if (lt->tls->data[k] == NULL)
				continue;

			/* remove this key */
			data = lt->tls->data[k];
			lt->tls->data[k] = NULL;
			lt->tls->nb_keys_inuse = nb_keys - 1;

			/* invoke destructor */
			if (key_table[k].destructor != NULL)
				key_table[k].destructor(data);
		}
	}
}
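
/*
 * Illustrative sketch (hypothetical names "my_key" and "make_new_value"):
 * a destructor may itself store a fresh value, which is why the outer
 * loop above is bounded by LTHREAD_DESTRUCTOR_ITERATIONS rather than
 * running until no keys remain.
 *
 *	static void rearming_destructor(void *p)
 *	{
 *		// re-arm the same key; without the iteration cap this
 *		// could keep _lthread_tls_destroy() looping forever
 *		lthread_setspecific(my_key, make_new_value());
 *		free(p);
 *	}
 */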

/*
 * Return the pointer associated with a key.
 * If the key is no longer valid return NULL.
 */
void *
lthread_getspecific(unsigned int k)
{
	void *res = NULL;

	if (k < LTHREAD_MAX_KEYS)
		res = THIS_LTHREAD->tls->data[k];

	return res;
}

/*
 * Set a value against a key.
 * If the key is not valid return an error,
 * otherwise store the value.
 */
int lthread_setspecific(unsigned int k, const void *data)
{
	if (k >= LTHREAD_MAX_KEYS)
		return POSIX_ERRNO(EINVAL);

	int n = THIS_LTHREAD->tls->nb_keys_inuse;

	/* discard const qualifier */
	char *p = (char *) (uintptr_t) data;

	/* count the key as in use the first time it gets a non NULL value;
	 * clearing a value does not decrement the count, the slack is
	 * recovered when _lthread_tls_destroy() scans the keys
	 */
	if (data != NULL) {
		if (THIS_LTHREAD->tls->data[k] == NULL)
			THIS_LTHREAD->tls->nb_keys_inuse = n + 1;
	}

	THIS_LTHREAD->tls->data[k] = (void *) p;
	return 0;
}
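
/*
 * Illustrative usage sketch (hypothetical names "conn" and "conn_key"):
 * pairing lthread_setspecific() with lthread_getspecific(). Each lthread
 * sees only its own value for a given key.
 *
 *	struct conn *c = malloc(sizeof(*c));
 *
 *	lthread_setspecific(conn_key, c);
 *	...
 *	struct conn *mine = lthread_getspecific(conn_key);	// == c
 */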

/*
 * Allocate data for the TLS cache
 */
void _lthread_tls_alloc(struct lthread *lt)
{
	struct lthread_tls *tls;

	tls = _lthread_objcache_alloc((THIS_SCHED)->tls_cache);

	RTE_ASSERT(tls != NULL);

	tls->root_sched = (THIS_SCHED);
	lt->tls = tls;

	/* allocate data for TLS variables defined with the RTE_PER_LTHREAD
	 * macros; the section is larger than the dummy pointer only if the
	 * application declared per-lthread variables of its own
	 */
	if (sizeof(void *) < (uint64_t)RTE_PER_LTHREAD_SECTION_SIZE) {
		lt->per_lthread_data =
		    _lthread_objcache_alloc((THIS_SCHED)->per_lthread_cache);
	}
}
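
/*
 * Illustrative note (an assumption, modeled on the "dummy" definition
 * near the top of this file): application per-lthread variables are
 * declared with RTE_DEFINE_PER_LTHREAD so they land in the per-lthread
 * section; only then does RTE_PER_LTHREAD_SECTION_SIZE exceed
 * sizeof(void *) and trigger the allocation above.
 *
 *	RTE_DEFINE_PER_LTHREAD(uint64_t, counter);	// hypothetical variable
 */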