/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdio.h>	/* FILE, fprintf(), asprintf() */
#include <stdlib.h>	/* calloc(), free(), qsort() */
#include <string.h>

#include <rte_common.h>
#include <rte_branch_prediction.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_rwlock.h>

#include "eal_private.h"
#include "eal_thread.h"

unsigned int rte_get_main_lcore(void)
{
	return rte_eal_get_configuration()->main_lcore;
}

unsigned int rte_lcore_count(void)
{
	return rte_eal_get_configuration()->lcore_count;
}

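/*
 * Usage sketch (illustrative, not part of the original file): lcore ids
 * can be sparse when only some CPUs are enabled, while rte_lcore_index()
 * maps an lcore id to a dense 0..rte_lcore_count()-1 range. A compact
 * per-lcore array can therefore be allocated and indexed as:
 *
 *	uint64_t *stats = calloc(rte_lcore_count(), sizeof(*stats));
 *	...
 *	stats[rte_lcore_index(-1)]++;	(slot of the calling lcore)
 */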
int rte_lcore_index(int lcore_id)
{
	if (unlikely(lcore_id >= RTE_MAX_LCORE))
		return -1;

	if (lcore_id < 0) {
		if (rte_lcore_id() == LCORE_ID_ANY)
			return -1;

		lcore_id = (int)rte_lcore_id();
	}

	return lcore_config[lcore_id].core_index;
}

int rte_lcore_to_cpu_id(int lcore_id)
{
	if (unlikely(lcore_id >= RTE_MAX_LCORE))
		return -1;

	if (lcore_id < 0) {
		if (rte_lcore_id() == LCORE_ID_ANY)
			return -1;

		lcore_id = (int)rte_lcore_id();
	}

	return lcore_config[lcore_id].core_id;
}

rte_cpuset_t rte_lcore_cpuset(unsigned int lcore_id)
{
	return lcore_config[lcore_id].cpuset;
}

enum rte_lcore_role_t
rte_eal_lcore_role(unsigned int lcore_id)
{
	struct rte_config *cfg = rte_eal_get_configuration();

	if (lcore_id >= RTE_MAX_LCORE)
		return ROLE_OFF;
	return cfg->lcore_role[lcore_id];
}

int
rte_lcore_has_role(unsigned int lcore_id, enum rte_lcore_role_t role)
{
	struct rte_config *cfg = rte_eal_get_configuration();

	if (lcore_id >= RTE_MAX_LCORE)
		return -EINVAL;

	return cfg->lcore_role[lcore_id] == role;
}

int rte_lcore_is_enabled(unsigned int lcore_id)
{
	struct rte_config *cfg = rte_eal_get_configuration();

	if (lcore_id >= RTE_MAX_LCORE)
		return 0;
	return cfg->lcore_role[lcore_id] == ROLE_RTE;
}

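/*
 * Usage sketch (illustrative, not part of the original file): this
 * function is the iterator behind the RTE_LCORE_FOREACH* macros, e.g.:
 *
 *	unsigned int lcore_id;
 *	RTE_LCORE_FOREACH_WORKER(lcore_id)
 *		rte_eal_remote_launch(worker_fn, NULL, lcore_id);
 *
 * where worker_fn is a hypothetical lcore_function_t callback.
 */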
unsigned int rte_get_next_lcore(unsigned int i, int skip_main, int wrap)
{
	i++;
	if (wrap)
		i %= RTE_MAX_LCORE;

	while (i < RTE_MAX_LCORE) {
		if (!rte_lcore_is_enabled(i) ||
		    (skip_main && (i == rte_get_main_lcore()))) {
			i++;
			if (wrap)
				i %= RTE_MAX_LCORE;
			continue;
		}
		break;
	}
	return i;
}

unsigned int
rte_lcore_to_socket_id(unsigned int lcore_id)
{
	return lcore_config[lcore_id].socket_id;
}

/* Comparator for qsort(): sorts the per-lcore socket ids in ascending order. */
static int
socket_id_cmp(const void *a, const void *b)
{
	const int *lcore_id_a = a;
	const int *lcore_id_b = b;

	if (*lcore_id_a < *lcore_id_b)
		return -1;
	if (*lcore_id_a > *lcore_id_b)
		return 1;
	return 0;
}

/*
 * Parse the system CPU topology (on Linux, /sys/devices/system/cpu) to
 * detect the physical and logical processors present on the machine,
 * and fill the lcore_config array accordingly. This runs early in
 * rte_eal_init(), before the lcore command-line options are applied.
 */
int
rte_eal_cpu_init(void)
{
	/* pointer to global configuration */
	struct rte_config *config = rte_eal_get_configuration();
	unsigned lcore_id;
	unsigned count = 0;
	unsigned int socket_id, prev_socket_id;
	int lcore_to_socket_id[RTE_MAX_LCORE];

	/*
	 * Parse the maximum set of logical cores, detect the subset of running
	 * ones and enable them by default.
	 */
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		lcore_config[lcore_id].core_index = count;

		/* init cpuset for per lcore config */
		CPU_ZERO(&lcore_config[lcore_id].cpuset);

		/* find socket first */
		socket_id = eal_cpu_socket_id(lcore_id);
		lcore_to_socket_id[lcore_id] = socket_id;

		if (eal_cpu_detected(lcore_id) == 0) {
			config->lcore_role[lcore_id] = ROLE_OFF;
			lcore_config[lcore_id].core_index = -1;
			continue;
		}

		/* By default, each lcore maps 1:1 to a cpu id */
		CPU_SET(lcore_id, &lcore_config[lcore_id].cpuset);

		/* By default, each detected core is enabled */
		config->lcore_role[lcore_id] = ROLE_RTE;
		lcore_config[lcore_id].core_role = ROLE_RTE;
		lcore_config[lcore_id].core_id = eal_cpu_core_id(lcore_id);
		lcore_config[lcore_id].socket_id = socket_id;
		RTE_LOG(DEBUG, EAL, "Detected lcore %u as "
				"core %u on socket %u\n",
				lcore_id, lcore_config[lcore_id].core_id,
				lcore_config[lcore_id].socket_id);
		count++;
	}
	for (; lcore_id < CPU_SETSIZE; lcore_id++) {
		if (eal_cpu_detected(lcore_id) == 0)
			continue;
		RTE_LOG(DEBUG, EAL, "Skipped lcore %u as core %u on socket %u\n",
			lcore_id, eal_cpu_core_id(lcore_id),
			eal_cpu_socket_id(lcore_id));
	}

	/* Set the count of enabled logical cores of the EAL configuration */
	config->lcore_count = count;
	RTE_LOG(DEBUG, EAL,
		"Maximum logical cores by configuration: %u\n",
		RTE_MAX_LCORE);
	RTE_LOG(INFO, EAL, "Detected CPU lcores: %u\n", config->lcore_count);

	/* sort all socket ids in ascending order */
	qsort(lcore_to_socket_id, RTE_DIM(lcore_to_socket_id),
			sizeof(lcore_to_socket_id[0]), socket_id_cmp);

	prev_socket_id = -1;
	config->numa_node_count = 0;
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		socket_id = lcore_to_socket_id[lcore_id];
		if (socket_id != prev_socket_id)
			config->numa_nodes[config->numa_node_count++] =
					socket_id;
		prev_socket_id = socket_id;
	}
	RTE_LOG(INFO, EAL, "Detected NUMA nodes: %u\n", config->numa_node_count);

	return 0;
}

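/*
 * Usage sketch (illustrative, not part of the original file): the two
 * functions below let an application enumerate the detected NUMA nodes,
 * for instance to create one resource pool per socket:
 *
 *	unsigned int idx;
 *	for (idx = 0; idx < rte_socket_count(); idx++) {
 *		int socket_id = rte_socket_id_by_idx(idx);
 *		... allocate a per-socket resource on socket_id ...
 *	}
 */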
unsigned int
rte_socket_count(void)
{
	const struct rte_config *config = rte_eal_get_configuration();
	return config->numa_node_count;
}

int
rte_socket_id_by_idx(unsigned int idx)
{
	const struct rte_config *config = rte_eal_get_configuration();
	if (idx >= config->numa_node_count) {
		rte_errno = EINVAL;
		return -1;
	}
	return config->numa_nodes[idx];
}

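/*
 * Note (added for clarity, not in the original file): lcore_lock
 * serializes lcore role transitions and the callback list below.
 * Writers (callback registration and unregistration, non-EAL lcore
 * allocation and release) take the lock in write mode, while
 * rte_lcore_iterate() only needs read mode.
 */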
static rte_rwlock_t lcore_lock = RTE_RWLOCK_INITIALIZER;
struct lcore_callback {
	TAILQ_ENTRY(lcore_callback) next;
	char *name;
	rte_lcore_init_cb init;
	rte_lcore_uninit_cb uninit;
	void *arg;
};
static TAILQ_HEAD(lcore_callbacks_head, lcore_callback) lcore_callbacks =
	TAILQ_HEAD_INITIALIZER(lcore_callbacks);

static int
callback_init(struct lcore_callback *callback, unsigned int lcore_id)
{
	if (callback->init == NULL)
		return 0;
	RTE_LOG(DEBUG, EAL, "Call init for lcore callback %s, lcore_id %u\n",
		callback->name, lcore_id);
	return callback->init(lcore_id, callback->arg);
}

static void
callback_uninit(struct lcore_callback *callback, unsigned int lcore_id)
{
	if (callback->uninit == NULL)
		return;
	RTE_LOG(DEBUG, EAL, "Call uninit for lcore callback %s, lcore_id %u\n",
		callback->name, lcore_id);
	callback->uninit(lcore_id, callback->arg);
}

static void
free_callback(struct lcore_callback *callback)
{
	free(callback->name);
	free(callback);
}

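/*
 * Usage sketch (illustrative, not part of the original file): a library
 * can track lcores coming and going by registering hypothetical init and
 * uninit callbacks:
 *
 *	static int my_init(unsigned int lcore_id, void *arg) { return 0; }
 *	static void my_uninit(unsigned int lcore_id, void *arg) { }
 *	...
 *	void *handle = rte_lcore_callback_register("mylib",
 *		my_init, my_uninit, NULL);
 *	...
 *	rte_lcore_callback_unregister(handle);
 */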
void *
rte_lcore_callback_register(const char *name, rte_lcore_init_cb init,
	rte_lcore_uninit_cb uninit, void *arg)
{
	struct rte_config *cfg = rte_eal_get_configuration();
	struct lcore_callback *callback;
	unsigned int lcore_id;

	if (name == NULL)
		return NULL;
	callback = calloc(1, sizeof(*callback));
	if (callback == NULL)
		return NULL;
	if (asprintf(&callback->name, "%s-%p", name, arg) == -1) {
		free(callback);
		return NULL;
	}
	callback->init = init;
	callback->uninit = uninit;
	callback->arg = arg;
	rte_rwlock_write_lock(&lcore_lock);
	if (callback->init == NULL)
		goto no_init;
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (cfg->lcore_role[lcore_id] == ROLE_OFF)
			continue;
		if (callback_init(callback, lcore_id) == 0)
			continue;
		/* Callback refused init for this lcore, uninitialize all
		 * previous lcores.
		 */
		while (lcore_id-- != 0) {
			if (cfg->lcore_role[lcore_id] == ROLE_OFF)
				continue;
			callback_uninit(callback, lcore_id);
		}
		free_callback(callback);
		callback = NULL;
		goto out;
	}
no_init:
	TAILQ_INSERT_TAIL(&lcore_callbacks, callback, next);
	RTE_LOG(DEBUG, EAL, "Registered new lcore callback %s (%sinit, %suninit).\n",
		callback->name, callback->init == NULL ? "NO " : "",
		callback->uninit == NULL ? "NO " : "");
out:
	rte_rwlock_write_unlock(&lcore_lock);
	return callback;
}

void
rte_lcore_callback_unregister(void *handle)
{
	struct rte_config *cfg = rte_eal_get_configuration();
	struct lcore_callback *callback = handle;
	unsigned int lcore_id;

	if (callback == NULL)
		return;
	rte_rwlock_write_lock(&lcore_lock);
	if (callback->uninit == NULL)
		goto no_uninit;
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (cfg->lcore_role[lcore_id] == ROLE_OFF)
			continue;
		callback_uninit(callback, lcore_id);
	}
no_uninit:
	TAILQ_REMOVE(&lcore_callbacks, callback, next);
	rte_rwlock_write_unlock(&lcore_lock);
	RTE_LOG(DEBUG, EAL, "Unregistered lcore callback %s-%p.\n",
		callback->name, callback->arg);
	free_callback(callback);
}

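/*
 * Note (added for clarity, not in the original file): the two helpers
 * below are used by the public rte_thread_register()/rte_thread_unregister()
 * API. They hand out an lcore id with the ROLE_NON_EAL role for a thread
 * that was not created by EAL, and run the registered callbacks for it.
 */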
unsigned int
eal_lcore_non_eal_allocate(void)
{
	struct rte_config *cfg = rte_eal_get_configuration();
	struct lcore_callback *callback;
	struct lcore_callback *prev;
	unsigned int lcore_id;

	rte_rwlock_write_lock(&lcore_lock);
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (cfg->lcore_role[lcore_id] != ROLE_OFF)
			continue;
		cfg->lcore_role[lcore_id] = ROLE_NON_EAL;
		cfg->lcore_count++;
		break;
	}
	if (lcore_id == RTE_MAX_LCORE) {
		RTE_LOG(DEBUG, EAL, "No lcore available.\n");
		goto out;
	}
	TAILQ_FOREACH(callback, &lcore_callbacks, next) {
		if (callback_init(callback, lcore_id) == 0)
			continue;
		/* Callback refused init for this lcore, call uninit for all
		 * previous callbacks.
		 */
		prev = TAILQ_PREV(callback, lcore_callbacks_head, next);
		while (prev != NULL) {
			callback_uninit(prev, lcore_id);
			prev = TAILQ_PREV(prev, lcore_callbacks_head, next);
		}
		RTE_LOG(DEBUG, EAL, "Initialization refused for lcore %u.\n",
			lcore_id);
		cfg->lcore_role[lcore_id] = ROLE_OFF;
		cfg->lcore_count--;
		lcore_id = RTE_MAX_LCORE;
		goto out;
	}
out:
	rte_rwlock_write_unlock(&lcore_lock);
	return lcore_id;
}

void
eal_lcore_non_eal_release(unsigned int lcore_id)
{
	struct rte_config *cfg = rte_eal_get_configuration();
	struct lcore_callback *callback;

	rte_rwlock_write_lock(&lcore_lock);
	if (cfg->lcore_role[lcore_id] != ROLE_NON_EAL)
		goto out;
	TAILQ_FOREACH(callback, &lcore_callbacks, next)
		callback_uninit(callback, lcore_id);
	cfg->lcore_role[lcore_id] = ROLE_OFF;
	cfg->lcore_count--;
out:
	rte_rwlock_write_unlock(&lcore_lock);
}

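/*
 * Usage sketch (illustrative, not part of the original file): walk every
 * live lcore with a hypothetical callback; returning non-zero from the
 * callback stops the walk early:
 *
 *	static int count_cb(unsigned int lcore_id, void *arg)
 *	{
 *		(*(unsigned int *)arg)++;
 *		return 0;
 *	}
 *	...
 *	unsigned int n = 0;
 *	rte_lcore_iterate(count_cb, &n);
 */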
int
rte_lcore_iterate(rte_lcore_iterate_cb cb, void *arg)
{
	struct rte_config *cfg = rte_eal_get_configuration();
	unsigned int lcore_id;
	int ret = 0;

	rte_rwlock_read_lock(&lcore_lock);
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (cfg->lcore_role[lcore_id] == ROLE_OFF)
			continue;
		ret = cb(lcore_id, arg);
		if (ret != 0)
			break;
	}
	rte_rwlock_read_unlock(&lcore_lock);
	return ret;
}

static int
lcore_dump_cb(unsigned int lcore_id, void *arg)
{
	struct rte_config *cfg = rte_eal_get_configuration();
	char cpuset[RTE_CPU_AFFINITY_STR_LEN];
	const char *role;
	FILE *f = arg;
	int ret;

	switch (cfg->lcore_role[lcore_id]) {
	case ROLE_RTE:
		role = "RTE";
		break;
	case ROLE_SERVICE:
		role = "SERVICE";
		break;
	case ROLE_NON_EAL:
		role = "NON_EAL";
		break;
	default:
		role = "UNKNOWN";
		break;
	}

	ret = eal_thread_dump_affinity(&lcore_config[lcore_id].cpuset, cpuset,
		sizeof(cpuset));
	fprintf(f, "lcore %u, socket %u, role %s, cpuset %s%s\n", lcore_id,
		rte_lcore_to_socket_id(lcore_id), role, cpuset,
		ret == 0 ? "" : "...");
	return 0;
}

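/*
 * Usage sketch (illustrative, not part of the original file): dump the
 * lcore layout of a running application to the standard output:
 *
 *	rte_lcore_dump(stdout);
 */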
void
rte_lcore_dump(FILE *f)
{
	rte_lcore_iterate(lcore_dump_cb, f);
}