1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation.
3 * Copyright(c) 2014 6WIND S.A.
4 */
5
6 #include <stdlib.h>
7 #include <unistd.h>
8 #include <string.h>
9 #ifndef RTE_EXEC_ENV_WINDOWS
10 #include <syslog.h>
11 #endif
12 #include <ctype.h>
13 #include <limits.h>
14 #include <errno.h>
15 #include <getopt.h>
16 #ifndef RTE_EXEC_ENV_WINDOWS
17 #include <dlfcn.h>
18 #include <libgen.h>
19 #endif
20 #include <sys/types.h>
21 #include <sys/stat.h>
22 #ifndef RTE_EXEC_ENV_WINDOWS
23 #include <dirent.h>
24 #endif
25
26 #include <rte_string_fns.h>
27 #include <rte_eal.h>
28 #include <rte_log.h>
29 #include <rte_lcore.h>
30 #include <rte_memory.h>
31 #include <rte_tailq.h>
32 #include <rte_version.h>
33 #include <rte_devargs.h>
34 #include <rte_memcpy.h>
35 #ifndef RTE_EXEC_ENV_WINDOWS
36 #include <rte_telemetry.h>
37 #endif
38 #include <rte_vect.h>
39
40 #include "eal_internal_cfg.h"
41 #include "eal_options.h"
42 #include "eal_filesystem.h"
43 #include "eal_private.h"
44 #ifndef RTE_EXEC_ENV_WINDOWS
45 #include "eal_trace.h"
46 #endif
47
48 #define BITS_PER_HEX 4
49 #define LCORE_OPT_LST 1
50 #define LCORE_OPT_MSK 2
51 #define LCORE_OPT_MAP 3
52
53 const char
54 eal_short_options[] =
55 "a:" /* allow */
56 "b:" /* block */
57 "c:" /* coremask */
58 "s:" /* service coremask */
59 "d:" /* driver */
60 "h" /* help */
61 "l:" /* corelist */
62 "S:" /* service corelist */
63 "m:" /* memory size */
64 "n:" /* memory channels */
65 "r:" /* memory ranks */
66 "v" /* version */
67 "w:" /* pci-whitelist (deprecated) */
68 ;
69
70 const struct option
71 eal_long_options[] = {
72 {OPT_BASE_VIRTADDR, 1, NULL, OPT_BASE_VIRTADDR_NUM },
73 {OPT_CREATE_UIO_DEV, 0, NULL, OPT_CREATE_UIO_DEV_NUM },
74 {OPT_FILE_PREFIX, 1, NULL, OPT_FILE_PREFIX_NUM },
75 {OPT_HELP, 0, NULL, OPT_HELP_NUM },
76 {OPT_HUGE_DIR, 1, NULL, OPT_HUGE_DIR_NUM },
77 {OPT_HUGE_UNLINK, 0, NULL, OPT_HUGE_UNLINK_NUM },
78 {OPT_IOVA_MODE, 1, NULL, OPT_IOVA_MODE_NUM },
79 {OPT_LCORES, 1, NULL, OPT_LCORES_NUM },
80 {OPT_LOG_LEVEL, 1, NULL, OPT_LOG_LEVEL_NUM },
81 {OPT_TRACE, 1, NULL, OPT_TRACE_NUM },
82 {OPT_TRACE_DIR, 1, NULL, OPT_TRACE_DIR_NUM },
83 {OPT_TRACE_BUF_SIZE, 1, NULL, OPT_TRACE_BUF_SIZE_NUM },
84 {OPT_TRACE_MODE, 1, NULL, OPT_TRACE_MODE_NUM },
85 {OPT_MASTER_LCORE, 1, NULL, OPT_MASTER_LCORE_NUM },
86 {OPT_MAIN_LCORE, 1, NULL, OPT_MAIN_LCORE_NUM },
87 {OPT_MBUF_POOL_OPS_NAME, 1, NULL, OPT_MBUF_POOL_OPS_NAME_NUM},
88 {OPT_NO_HPET, 0, NULL, OPT_NO_HPET_NUM },
89 {OPT_NO_HUGE, 0, NULL, OPT_NO_HUGE_NUM },
90 {OPT_NO_PCI, 0, NULL, OPT_NO_PCI_NUM },
91 {OPT_NO_SHCONF, 0, NULL, OPT_NO_SHCONF_NUM },
92 {OPT_IN_MEMORY, 0, NULL, OPT_IN_MEMORY_NUM },
93 {OPT_DEV_BLOCK, 1, NULL, OPT_DEV_BLOCK_NUM },
94 {OPT_DEV_ALLOW, 1, NULL, OPT_DEV_ALLOW_NUM },
95 {OPT_PROC_TYPE, 1, NULL, OPT_PROC_TYPE_NUM },
96 {OPT_SOCKET_MEM, 1, NULL, OPT_SOCKET_MEM_NUM },
97 {OPT_SOCKET_LIMIT, 1, NULL, OPT_SOCKET_LIMIT_NUM },
98 {OPT_SYSLOG, 1, NULL, OPT_SYSLOG_NUM },
99 {OPT_VDEV, 1, NULL, OPT_VDEV_NUM },
100 {OPT_VFIO_INTR, 1, NULL, OPT_VFIO_INTR_NUM },
101 {OPT_VFIO_VF_TOKEN, 1, NULL, OPT_VFIO_VF_TOKEN_NUM },
102 {OPT_VMWARE_TSC_MAP, 0, NULL, OPT_VMWARE_TSC_MAP_NUM },
103 {OPT_LEGACY_MEM, 0, NULL, OPT_LEGACY_MEM_NUM },
104 {OPT_SINGLE_FILE_SEGMENTS, 0, NULL, OPT_SINGLE_FILE_SEGMENTS_NUM},
105 {OPT_MATCH_ALLOCATIONS, 0, NULL, OPT_MATCH_ALLOCATIONS_NUM},
106 {OPT_TELEMETRY, 0, NULL, OPT_TELEMETRY_NUM },
107 {OPT_NO_TELEMETRY, 0, NULL, OPT_NO_TELEMETRY_NUM },
108 {OPT_FORCE_MAX_SIMD_BITWIDTH, 1, NULL, OPT_FORCE_MAX_SIMD_BITWIDTH_NUM},
109
110 /* legacy options that will be removed in future */
111 {OPT_PCI_BLACKLIST, 1, NULL, OPT_PCI_BLACKLIST_NUM },
112 {OPT_PCI_WHITELIST, 1, NULL, OPT_PCI_WHITELIST_NUM },
113
114 {0, 0, NULL, 0 }
115 };
116
117 TAILQ_HEAD(shared_driver_list, shared_driver);
118
119 /* Definition for shared object drivers. */
120 struct shared_driver {
121 TAILQ_ENTRY(shared_driver) next;
122
123 char name[PATH_MAX];
124 void* lib_handle;
125 };
126
127 /* List of external loadable drivers */
128 static struct shared_driver_list solib_list =
129 TAILQ_HEAD_INITIALIZER(solib_list);
130
131 #ifndef RTE_EXEC_ENV_WINDOWS
132 /* Default path of external loadable drivers */
133 static const char *default_solib_dir = RTE_EAL_PMD_PATH;
134 #endif
135
136 /*
137 * Stringified version of solib path used by dpdk-pmdinfo.py
138 * Note: PLEASE DO NOT ALTER THIS without making a corresponding
139 * change to usertools/dpdk-pmdinfo.py
140 */
141 static const char dpdk_solib_path[] __rte_used =
142 "DPDK_PLUGIN_PATH=" RTE_EAL_PMD_PATH;
143
144 TAILQ_HEAD(device_option_list, device_option);
145
146 struct device_option {
147 TAILQ_ENTRY(device_option) next;
148
149 enum rte_devtype type;
150 char arg[];
151 };
152
153 static struct device_option_list devopt_list =
154 TAILQ_HEAD_INITIALIZER(devopt_list);
155
156 static int main_lcore_parsed;
157 static int mem_parsed;
158 static int core_parsed;
159
160 /* Allow the application to print its usage message too if set */
161 static rte_usage_hook_t rte_application_usage_hook;
162
163 /* Return the currently registered application usage hook */
164 rte_usage_hook_t
165 eal_get_application_usage_hook(void)
166 {
167 return rte_application_usage_hook;
168 }
169
170 /* Set a per-application usage message */
171 rte_usage_hook_t
172 rte_set_application_usage_hook(rte_usage_hook_t usage_func)
173 {
174 rte_usage_hook_t old_func;
175
176 /* Will be NULL on the first call to denote the last usage routine. */
177 old_func = rte_application_usage_hook;
178 rte_application_usage_hook = usage_func;
179
180 return old_func;
181 }
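/*
 * Illustrative application-side use of the hook above (a sketch, not part
 * of this file's behaviour): register a callback so the EAL help output
 * also prints the application's own option summary. "my_usage" below is a
 * hypothetical application function.
 *
 *   static void my_usage(const char *prgname)
 *   {
 *           printf("%s [EAL options] -- -p PORTMASK [-q NQ]\n", prgname);
 *   }
 *   ...
 *   rte_set_application_usage_hook(my_usage);
 *
 * The previous hook (NULL on the first call) is returned so it can be
 * restored or chained if needed.
 */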
182
183 #ifndef RTE_EXEC_ENV_WINDOWS
184 static char **eal_args;
185 static char **eal_app_args;
186
187 #define EAL_PARAM_REQ "/eal/params"
188 #define EAL_APP_PARAM_REQ "/eal/app_params"
189
190 /* callback handler for telemetry library to report out EAL flags */
191 int
192 handle_eal_info_request(const char *cmd, const char *params __rte_unused,
193 struct rte_tel_data *d)
194 {
195 char **args;
196 int used = 0;
197 int i = 0;
198
199 if (strcmp(cmd, EAL_PARAM_REQ) == 0)
200 args = eal_args;
201 else
202 args = eal_app_args;
203
204 rte_tel_data_start_array(d, RTE_TEL_STRING_VAL);
205 if (args == NULL || args[0] == NULL)
206 return 0;
207
208 for ( ; args[i] != NULL; i++)
209 used = rte_tel_data_add_array_string(d, args[i]);
210 return used;
211 }
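/*
 * Example (illustrative) of how this callback is consumed: querying
 * "/eal/params" or "/eal/app_params" over the telemetry socket (e.g. with
 * usertools/dpdk-telemetry.py) returns the saved argv as a string array,
 * roughly of the form:
 *   {"/eal/params": ["dpdk-testpmd", "-l", "0-3", "--in-memory", "--"]}
 * The exact values depend on how the process was started.
 */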
212
213 int
214 eal_save_args(int argc, char **argv)
215 {
216 int i, j;
217
218 rte_telemetry_register_cmd(EAL_PARAM_REQ, handle_eal_info_request,
219 "Returns EAL commandline parameters used. Takes no parameters");
220 rte_telemetry_register_cmd(EAL_APP_PARAM_REQ, handle_eal_info_request,
221 "Returns app commandline parameters used. Takes no parameters");
222
223 /* clone argv to report out later. We overprovision, but
224 * this does not waste huge amounts of memory
225 */
226 eal_args = calloc(argc + 1, sizeof(*eal_args));
227 if (eal_args == NULL)
228 return -1;
229
230 for (i = 0; i < argc; i++) {
231 eal_args[i] = strdup(argv[i]);
232 if (strcmp(argv[i], "--") == 0)
233 break;
234 }
235 eal_args[i++] = NULL; /* always finish with NULL */
236
237 /* allow reporting of any app args we know about too */
238 if (i >= argc)
239 return 0;
240
241 eal_app_args = calloc(argc - i + 1, sizeof(*eal_args));
242 if (eal_app_args == NULL)
243 return -1;
244
245 for (j = 0; i < argc; j++, i++)
246 eal_app_args[j] = strdup(argv[i]);
247 eal_app_args[j] = NULL;
248
249 return 0;
250 }
251 #endif
252
253 static int
254 eal_option_device_add(enum rte_devtype type, const char *optarg)
255 {
256 struct device_option *devopt;
257 size_t optlen;
258 int ret;
259
260 optlen = strlen(optarg) + 1;
261 devopt = calloc(1, sizeof(*devopt) + optlen);
262 if (devopt == NULL) {
263 RTE_LOG(ERR, EAL, "Unable to allocate device option\n");
264 return -ENOMEM;
265 }
266
267 devopt->type = type;
268 ret = strlcpy(devopt->arg, optarg, optlen);
269 if (ret < 0) {
270 RTE_LOG(ERR, EAL, "Unable to copy device option\n");
271 free(devopt);
272 return -EINVAL;
273 }
274 TAILQ_INSERT_TAIL(&devopt_list, devopt, next);
275 return 0;
276 }
277
278 int
279 eal_option_device_parse(void)
280 {
281 struct device_option *devopt;
282 void *tmp;
283 int ret = 0;
284
285 TAILQ_FOREACH_SAFE(devopt, &devopt_list, next, tmp) {
286 if (ret == 0) {
287 ret = rte_devargs_add(devopt->type, devopt->arg);
288 if (ret)
289 RTE_LOG(ERR, EAL, "Unable to parse device '%s'\n",
290 devopt->arg);
291 }
292 TAILQ_REMOVE(&devopt_list, devopt, next);
293 free(devopt);
294 }
295 return ret;
296 }
297
298 const char *
299 eal_get_hugefile_prefix(void)
300 {
301 const struct internal_config *internal_conf =
302 eal_get_internal_configuration();
303
304 if (internal_conf->hugefile_prefix != NULL)
305 return internal_conf->hugefile_prefix;
306 return HUGEFILE_PREFIX_DEFAULT;
307 }
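/*
 * For example (illustrative), with "--file-prefix=app1" the EAL runtime
 * files and hugepage mappings are created under a per-prefix runtime
 * directory and hugepage files are named using that prefix, which lets two
 * independent DPDK processes coexist on the same hugetlbfs mount. Exact
 * file naming is environment specific; this is only a sketch.
 */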
308
309 void
310 eal_reset_internal_config(struct internal_config *internal_cfg)
311 {
312 int i;
313
314 internal_cfg->memory = 0;
315 internal_cfg->force_nrank = 0;
316 internal_cfg->force_nchannel = 0;
317 internal_cfg->hugefile_prefix = NULL;
318 internal_cfg->hugepage_dir = NULL;
319 internal_cfg->force_sockets = 0;
320 /* zero out the NUMA config */
321 for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
322 internal_cfg->socket_mem[i] = 0;
323 internal_cfg->force_socket_limits = 0;
324 /* zero out the NUMA limits config */
325 for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
326 internal_cfg->socket_limit[i] = 0;
327 /* zero out hugedir descriptors */
328 for (i = 0; i < MAX_HUGEPAGE_SIZES; i++) {
329 memset(&internal_cfg->hugepage_info[i], 0,
330 sizeof(internal_cfg->hugepage_info[0]));
331 internal_cfg->hugepage_info[i].lock_descriptor = -1;
332 }
333 internal_cfg->base_virtaddr = 0;
334
335 #ifdef LOG_DAEMON
336 internal_cfg->syslog_facility = LOG_DAEMON;
337 #endif
338
339 /* if set to NONE, interrupt mode is determined automatically */
340 internal_cfg->vfio_intr_mode = RTE_INTR_MODE_NONE;
341 memset(internal_cfg->vfio_vf_token, 0,
342 sizeof(internal_cfg->vfio_vf_token));
343
344 #ifdef RTE_LIBEAL_USE_HPET
345 internal_cfg->no_hpet = 0;
346 #else
347 internal_cfg->no_hpet = 1;
348 #endif
349 internal_cfg->vmware_tsc_map = 0;
350 internal_cfg->create_uio_dev = 0;
351 internal_cfg->iova_mode = RTE_IOVA_DC;
352 internal_cfg->user_mbuf_pool_ops_name = NULL;
353 CPU_ZERO(&internal_cfg->ctrl_cpuset);
354 internal_cfg->init_complete = 0;
355 internal_cfg->max_simd_bitwidth.bitwidth = RTE_VECT_DEFAULT_SIMD_BITWIDTH;
356 internal_cfg->max_simd_bitwidth.forced = 0;
357 }
358
359 static int
360 eal_plugin_add(const char *path)
361 {
362 struct shared_driver *solib;
363
364 solib = malloc(sizeof(*solib));
365 if (solib == NULL) {
366 RTE_LOG(ERR, EAL, "malloc(solib) failed\n");
367 return -1;
368 }
369 memset(solib, 0, sizeof(*solib));
370 strlcpy(solib->name, path, PATH_MAX);
371 TAILQ_INSERT_TAIL(&solib_list, solib, next);
372
373 return 0;
374 }
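/*
 * Illustrative "-d" usage: the argument may be either a single shared
 * object, e.g. "-d librte_net_ring.so", or a directory, in which case
 * eal_plugindir_init() below loads every *.so / *.so.ABI_VERSION file
 * found in it.
 */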
375
376 #ifdef RTE_EXEC_ENV_WINDOWS
377 int
378 eal_plugins_init(void)
379 {
380 return 0;
381 }
382 #else
383
384 static int
385 eal_plugindir_init(const char *path)
386 {
387 DIR *d = NULL;
388 struct dirent *dent = NULL;
389 char sopath[PATH_MAX];
390
391 if (path == NULL || *path == '\0')
392 return 0;
393
394 d = opendir(path);
395 if (d == NULL) {
396 RTE_LOG(ERR, EAL, "failed to open directory %s: %s\n",
397 path, strerror(errno));
398 return -1;
399 }
400
401 while ((dent = readdir(d)) != NULL) {
402 struct stat sb;
403 int nlen = strnlen(dent->d_name, sizeof(dent->d_name));
404
405 /* check if name ends in .so or .so.ABI_VERSION */
406 if (strcmp(&dent->d_name[nlen - 3], ".so") != 0 &&
407 strcmp(&dent->d_name[nlen - 4 - strlen(ABI_VERSION)],
408 ".so."ABI_VERSION) != 0)
409 continue;
410
411 snprintf(sopath, sizeof(sopath), "%s/%s", path, dent->d_name);
412
413 /* if a regular file, add to list to load */
414 if (!(stat(sopath, &sb) == 0 && S_ISREG(sb.st_mode)))
415 continue;
416
417 if (eal_plugin_add(sopath) == -1)
418 break;
419 }
420
421 closedir(d);
422 /* XXX this ignores failures from readdir() itself */
423 return (dent == NULL) ? 0 : -1;
424 }
425
426 static int
427 verify_perms(const char *dirpath)
428 {
429 struct stat st;
430
431 /* if not the root directory, verify the parent directory first */
432 if (strcmp(dirpath, "/") != 0) {
433 static __thread char last_dir_checked[PATH_MAX];
434 char copy[PATH_MAX];
435 const char *dir;
436
437 strlcpy(copy, dirpath, PATH_MAX);
438 dir = dirname(copy);
439 if (strncmp(dir, last_dir_checked, PATH_MAX) != 0) {
440 if (verify_perms(dir) != 0)
441 return -1;
442 strlcpy(last_dir_checked, dir, PATH_MAX);
443 }
444 }
445
446 /* call stat to check for permissions and ensure not world writable */
447 if (stat(dirpath, &st) != 0) {
448 RTE_LOG(ERR, EAL, "Error with stat on %s, %s\n",
449 dirpath, strerror(errno));
450 return -1;
451 }
452 if (st.st_mode & S_IWOTH) {
453 RTE_LOG(ERR, EAL,
454 "Error, directory path %s is world-writable and insecure\n",
455 dirpath);
456 return -1;
457 }
458
459 return 0;
460 }
461
462 static void *
463 eal_dlopen(const char *pathname)
464 {
465 void *retval = NULL;
466 char *realp = realpath(pathname, NULL);
467
468 if (realp == NULL && errno == ENOENT) {
469 /* not a full or relative path, try a load from system dirs */
470 retval = dlopen(pathname, RTLD_NOW);
471 if (retval == NULL)
472 RTE_LOG(ERR, EAL, "%s\n", dlerror());
473 return retval;
474 }
475 if (realp == NULL) {
476 RTE_LOG(ERR, EAL, "Error with realpath for %s, %s\n",
477 pathname, strerror(errno));
478 goto out;
479 }
480 if (strnlen(realp, PATH_MAX) == PATH_MAX) {
481 RTE_LOG(ERR, EAL, "Error, driver path greater than PATH_MAX\n");
482 goto out;
483 }
484
485 /* do permissions checks */
486 if (verify_perms(realp) != 0)
487 goto out;
488
489 retval = dlopen(realp, RTLD_NOW);
490 if (retval == NULL)
491 RTE_LOG(ERR, EAL, "%s\n", dlerror());
492 out:
493 free(realp);
494 return retval;
495 }
496
497 int
498 eal_plugins_init(void)
499 {
500 struct shared_driver *solib = NULL;
501 struct stat sb;
502
503 /* If we are not statically linked, add default driver loading
504 * path if it exists as a directory.
505 * (dlopen with the NOLOAD flag returns NULL if the EAL shared
506 * library is not already loaded, i.e. we are statically linked.)
507 */
508 if (dlopen("librte_eal.so."ABI_VERSION, RTLD_LAZY | RTLD_NOLOAD) != NULL &&
509 *default_solib_dir != '\0' &&
510 stat(default_solib_dir, &sb) == 0 &&
511 S_ISDIR(sb.st_mode))
512 eal_plugin_add(default_solib_dir);
513
514 TAILQ_FOREACH(solib, &solib_list, next) {
515
516 if (stat(solib->name, &sb) == 0 && S_ISDIR(sb.st_mode)) {
517 if (eal_plugindir_init(solib->name) == -1) {
518 RTE_LOG(ERR, EAL,
519 "Cannot init plugin directory %s\n",
520 solib->name);
521 return -1;
522 }
523 } else {
524 RTE_LOG(DEBUG, EAL, "open shared lib %s\n",
525 solib->name);
526 solib->lib_handle = eal_dlopen(solib->name);
527 if (solib->lib_handle == NULL)
528 return -1;
529 }
530
531 }
532 return 0;
533 }
534 #endif
535
536 /*
537 * Parse the coremask given as argument (hexadecimal string) and fill
538 * the global configuration (core role and core count) with the parsed
539 * value.
540 */
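/*
 * Illustrative example of the mask format handled below: "-c 0x0f"
 * (optionally with surrounding blanks) selects lcores 0-3, i.e. bit n of
 * the hexadecimal mask enables lcore n.
 */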
541 static int xdigit2val(unsigned char c)
542 {
543 int val;
544
545 if (isdigit(c))
546 val = c - '0';
547 else if (isupper(c))
548 val = c - 'A' + 10;
549 else
550 val = c - 'a' + 10;
551 return val;
552 }
553
554 static int
555 eal_parse_service_coremask(const char *coremask)
556 {
557 struct rte_config *cfg = rte_eal_get_configuration();
558 int i, j, idx = 0;
559 unsigned int count = 0;
560 char c;
561 int val;
562 uint32_t taken_lcore_count = 0;
563
564 if (coremask == NULL)
565 return -1;
566 /* Remove leading and trailing blank characters.
567 * Remove the 0x/0X prefix if it exists.
568 */
569 while (isblank(*coremask))
570 coremask++;
571 if (coremask[0] == '0' && ((coremask[1] == 'x')
572 || (coremask[1] == 'X')))
573 coremask += 2;
574 i = strlen(coremask);
575 while ((i > 0) && isblank(coremask[i - 1]))
576 i--;
577
578 if (i == 0)
579 return -1;
580
581 for (i = i - 1; i >= 0 && idx < RTE_MAX_LCORE; i--) {
582 c = coremask[i];
583 if (isxdigit(c) == 0) {
584 /* invalid characters */
585 return -1;
586 }
587 val = xdigit2val(c);
588 for (j = 0; j < BITS_PER_HEX && idx < RTE_MAX_LCORE;
589 j++, idx++) {
590 if ((1 << j) & val) {
591 /* handle main lcore already parsed */
592 uint32_t lcore = idx;
593 if (main_lcore_parsed &&
594 cfg->main_lcore == lcore) {
595 RTE_LOG(ERR, EAL,
596 "lcore %u is main lcore, cannot use as service core\n",
597 idx);
598 return -1;
599 }
600
601 if (eal_cpu_detected(idx) == 0) {
602 RTE_LOG(ERR, EAL,
603 "lcore %u unavailable\n", idx);
604 return -1;
605 }
606
607 if (cfg->lcore_role[idx] == ROLE_RTE)
608 taken_lcore_count++;
609
610 lcore_config[idx].core_role = ROLE_SERVICE;
611 count++;
612 }
613 }
614 }
615
616 for (; i >= 0; i--)
617 if (coremask[i] != '0')
618 return -1;
619
620 for (; idx < RTE_MAX_LCORE; idx++)
621 lcore_config[idx].core_index = -1;
622
623 if (count == 0)
624 return -1;
625
626 if (core_parsed && taken_lcore_count != count) {
627 RTE_LOG(WARNING, EAL,
628 "Not all service cores are in the coremask. "
629 "Please ensure -c or -l includes service cores\n");
630 }
631
632 cfg->service_lcore_count = count;
633 return 0;
634 }
635
636 static int
637 eal_service_cores_parsed(void)
638 {
639 int idx;
640 for (idx = 0; idx < RTE_MAX_LCORE; idx++) {
641 if (lcore_config[idx].core_role == ROLE_SERVICE)
642 return 1;
643 }
644 return 0;
645 }
646
647 static int
648 update_lcore_config(int *cores)
649 {
650 struct rte_config *cfg = rte_eal_get_configuration();
651 unsigned int count = 0;
652 unsigned int i;
653 int ret = 0;
654
655 for (i = 0; i < RTE_MAX_LCORE; i++) {
656 if (cores[i] != -1) {
657 if (eal_cpu_detected(i) == 0) {
658 RTE_LOG(ERR, EAL, "lcore %u unavailable\n", i);
659 ret = -1;
660 continue;
661 }
662 cfg->lcore_role[i] = ROLE_RTE;
663 count++;
664 } else {
665 cfg->lcore_role[i] = ROLE_OFF;
666 }
667 lcore_config[i].core_index = cores[i];
668 }
669 if (!ret)
670 cfg->lcore_count = count;
671 return ret;
672 }
673
674 static int
675 eal_parse_coremask(const char *coremask, int *cores)
676 {
677 unsigned count = 0;
678 int i, j, idx;
679 int val;
680 char c;
681
682 for (idx = 0; idx < RTE_MAX_LCORE; idx++)
683 cores[idx] = -1;
684 idx = 0;
685
686 /* Remove leading and trailing blank characters.
687 * Remove the 0x/0X prefix if it exists.
688 */
689 while (isblank(*coremask))
690 coremask++;
691 if (coremask[0] == '0' && ((coremask[1] == 'x')
692 || (coremask[1] == 'X')))
693 coremask += 2;
694 i = strlen(coremask);
695 while ((i > 0) && isblank(coremask[i - 1]))
696 i--;
697 if (i == 0)
698 return -1;
699
700 for (i = i - 1; i >= 0 && idx < RTE_MAX_LCORE; i--) {
701 c = coremask[i];
702 if (isxdigit(c) == 0) {
703 /* invalid characters */
704 return -1;
705 }
706 val = xdigit2val(c);
707 for (j = 0; j < BITS_PER_HEX && idx < RTE_MAX_LCORE; j++, idx++)
708 {
709 if ((1 << j) & val) {
710 cores[idx] = count;
711 count++;
712 }
713 }
714 }
715 for (; i >= 0; i--)
716 if (coremask[i] != '0')
717 return -1;
718 if (count == 0)
719 return -1;
720 return 0;
721 }
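/*
 * Example (illustrative) of the resulting mapping: for coremask "0x15"
 * (bits 0, 2 and 4 set), cores[0] = 0, cores[2] = 1 and cores[4] = 2,
 * i.e. enabled lcores get consecutive core indexes in bit order, while all
 * other entries stay at -1.
 */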
722
723 static int
724 eal_parse_service_corelist(const char *corelist)
725 {
726 struct rte_config *cfg = rte_eal_get_configuration();
727 int i, idx = 0;
728 unsigned count = 0;
729 char *end = NULL;
730 int min, max;
731 uint32_t taken_lcore_count = 0;
732
733 if (corelist == NULL)
734 return -1;
735
736 /* Remove leading and trailing blank characters */
737 while (isblank(*corelist))
738 corelist++;
739 i = strlen(corelist);
740 while ((i > 0) && isblank(corelist[i - 1]))
741 i--;
742
743 /* Get list of cores */
744 min = RTE_MAX_LCORE;
745 do {
746 while (isblank(*corelist))
747 corelist++;
748 if (*corelist == '\0')
749 return -1;
750 errno = 0;
751 idx = strtoul(corelist, &end, 10);
752 if (errno || end == NULL)
753 return -1;
754 while (isblank(*end))
755 end++;
756 if (*end == '-') {
757 min = idx;
758 } else if ((*end == ',') || (*end == '\0')) {
759 max = idx;
760 if (min == RTE_MAX_LCORE)
761 min = idx;
762 for (idx = min; idx <= max; idx++) {
763 if (cfg->lcore_role[idx] != ROLE_SERVICE) {
764 /* handle main lcore already parsed */
765 uint32_t lcore = idx;
766 if (cfg->main_lcore == lcore &&
767 main_lcore_parsed) {
768 RTE_LOG(ERR, EAL,
769 "Error: lcore %u is main lcore, cannot use as service core\n",
770 idx);
771 return -1;
772 }
773 if (cfg->lcore_role[idx] == ROLE_RTE)
774 taken_lcore_count++;
775
776 lcore_config[idx].core_role =
777 ROLE_SERVICE;
778 count++;
779 }
780 }
781 min = RTE_MAX_LCORE;
782 } else
783 return -1;
784 corelist = end + 1;
785 } while (*end != '\0');
786
787 if (count == 0)
788 return -1;
789
790 if (core_parsed && taken_lcore_count != count) {
791 RTE_LOG(WARNING, EAL,
792 "Not all service cores were in the coremask. "
793 "Please ensure -c or -l includes service cores\n");
794 }
795
796 return 0;
797 }
798
799 static int
800 eal_parse_corelist(const char *corelist, int *cores)
801 {
802 unsigned count = 0;
803 char *end = NULL;
804 int min, max;
805 int idx;
806
807 for (idx = 0; idx < RTE_MAX_LCORE; idx++)
808 cores[idx] = -1;
809
810 /* Remove leading blank characters */
811 while (isblank(*corelist))
812 corelist++;
813
814 /* Get list of cores */
815 min = RTE_MAX_LCORE;
816 do {
817 while (isblank(*corelist))
818 corelist++;
819 if (*corelist == '\0')
820 return -1;
821 errno = 0;
822 idx = strtol(corelist, &end, 10);
823 if (errno || end == NULL)
824 return -1;
825 if (idx < 0 || idx >= RTE_MAX_LCORE)
826 return -1;
827 while (isblank(*end))
828 end++;
829 if (*end == '-') {
830 min = idx;
831 } else if ((*end == ',') || (*end == '\0')) {
832 max = idx;
833 if (min == RTE_MAX_LCORE)
834 min = idx;
835 for (idx = min; idx <= max; idx++) {
836 if (cores[idx] == -1) {
837 cores[idx] = count;
838 count++;
839 }
840 }
841 min = RTE_MAX_LCORE;
842 } else
843 return -1;
844 corelist = end + 1;
845 } while (*end != '\0');
846
847 if (count == 0)
848 return -1;
849 return 0;
850 }
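/*
 * Example (illustrative): "-l 1-3,6" marks lcores 1, 2, 3 and 6 and
 * assigns them core indexes 0, 1, 2 and 3 respectively; duplicate entries
 * are counted only once.
 */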
851
852 /* Changes the lcore id of the main thread */
853 static int
854 eal_parse_main_lcore(const char *arg)
855 {
856 char *parsing_end;
857 struct rte_config *cfg = rte_eal_get_configuration();
858
859 errno = 0;
860 cfg->main_lcore = (uint32_t) strtol(arg, &parsing_end, 0);
861 if (errno || parsing_end[0] != 0)
862 return -1;
863 if (cfg->main_lcore >= RTE_MAX_LCORE)
864 return -1;
865 main_lcore_parsed = 1;
866
867 /* ensure main core is not used as service core */
868 if (lcore_config[cfg->main_lcore].core_role == ROLE_SERVICE) {
869 RTE_LOG(ERR, EAL,
870 "Error: Main lcore is used as a service core\n");
871 return -1;
872 }
873
874 return 0;
875 }
876
877 /*
878 * Parse an element; an element can be a single number, a range or a '(' ')' group.
879 * 1) A single-number element is just a number, e.g. 9
880 * 2) A range element is two numbers separated by '-', e.g. 2-6
881 * 3) A group element combines several of 1) or 2) inside '( )', e.g. (0,2-4,6)
882 * Within a group element, '-' is used as the range separator and
883 * ',' separates single numbers.
884 */
885 static int
886 eal_parse_set(const char *input, rte_cpuset_t *set)
887 {
888 unsigned idx;
889 const char *str = input;
890 char *end = NULL;
891 unsigned min, max;
892
893 CPU_ZERO(set);
894
895 while (isblank(*str))
896 str++;
897
898 /* only a digit or a left bracket qualifies as a start point */
899 if ((!isdigit(*str) && *str != '(') || *str == '\0')
900 return -1;
901
902 /* process single number or single range of number */
903 if (*str != '(') {
904 errno = 0;
905 idx = strtoul(str, &end, 10);
906 if (errno || end == NULL || idx >= CPU_SETSIZE)
907 return -1;
908 else {
909 while (isblank(*end))
910 end++;
911
912 min = idx;
913 max = idx;
914 if (*end == '-') {
915 /* process single <number>-<number> */
916 end++;
917 while (isblank(*end))
918 end++;
919 if (!isdigit(*end))
920 return -1;
921
922 errno = 0;
923 idx = strtoul(end, &end, 10);
924 if (errno || end == NULL || idx >= CPU_SETSIZE)
925 return -1;
926 max = idx;
927 while (isblank(*end))
928 end++;
929 if (*end != ',' && *end != '\0')
930 return -1;
931 }
932
933 if (*end != ',' && *end != '\0' &&
934 *end != '@')
935 return -1;
936
937 for (idx = RTE_MIN(min, max);
938 idx <= RTE_MAX(min, max); idx++)
939 CPU_SET(idx, set);
940
941 return end - input;
942 }
943 }
944
945 /* process set within bracket */
946 str++;
947 while (isblank(*str))
948 str++;
949 if (*str == '\0')
950 return -1;
951
952 min = RTE_MAX_LCORE;
953 do {
954
955 /* go ahead to the first digit */
956 while (isblank(*str))
957 str++;
958 if (!isdigit(*str))
959 return -1;
960
961 /* get the digit value */
962 errno = 0;
963 idx = strtoul(str, &end, 10);
964 if (errno || end == NULL || idx >= CPU_SETSIZE)
965 return -1;
966
967 /* advance to a separator: '-', ',' or ')' */
968 while (isblank(*end))
969 end++;
970 if (*end == '-') {
971 if (min == RTE_MAX_LCORE)
972 min = idx;
973 else /* avoid continuous '-' */
974 return -1;
975 } else if ((*end == ',') || (*end == ')')) {
976 max = idx;
977 if (min == RTE_MAX_LCORE)
978 min = idx;
979 for (idx = RTE_MIN(min, max);
980 idx <= RTE_MAX(min, max); idx++)
981 CPU_SET(idx, set);
982
983 min = RTE_MAX_LCORE;
984 } else
985 return -1;
986
987 str = end + 1;
988 } while (*end != '\0' && *end != ')');
989
990 /*
991 * skip trailing blanks so they do not make the end-character check
992 * in eal_parse_lcores() fail
993 */
994 while (isblank(*str))
995 str++;
996
997 return str - input;
998 }
999
1000 static int
1001 check_cpuset(rte_cpuset_t *set)
1002 {
1003 unsigned int idx;
1004
1005 for (idx = 0; idx < CPU_SETSIZE; idx++) {
1006 if (!CPU_ISSET(idx, set))
1007 continue;
1008
1009 if (eal_cpu_detected(idx) == 0) {
1010 RTE_LOG(ERR, EAL, "core %u "
1011 "unavailable\n", idx);
1012 return -1;
1013 }
1014 }
1015 return 0;
1016 }
1017
1018 /*
1019 * The format pattern: --lcores='<lcores[@cpus]>[<,lcores[@cpus]>...]'
1020 * lcores and cpus can each be a single number, a range or a group.
1021 * '(' and ')' are required around a group.
1022 * If '@cpus' is not supplied, cpus defaults to the same value as lcores.
1023 * e.g. '1,2@(5-7),(3-5)@(0,2),(0,6),7-8' starts 9 EAL threads as below:
1024 * lcore 0 runs on cpuset 0x41 (cpu 0,6)
1025 * lcore 1 runs on cpuset 0x2 (cpu 1)
1026 * lcore 2 runs on cpuset 0xe0 (cpu 5,6,7)
1027 * lcore 3,4,5 runs on cpuset 0x5 (cpu 0,2)
1028 * lcore 6 runs on cpuset 0x41 (cpu 0,6)
1029 * lcore 7 runs on cpuset 0x80 (cpu 7)
1030 * lcore 8 runs on cpuset 0x100 (cpu 8)
1031 */
1032 static int
1033 eal_parse_lcores(const char *lcores)
1034 {
1035 struct rte_config *cfg = rte_eal_get_configuration();
1036 rte_cpuset_t lcore_set;
1037 unsigned int set_count;
1038 unsigned idx = 0;
1039 unsigned count = 0;
1040 const char *lcore_start = NULL;
1041 const char *end = NULL;
1042 int offset;
1043 rte_cpuset_t cpuset;
1044 int lflags;
1045 int ret = -1;
1046
1047 if (lcores == NULL)
1048 return -1;
1049
1050 /* Remove leading and trailing blank characters */
1051 while (isblank(*lcores))
1052 lcores++;
1053
1054 CPU_ZERO(&cpuset);
1055
1056 /* Reset lcore config */
1057 for (idx = 0; idx < RTE_MAX_LCORE; idx++) {
1058 cfg->lcore_role[idx] = ROLE_OFF;
1059 lcore_config[idx].core_index = -1;
1060 CPU_ZERO(&lcore_config[idx].cpuset);
1061 }
1062
1063 /* Get list of cores */
1064 do {
1065 while (isblank(*lcores))
1066 lcores++;
1067 if (*lcores == '\0')
1068 goto err;
1069
1070 lflags = 0;
1071
1072 /* record lcore_set start point */
1073 lcore_start = lcores;
1074
1075 /* go across a complete bracket */
1076 if (*lcore_start == '(') {
1077 lcores += strcspn(lcores, ")");
1078 if (*lcores++ == '\0')
1079 goto err;
1080 }
1081
1082 /* scan the separator '@', ','(next) or '\0'(finish) */
1083 lcores += strcspn(lcores, "@,");
1084
1085 if (*lcores == '@') {
1086 /* explicit assign cpuset and update the end cursor */
1087 offset = eal_parse_set(lcores + 1, &cpuset);
1088 if (offset < 0)
1089 goto err;
1090 end = lcores + 1 + offset;
1091 } else { /* ',' or '\0' */
1092 /* haven't given cpuset, current loop done */
1093 end = lcores;
1094
1095 /* go back to check <number>-<number> */
1096 offset = strcspn(lcore_start, "(-");
1097 if (offset < (end - lcore_start) &&
1098 *(lcore_start + offset) != '(')
1099 lflags = 1;
1100 }
1101
1102 if (*end != ',' && *end != '\0')
1103 goto err;
1104
1105 /* parse lcore_set from start point */
1106 if (eal_parse_set(lcore_start, &lcore_set) < 0)
1107 goto err;
1108
1109 /* without '@', by default using lcore_set as cpuset */
1110 if (*lcores != '@')
1111 rte_memcpy(&cpuset, &lcore_set, sizeof(cpuset));
1112
1113 set_count = CPU_COUNT(&lcore_set);
1114 /* start to update lcore_set */
1115 for (idx = 0; idx < RTE_MAX_LCORE; idx++) {
1116 if (!CPU_ISSET(idx, &lcore_set))
1117 continue;
1118 set_count--;
1119
1120 if (cfg->lcore_role[idx] != ROLE_RTE) {
1121 lcore_config[idx].core_index = count;
1122 cfg->lcore_role[idx] = ROLE_RTE;
1123 count++;
1124 }
1125
1126 if (lflags) {
1127 CPU_ZERO(&cpuset);
1128 CPU_SET(idx, &cpuset);
1129 }
1130
1131 if (check_cpuset(&cpuset) < 0)
1132 goto err;
1133 rte_memcpy(&lcore_config[idx].cpuset, &cpuset,
1134 sizeof(rte_cpuset_t));
1135 }
1136
1137 /* some cores from the lcore_set can't be handled by EAL */
1138 if (set_count != 0)
1139 goto err;
1140
1141 lcores = end + 1;
1142 } while (*end != '\0');
1143
1144 if (count == 0)
1145 goto err;
1146
1147 cfg->lcore_count = count;
1148 ret = 0;
1149
1150 err:
1151
1152 return ret;
1153 }
1154
1155 #ifndef RTE_EXEC_ENV_WINDOWS
1156 static int
1157 eal_parse_syslog(const char *facility, struct internal_config *conf)
1158 {
1159 int i;
1160 static const struct {
1161 const char *name;
1162 int value;
1163 } map[] = {
1164 { "auth", LOG_AUTH },
1165 { "cron", LOG_CRON },
1166 { "daemon", LOG_DAEMON },
1167 { "ftp", LOG_FTP },
1168 { "kern", LOG_KERN },
1169 { "lpr", LOG_LPR },
1170 { "mail", LOG_MAIL },
1171 { "news", LOG_NEWS },
1172 { "syslog", LOG_SYSLOG },
1173 { "user", LOG_USER },
1174 { "uucp", LOG_UUCP },
1175 { "local0", LOG_LOCAL0 },
1176 { "local1", LOG_LOCAL1 },
1177 { "local2", LOG_LOCAL2 },
1178 { "local3", LOG_LOCAL3 },
1179 { "local4", LOG_LOCAL4 },
1180 { "local5", LOG_LOCAL5 },
1181 { "local6", LOG_LOCAL6 },
1182 { "local7", LOG_LOCAL7 },
1183 { NULL, 0 }
1184 };
1185
1186 for (i = 0; map[i].name; i++) {
1187 if (!strcmp(facility, map[i].name)) {
1188 conf->syslog_facility = map[i].value;
1189 return 0;
1190 }
1191 }
1192 return -1;
1193 }
1194 #endif
1195
1196 static int
1197 eal_parse_log_priority(const char *level)
1198 {
1199 static const char * const levels[] = {
1200 [RTE_LOG_EMERG] = "emergency",
1201 [RTE_LOG_ALERT] = "alert",
1202 [RTE_LOG_CRIT] = "critical",
1203 [RTE_LOG_ERR] = "error",
1204 [RTE_LOG_WARNING] = "warning",
1205 [RTE_LOG_NOTICE] = "notice",
1206 [RTE_LOG_INFO] = "info",
1207 [RTE_LOG_DEBUG] = "debug",
1208 };
1209 size_t len = strlen(level);
1210 unsigned long tmp;
1211 char *end;
1212 unsigned int i;
1213
1214 if (len == 0)
1215 return -1;
1216
1217 /* look for named values, skip 0 which is not a valid level */
1218 for (i = 1; i < RTE_DIM(levels); i++) {
1219 if (strncmp(levels[i], level, len) == 0)
1220 return i;
1221 }
1222
1223 /* not a string, maybe it is numeric */
1224 errno = 0;
1225 tmp = strtoul(level, &end, 0);
1226
1227 /* check for errors */
1228 if (errno != 0 || end == NULL || *end != '\0' ||
1229 tmp >= UINT32_MAX)
1230 return -1;
1231
1232 return tmp;
1233 }
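/*
 * Note on the matching above (an observation, not a documented guarantee):
 * because only strlen(level) characters are compared, abbreviated names
 * are accepted, e.g. "--log-level=d" resolves to debug; purely numeric
 * values such as "--log-level=8" are handled by the strtoul() fallback.
 */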
1234
1235 static int
1236 eal_parse_log_level(const char *arg)
1237 {
1238 const char *pattern = NULL;
1239 const char *regex = NULL;
1240 char *str, *level;
1241 int priority;
1242
1243 str = strdup(arg);
1244 if (str == NULL)
1245 return -1;
1246
1247 if ((level = strchr(str, ','))) {
1248 regex = str;
1249 *level++ = '\0';
1250 } else if ((level = strchr(str, ':'))) {
1251 pattern = str;
1252 *level++ = '\0';
1253 } else {
1254 level = str;
1255 }
1256
1257 priority = eal_parse_log_priority(level);
1258 if (priority < 0) {
1259 fprintf(stderr, "invalid log priority: %s\n", level);
1260 goto fail;
1261 }
1262
1263 if (regex) {
1264 if (rte_log_set_level_regexp(regex, priority) < 0) {
1265 fprintf(stderr, "cannot set log level %s,%d\n",
1266 regex, priority);
1267 goto fail;
1268 }
1269 if (rte_log_save_regexp(regex, priority) < 0)
1270 goto fail;
1271 } else if (pattern) {
1272 if (rte_log_set_level_pattern(pattern, priority) < 0) {
1273 fprintf(stderr, "cannot set log level %s:%d\n",
1274 pattern, priority);
1275 goto fail;
1276 }
1277 if (rte_log_save_pattern(pattern, priority) < 0)
1278 goto fail;
1279 } else {
1280 rte_log_set_global_level(priority);
1281 }
1282
1283 free(str);
1284 return 0;
1285
1286 fail:
1287 free(str);
1288 return -1;
1289 }
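/*
 * Illustrative accepted forms for --log-level, based on the parsing above:
 * a global level ("--log-level=info"), a regular expression plus level
 * separated by a comma ("--log-level=pmd.*,debug"), or a globbing pattern
 * plus level separated by a colon ("--log-level=lib.eal:debug").
 */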
1290
1291 static enum rte_proc_type_t
1292 eal_parse_proc_type(const char *arg)
1293 {
1294 if (strncasecmp(arg, "primary", sizeof("primary")) == 0)
1295 return RTE_PROC_PRIMARY;
1296 if (strncasecmp(arg, "secondary", sizeof("secondary")) == 0)
1297 return RTE_PROC_SECONDARY;
1298 if (strncasecmp(arg, "auto", sizeof("auto")) == 0)
1299 return RTE_PROC_AUTO;
1300
1301 return RTE_PROC_INVALID;
1302 }
1303
1304 static int
1305 eal_parse_iova_mode(const char *name)
1306 {
1307 int mode;
1308 struct internal_config *internal_conf =
1309 eal_get_internal_configuration();
1310
1311 if (name == NULL)
1312 return -1;
1313
1314 if (!strcmp("pa", name))
1315 mode = RTE_IOVA_PA;
1316 else if (!strcmp("va", name))
1317 mode = RTE_IOVA_VA;
1318 else
1319 return -1;
1320
1321 internal_conf->iova_mode = mode;
1322 return 0;
1323 }
1324
1325 static int
1326 eal_parse_simd_bitwidth(const char *arg)
1327 {
1328 char *end;
1329 unsigned long bitwidth;
1330 int ret;
1331 struct internal_config *internal_conf =
1332 eal_get_internal_configuration();
1333
1334 if (arg == NULL || arg[0] == '\0')
1335 return -1;
1336
1337 errno = 0;
1338 bitwidth = strtoul(arg, &end, 0);
1339
1340 /* check for errors */
1341 if (errno != 0 || end == NULL || *end != '\0' || bitwidth > RTE_VECT_SIMD_MAX)
1342 return -1;
1343
1344 if (bitwidth == 0)
1345 bitwidth = (unsigned long) RTE_VECT_SIMD_MAX;
1346 ret = rte_vect_set_max_simd_bitwidth(bitwidth);
1347 if (ret < 0)
1348 return -1;
1349 internal_conf->max_simd_bitwidth.forced = 1;
1350 return 0;
1351 }
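/*
 * Example (illustrative): "--force-max-simd-bitwidth=512" caps vector code
 * paths at 512-bit SIMD, while a value of 0 is treated above as "no limit"
 * (RTE_VECT_SIMD_MAX). The value must be a power of two, as enforced by
 * rte_vect_set_max_simd_bitwidth().
 */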
1352
1353 static int
1354 eal_parse_base_virtaddr(const char *arg)
1355 {
1356 char *end;
1357 uint64_t addr;
1358 struct internal_config *internal_conf =
1359 eal_get_internal_configuration();
1360
1361 errno = 0;
1362 addr = strtoull(arg, &end, 16);
1363
1364 /* check for errors */
1365 if ((errno != 0) || (arg[0] == '\0') || end == NULL || (*end != '\0'))
1366 return -1;
1367
1368 /* make sure we don't exceed 32-bit boundary on 32-bit target */
1369 #ifndef RTE_ARCH_64
1370 if (addr >= UINTPTR_MAX)
1371 return -1;
1372 #endif
1373
1374 /* Align the addr on a 16MB boundary; 16MB is the minimum huge page
1375 * size on the IBM Power architecture. An address aligned to 16MB is
1376 * also aligned to 2MB, so this alignment can be used on x86 and
1377 * other architectures as well.
1378 */
1379 internal_conf->base_virtaddr =
1380 RTE_PTR_ALIGN_CEIL((uintptr_t)addr, (size_t)RTE_PGSIZE_16M);
1381
1382 return 0;
1383 }
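/*
 * Example (illustrative): "--base-virtaddr=0x200000000" requests that DPDK
 * memory be mapped starting near that address; the value is rounded up
 * here to a 16MB boundary as described above.
 */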
1384
1385 /* caller is responsible for freeing the returned string */
1386 static char *
1387 available_cores(void)
1388 {
1389 char *str = NULL;
1390 int previous;
1391 int sequence;
1392 char *tmp;
1393 int idx;
1394
1395 /* find the first available cpu */
1396 for (idx = 0; idx < RTE_MAX_LCORE; idx++) {
1397 if (eal_cpu_detected(idx) == 0)
1398 continue;
1399 break;
1400 }
1401 if (idx >= RTE_MAX_LCORE)
1402 return NULL;
1403
1404 /* first sequence */
1405 if (asprintf(&str, "%d", idx) < 0)
1406 return NULL;
1407 previous = idx;
1408 sequence = 0;
1409
1410 for (idx++ ; idx < RTE_MAX_LCORE; idx++) {
1411 if (eal_cpu_detected(idx) == 0)
1412 continue;
1413
1414 if (idx == previous + 1) {
1415 previous = idx;
1416 sequence = 1;
1417 continue;
1418 }
1419
1420 /* finish current sequence */
1421 if (sequence) {
1422 if (asprintf(&tmp, "%s-%d", str, previous) < 0) {
1423 free(str);
1424 return NULL;
1425 }
1426 free(str);
1427 str = tmp;
1428 }
1429
1430 /* new sequence */
1431 if (asprintf(&tmp, "%s,%d", str, idx) < 0) {
1432 free(str);
1433 return NULL;
1434 }
1435 free(str);
1436 str = tmp;
1437 previous = idx;
1438 sequence = 0;
1439 }
1440
1441 /* finish last sequence */
1442 if (sequence) {
1443 if (asprintf(&tmp, "%s-%d", str, previous) < 0) {
1444 free(str);
1445 return NULL;
1446 }
1447 free(str);
1448 str = tmp;
1449 }
1450
1451 return str;
1452 }
1453
1454 int
1455 eal_parse_common_option(int opt, const char *optarg,
1456 struct internal_config *conf)
1457 {
1458 static int b_used;
1459 static int a_used;
1460
1461 switch (opt) {
1462 case OPT_PCI_BLACKLIST_NUM:
1463 fprintf(stderr,
1464 "Option --pci-blacklist is deprecated, use -b, --block instead\n");
1465 /* fallthrough */
1466 case 'b':
1467 if (a_used)
1468 goto ba_conflict;
1469 if (eal_option_device_add(RTE_DEVTYPE_BLOCKED, optarg) < 0)
1470 return -1;
1471 b_used = 1;
1472 break;
1473
1474 case 'w':
1475 fprintf(stderr,
1476 "Option -w, --pci-whitelist is deprecated, use -a, --allow option instead\n");
1477 /* fallthrough */
1478 case 'a':
1479 if (b_used)
1480 goto ba_conflict;
1481 if (eal_option_device_add(RTE_DEVTYPE_ALLOWED, optarg) < 0)
1482 return -1;
1483 a_used = 1;
1484 break;
1485 /* coremask */
1486 case 'c': {
1487 int lcore_indexes[RTE_MAX_LCORE];
1488
1489 if (eal_service_cores_parsed())
1490 RTE_LOG(WARNING, EAL,
1491 "Service cores parsed before dataplane cores. Please ensure -c is before -s or -S\n");
1492 if (eal_parse_coremask(optarg, lcore_indexes) < 0) {
1493 RTE_LOG(ERR, EAL, "invalid coremask syntax\n");
1494 return -1;
1495 }
1496 if (update_lcore_config(lcore_indexes) < 0) {
1497 char *available = available_cores();
1498
1499 RTE_LOG(ERR, EAL,
1500 "invalid coremask, please check specified cores are part of %s\n",
1501 available);
1502 free(available);
1503 return -1;
1504 }
1505
1506 if (core_parsed) {
1507 RTE_LOG(ERR, EAL, "Option -c is ignored, because (%s) is set!\n",
1508 (core_parsed == LCORE_OPT_LST) ? "-l" :
1509 (core_parsed == LCORE_OPT_MAP) ? "--lcore" :
1510 "-c");
1511 return -1;
1512 }
1513
1514 core_parsed = LCORE_OPT_MSK;
1515 break;
1516 }
1517 /* corelist */
1518 case 'l': {
1519 int lcore_indexes[RTE_MAX_LCORE];
1520
1521 if (eal_service_cores_parsed())
1522 RTE_LOG(WARNING, EAL,
1523 "Service cores parsed before dataplane cores. Please ensure -l is before -s or -S\n");
1524
1525 if (eal_parse_corelist(optarg, lcore_indexes) < 0) {
1526 RTE_LOG(ERR, EAL, "invalid core list syntax\n");
1527 return -1;
1528 }
1529 if (update_lcore_config(lcore_indexes) < 0) {
1530 char *available = available_cores();
1531
1532 RTE_LOG(ERR, EAL,
1533 "invalid core list, please check specified cores are part of %s\n",
1534 available);
1535 free(available);
1536 return -1;
1537 }
1538
1539 if (core_parsed) {
1540 RTE_LOG(ERR, EAL, "Option -l is ignored, because (%s) is set!\n",
1541 (core_parsed == LCORE_OPT_MSK) ? "-c" :
1542 (core_parsed == LCORE_OPT_MAP) ? "--lcore" :
1543 "-l");
1544 return -1;
1545 }
1546
1547 core_parsed = LCORE_OPT_LST;
1548 break;
1549 }
1550 /* service coremask */
1551 case 's':
1552 if (eal_parse_service_coremask(optarg) < 0) {
1553 RTE_LOG(ERR, EAL, "invalid service coremask\n");
1554 return -1;
1555 }
1556 break;
1557 /* service corelist */
1558 case 'S':
1559 if (eal_parse_service_corelist(optarg) < 0) {
1560 RTE_LOG(ERR, EAL, "invalid service core list\n");
1561 return -1;
1562 }
1563 break;
1564 /* size of memory */
1565 case 'm':
1566 conf->memory = atoi(optarg);
1567 conf->memory *= 1024ULL;
1568 conf->memory *= 1024ULL;
1569 mem_parsed = 1;
1570 break;
1571 /* force number of channels */
1572 case 'n':
1573 conf->force_nchannel = atoi(optarg);
1574 if (conf->force_nchannel == 0) {
1575 RTE_LOG(ERR, EAL, "invalid channel number\n");
1576 return -1;
1577 }
1578 break;
1579 /* force number of ranks */
1580 case 'r':
1581 conf->force_nrank = atoi(optarg);
1582 if (conf->force_nrank == 0 ||
1583 conf->force_nrank > 16) {
1584 RTE_LOG(ERR, EAL, "invalid rank number\n");
1585 return -1;
1586 }
1587 break;
1588 /* force loading of external driver */
1589 case 'd':
1590 if (eal_plugin_add(optarg) == -1)
1591 return -1;
1592 break;
1593 case 'v':
1594 /* since message is explicitly requested by user, we
1595 * write message at highest log level so it can always
1596 * be seen
1597 * even if info or warning messages are disabled */
1598 RTE_LOG(CRIT, EAL, "RTE Version: '%s'\n", rte_version());
1599 break;
1600
1601 /* long options */
1602 case OPT_HUGE_UNLINK_NUM:
1603 conf->hugepage_unlink = 1;
1604 break;
1605
1606 case OPT_NO_HUGE_NUM:
1607 conf->no_hugetlbfs = 1;
1608 /* no-huge is legacy mem */
1609 conf->legacy_mem = 1;
1610 break;
1611
1612 case OPT_NO_PCI_NUM:
1613 conf->no_pci = 1;
1614 break;
1615
1616 case OPT_NO_HPET_NUM:
1617 conf->no_hpet = 1;
1618 break;
1619
1620 case OPT_VMWARE_TSC_MAP_NUM:
1621 conf->vmware_tsc_map = 1;
1622 break;
1623
1624 case OPT_NO_SHCONF_NUM:
1625 conf->no_shconf = 1;
1626 break;
1627
1628 case OPT_IN_MEMORY_NUM:
1629 conf->in_memory = 1;
1630 /* in-memory is a superset of noshconf and huge-unlink */
1631 conf->no_shconf = 1;
1632 conf->hugepage_unlink = 1;
1633 break;
1634
1635 case OPT_PROC_TYPE_NUM:
1636 conf->process_type = eal_parse_proc_type(optarg);
1637 break;
1638
1639 case OPT_MASTER_LCORE_NUM:
1640 fprintf(stderr,
1641 "Option --" OPT_MASTER_LCORE
1642 " is deprecated use " OPT_MAIN_LCORE "\n");
1643 /* fallthrough */
1644 case OPT_MAIN_LCORE_NUM:
1645 if (eal_parse_main_lcore(optarg) < 0) {
1646 RTE_LOG(ERR, EAL, "invalid parameter for --"
1647 OPT_MAIN_LCORE "\n");
1648 return -1;
1649 }
1650 break;
1651
1652 case OPT_VDEV_NUM:
1653 if (eal_option_device_add(RTE_DEVTYPE_VIRTUAL,
1654 optarg) < 0) {
1655 return -1;
1656 }
1657 break;
1658
1659 #ifndef RTE_EXEC_ENV_WINDOWS
1660 case OPT_SYSLOG_NUM:
1661 if (eal_parse_syslog(optarg, conf) < 0) {
1662 RTE_LOG(ERR, EAL, "invalid parameters for --"
1663 OPT_SYSLOG "\n");
1664 return -1;
1665 }
1666 break;
1667 #endif
1668
1669 case OPT_LOG_LEVEL_NUM: {
1670 if (eal_parse_log_level(optarg) < 0) {
1671 RTE_LOG(ERR, EAL,
1672 "invalid parameters for --"
1673 OPT_LOG_LEVEL "\n");
1674 return -1;
1675 }
1676 break;
1677 }
1678
1679 #ifndef RTE_EXEC_ENV_WINDOWS
1680 case OPT_TRACE_NUM: {
1681 if (eal_trace_args_save(optarg) < 0) {
1682 RTE_LOG(ERR, EAL, "invalid parameters for --"
1683 OPT_TRACE "\n");
1684 return -1;
1685 }
1686 break;
1687 }
1688
1689 case OPT_TRACE_DIR_NUM: {
1690 if (eal_trace_dir_args_save(optarg) < 0) {
1691 RTE_LOG(ERR, EAL, "invalid parameters for --"
1692 OPT_TRACE_DIR "\n");
1693 return -1;
1694 }
1695 break;
1696 }
1697
1698 case OPT_TRACE_BUF_SIZE_NUM: {
1699 if (eal_trace_bufsz_args_save(optarg) < 0) {
1700 RTE_LOG(ERR, EAL, "invalid parameters for --"
1701 OPT_TRACE_BUF_SIZE "\n");
1702 return -1;
1703 }
1704 break;
1705 }
1706
1707 case OPT_TRACE_MODE_NUM: {
1708 if (eal_trace_mode_args_save(optarg) < 0) {
1709 RTE_LOG(ERR, EAL, "invalid parameters for --"
1710 OPT_TRACE_MODE "\n");
1711 return -1;
1712 }
1713 break;
1714 }
1715 #endif /* !RTE_EXEC_ENV_WINDOWS */
1716
1717 case OPT_LCORES_NUM:
1718 if (eal_parse_lcores(optarg) < 0) {
1719 RTE_LOG(ERR, EAL, "invalid parameter for --"
1720 OPT_LCORES "\n");
1721 return -1;
1722 }
1723
1724 if (core_parsed) {
1725 RTE_LOG(ERR, EAL, "Option --lcore is ignored, because (%s) is set!\n",
1726 (core_parsed == LCORE_OPT_LST) ? "-l" :
1727 (core_parsed == LCORE_OPT_MSK) ? "-c" :
1728 "--lcore");
1729 return -1;
1730 }
1731
1732 core_parsed = LCORE_OPT_MAP;
1733 break;
1734 case OPT_LEGACY_MEM_NUM:
1735 conf->legacy_mem = 1;
1736 break;
1737 case OPT_SINGLE_FILE_SEGMENTS_NUM:
1738 conf->single_file_segments = 1;
1739 break;
1740 case OPT_IOVA_MODE_NUM:
1741 if (eal_parse_iova_mode(optarg) < 0) {
1742 RTE_LOG(ERR, EAL, "invalid parameters for --"
1743 OPT_IOVA_MODE "\n");
1744 return -1;
1745 }
1746 break;
1747 case OPT_BASE_VIRTADDR_NUM:
1748 if (eal_parse_base_virtaddr(optarg) < 0) {
1749 RTE_LOG(ERR, EAL, "invalid parameter for --"
1750 OPT_BASE_VIRTADDR "\n");
1751 return -1;
1752 }
1753 break;
1754 case OPT_TELEMETRY_NUM:
1755 break;
1756 case OPT_NO_TELEMETRY_NUM:
1757 conf->no_telemetry = 1;
1758 break;
1759 case OPT_FORCE_MAX_SIMD_BITWIDTH_NUM:
1760 if (eal_parse_simd_bitwidth(optarg) < 0) {
1761 RTE_LOG(ERR, EAL, "invalid parameter for --"
1762 OPT_FORCE_MAX_SIMD_BITWIDTH "\n");
1763 return -1;
1764 }
1765 break;
1766
1767 /* don't know what to do, leave this to caller */
1768 default:
1769 return 1;
1770
1771 }
1772
1773 return 0;
1774
1775 ba_conflict:
1776 RTE_LOG(ERR, EAL,
1777 "Options allow (-a) and block (-b) can't be used at the same time\n");
1778 return -1;
1779 }
1780
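/*
 * When no core option (-c/-l/--lcores) was given, trim the default lcore
 * set down to the CPUs in the process's startup affinity mask: any
 * ROLE_RTE lcore outside that affinity is switched back to ROLE_OFF and
 * removed from the lcore count.
 */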
1781 static void
1782 eal_auto_detect_cores(struct rte_config *cfg)
1783 {
1784 unsigned int lcore_id;
1785 unsigned int removed = 0;
1786 rte_cpuset_t affinity_set;
1787
1788 if (pthread_getaffinity_np(pthread_self(), sizeof(rte_cpuset_t),
1789 &affinity_set))
1790 CPU_ZERO(&affinity_set);
1791
1792 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1793 if (cfg->lcore_role[lcore_id] == ROLE_RTE &&
1794 !CPU_ISSET(lcore_id, &affinity_set)) {
1795 cfg->lcore_role[lcore_id] = ROLE_OFF;
1796 removed++;
1797 }
1798 }
1799
1800 cfg->lcore_count -= removed;
1801 }
1802
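/*
 * Derive the cpuset used for EAL control threads: start from the CPUs
 * *not* used by any enabled lcore, intersect that with the process's
 * startup affinity, and if nothing is left fall back to the main lcore's
 * cpuset.
 */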
1803 static void
1804 compute_ctrl_threads_cpuset(struct internal_config *internal_cfg)
1805 {
1806 rte_cpuset_t *cpuset = &internal_cfg->ctrl_cpuset;
1807 rte_cpuset_t default_set;
1808 unsigned int lcore_id;
1809
1810 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1811 if (rte_lcore_has_role(lcore_id, ROLE_OFF))
1812 continue;
1813 RTE_CPU_OR(cpuset, cpuset, &lcore_config[lcore_id].cpuset);
1814 }
1815 RTE_CPU_NOT(cpuset, cpuset);
1816
1817 if (pthread_getaffinity_np(pthread_self(), sizeof(rte_cpuset_t),
1818 &default_set))
1819 CPU_ZERO(&default_set);
1820
1821 RTE_CPU_AND(cpuset, cpuset, &default_set);
1822
1823 /* if no remaining cpu, use main lcore cpu affinity */
1824 if (!CPU_COUNT(cpuset)) {
1825 memcpy(cpuset, &lcore_config[rte_get_main_lcore()].cpuset,
1826 sizeof(*cpuset));
1827 }
1828 }
1829
1830 int
1831 eal_cleanup_config(struct internal_config *internal_cfg)
1832 {
1833 if (internal_cfg->hugefile_prefix != NULL)
1834 free(internal_cfg->hugefile_prefix);
1835 if (internal_cfg->hugepage_dir != NULL)
1836 free(internal_cfg->hugepage_dir);
1837 if (internal_cfg->user_mbuf_pool_ops_name != NULL)
1838 free(internal_cfg->user_mbuf_pool_ops_name);
1839
1840 return 0;
1841 }
1842
1843 int
1844 eal_adjust_config(struct internal_config *internal_cfg)
1845 {
1846 int i;
1847 struct rte_config *cfg = rte_eal_get_configuration();
1848 struct internal_config *internal_conf =
1849 eal_get_internal_configuration();
1850
1851 if (!core_parsed)
1852 eal_auto_detect_cores(cfg);
1853
1854 if (internal_conf->process_type == RTE_PROC_AUTO)
1855 internal_conf->process_type = eal_proc_type_detect();
1856
1857 /* default main lcore is the first one */
1858 if (!main_lcore_parsed) {
1859 cfg->main_lcore = rte_get_next_lcore(-1, 0, 0);
1860 if (cfg->main_lcore >= RTE_MAX_LCORE)
1861 return -1;
1862 lcore_config[cfg->main_lcore].core_role = ROLE_RTE;
1863 }
1864
1865 compute_ctrl_threads_cpuset(internal_cfg);
1866
1867 /* if no memory amounts were requested, this will result in 0 and
1868 * will be overridden later, right after eal_hugepage_info_init() */
1869 for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
1870 internal_cfg->memory += internal_cfg->socket_mem[i];
1871
1872 return 0;
1873 }
1874
1875 int
1876 eal_check_common_options(struct internal_config *internal_cfg)
1877 {
1878 struct rte_config *cfg = rte_eal_get_configuration();
1879 const struct internal_config *internal_conf =
1880 eal_get_internal_configuration();
1881
1882 if (cfg->lcore_role[cfg->main_lcore] != ROLE_RTE) {
1883 RTE_LOG(ERR, EAL, "Main lcore is not enabled for DPDK\n");
1884 return -1;
1885 }
1886
1887 if (internal_cfg->process_type == RTE_PROC_INVALID) {
1888 RTE_LOG(ERR, EAL, "Invalid process type specified\n");
1889 return -1;
1890 }
1891 if (internal_cfg->hugefile_prefix != NULL &&
1892 strlen(internal_cfg->hugefile_prefix) < 1) {
1893 RTE_LOG(ERR, EAL, "Invalid length of --" OPT_FILE_PREFIX " option\n");
1894 return -1;
1895 }
1896 if (internal_cfg->hugepage_dir != NULL &&
1897 strlen(internal_cfg->hugepage_dir) < 1) {
1898 RTE_LOG(ERR, EAL, "Invalid length of --" OPT_HUGE_DIR" option\n");
1899 return -1;
1900 }
1901 if (internal_cfg->user_mbuf_pool_ops_name != NULL &&
1902 strlen(internal_cfg->user_mbuf_pool_ops_name) < 1) {
1903 RTE_LOG(ERR, EAL, "Invalid length of --" OPT_MBUF_POOL_OPS_NAME" option\n");
1904 return -1;
1905 }
1906 if (index(eal_get_hugefile_prefix(), '%') != NULL) {
1907 RTE_LOG(ERR, EAL, "Invalid char, '%%', in --"OPT_FILE_PREFIX" "
1908 "option\n");
1909 return -1;
1910 }
1911 if (mem_parsed && internal_cfg->force_sockets == 1) {
1912 RTE_LOG(ERR, EAL, "Options -m and --"OPT_SOCKET_MEM" cannot "
1913 "be specified at the same time\n");
1914 return -1;
1915 }
1916 if (internal_cfg->no_hugetlbfs && internal_cfg->force_sockets == 1) {
1917 RTE_LOG(ERR, EAL, "Option --"OPT_SOCKET_MEM" cannot "
1918 "be specified together with --"OPT_NO_HUGE"\n");
1919 return -1;
1920 }
1921 if (internal_cfg->no_hugetlbfs && internal_cfg->hugepage_unlink &&
1922 !internal_cfg->in_memory) {
1923 RTE_LOG(ERR, EAL, "Option --"OPT_HUGE_UNLINK" cannot "
1924 "be specified together with --"OPT_NO_HUGE"\n");
1925 return -1;
1926 }
1927 if (internal_conf->force_socket_limits && internal_conf->legacy_mem) {
1928 RTE_LOG(ERR, EAL, "Option --"OPT_SOCKET_LIMIT
1929 " is only supported in non-legacy memory mode\n");
1930 }
1931 if (internal_cfg->single_file_segments &&
1932 internal_cfg->hugepage_unlink &&
1933 !internal_cfg->in_memory) {
1934 RTE_LOG(ERR, EAL, "Option --"OPT_SINGLE_FILE_SEGMENTS" is "
1935 "not compatible with --"OPT_HUGE_UNLINK"\n");
1936 return -1;
1937 }
1938 if (internal_cfg->legacy_mem &&
1939 internal_cfg->in_memory) {
1940 RTE_LOG(ERR, EAL, "Option --"OPT_LEGACY_MEM" is not compatible "
1941 "with --"OPT_IN_MEMORY"\n");
1942 return -1;
1943 }
1944 if (internal_cfg->legacy_mem && internal_cfg->match_allocations) {
1945 RTE_LOG(ERR, EAL, "Option --"OPT_LEGACY_MEM" is not compatible "
1946 "with --"OPT_MATCH_ALLOCATIONS"\n");
1947 return -1;
1948 }
1949 if (internal_cfg->no_hugetlbfs && internal_cfg->match_allocations) {
1950 RTE_LOG(ERR, EAL, "Option --"OPT_NO_HUGE" is not compatible "
1951 "with --"OPT_MATCH_ALLOCATIONS"\n");
1952 return -1;
1953 }
1954 if (internal_cfg->legacy_mem && internal_cfg->memory == 0) {
1955 RTE_LOG(NOTICE, EAL, "Static memory layout is selected, "
1956 "amount of reserved memory can be adjusted with "
1957 "-m or --"OPT_SOCKET_MEM"\n");
1958 }
1959
1960 return 0;
1961 }
1962
1963 uint16_t
1964 rte_vect_get_max_simd_bitwidth(void)
1965 {
1966 const struct internal_config *internal_conf =
1967 eal_get_internal_configuration();
1968 return internal_conf->max_simd_bitwidth.bitwidth;
1969 }
1970
1971 int
1972 rte_vect_set_max_simd_bitwidth(uint16_t bitwidth)
1973 {
1974 struct internal_config *internal_conf =
1975 eal_get_internal_configuration();
1976 if (internal_conf->max_simd_bitwidth.forced) {
1977 RTE_LOG(NOTICE, EAL, "Cannot set max SIMD bitwidth - user runtime override enabled\n");
1978 return -EPERM;
1979 }
1980
1981 if (bitwidth < RTE_VECT_SIMD_DISABLED || !rte_is_power_of_2(bitwidth)) {
1982 RTE_LOG(ERR, EAL, "Invalid bitwidth value!\n");
1983 return -EINVAL;
1984 }
1985 internal_conf->max_simd_bitwidth.bitwidth = bitwidth;
1986 return 0;
1987 }
1988
1989 void
1990 eal_common_usage(void)
1991 {
1992 printf("[options]\n\n"
1993 "EAL common options:\n"
1994 " -c COREMASK Hexadecimal bitmask of cores to run on\n"
1995 " -l CORELIST List of cores to run on\n"
1996 " The argument format is <c1>[-c2][,c3[-c4],...]\n"
1997 " where c1, c2, etc are core indexes between 0 and %d\n"
1998 " --"OPT_LCORES" COREMAP Map lcore set to physical cpu set\n"
1999 " The argument format is\n"
2000 " '<lcores[@cpus]>[<,lcores[@cpus]>...]'\n"
2001 " lcores and cpus list are grouped by '(' and ')'\n"
2002 " Within the group, '-' is used for range separator,\n"
2003 " ',' is used for single number separator.\n"
2004 " '( )' can be omitted for single element group,\n"
2005 " '@' can be omitted if cpus and lcores have the same value\n"
2006 " -s SERVICE COREMASK Hexadecimal bitmask of cores to be used as service cores\n"
2007 " --"OPT_MAIN_LCORE" ID Core ID that is used as main\n"
2008 " --"OPT_MBUF_POOL_OPS_NAME" Pool ops name for mbuf to use\n"
2009 " -n CHANNELS Number of memory channels\n"
2010 " -m MB Memory to allocate (see also --"OPT_SOCKET_MEM")\n"
2011 " -r RANKS Force number of memory ranks (don't detect)\n"
2012 " -b, --block Add a device to the blocked list.\n"
2013 " Prevent EAL from using this device. The argument\n"
2014 " format for PCI devices is <domain:bus:devid.func>.\n"
2015 " -a, --allow Add a device to the allow list.\n"
2016 " Only use the specified devices. The argument format\n"
2017 " for PCI devices is <[domain:]bus:devid.func>.\n"
2018 " This option can be present several times.\n"
2019 " [NOTE: " OPT_DEV_ALLOW " cannot be used with "OPT_DEV_BLOCK" option]\n"
2020 " --"OPT_VDEV" Add a virtual device.\n"
2021 " The argument format is <driver><id>[,key=val,...]\n"
2022 " (ex: --vdev=net_pcap0,iface=eth2).\n"
2023 " --"OPT_IOVA_MODE" Set IOVA mode. 'pa' for IOVA_PA\n"
2024 " 'va' for IOVA_VA\n"
2025 " -d LIB.so|DIR Add a driver or driver directory\n"
2026 " (can be used multiple times)\n"
2027 " --"OPT_VMWARE_TSC_MAP" Use VMware TSC map instead of native RDTSC\n"
2028 " --"OPT_PROC_TYPE" Type of this process (primary|secondary|auto)\n"
2029 #ifndef RTE_EXEC_ENV_WINDOWS
2030 " --"OPT_SYSLOG" Set syslog facility\n"
2031 #endif
2032 " --"OPT_LOG_LEVEL"=<int> Set global log level\n"
2033 " --"OPT_LOG_LEVEL"=<type-match>:<int>\n"
2034 " Set specific log level\n"
2035 #ifndef RTE_EXEC_ENV_WINDOWS
2036 " --"OPT_TRACE"=<regex-match>\n"
2037 " Enable trace based on regular expression trace name.\n"
2038 " By default, the trace is disabled.\n"
2039 " User must specify this option to enable trace.\n"
2040 " --"OPT_TRACE_DIR"=<directory path>\n"
2041 " Specify trace directory for trace output.\n"
2042 " By default, trace output will created at\n"
2043 " $HOME directory and parameter must be\n"
2044 " specified once only.\n"
2045 " --"OPT_TRACE_BUF_SIZE"=<int>\n"
2046 " Specify maximum size of allocated memory\n"
2047 " for trace output for each thread. Valid\n"
2048 " unit can be either 'B|K|M' for 'Bytes',\n"
2049 " 'KBytes' and 'MBytes' respectively.\n"
2050 " Default is 1MB and parameter must be\n"
2051 " specified once only.\n"
2052 " --"OPT_TRACE_MODE"=<o[verwrite] | d[iscard]>\n"
2053 " Specify the mode of update of trace\n"
2054 " output file. Either update on a file can\n"
2055 " be wrapped or discarded when file size\n"
2056 " reaches its maximum limit.\n"
2057 " Default mode is 'overwrite' and parameter\n"
2058 " must be specified once only.\n"
2059 #endif /* !RTE_EXEC_ENV_WINDOWS */
2060 " -v Display version information on startup\n"
2061 " -h, --help This help\n"
2062 " --"OPT_IN_MEMORY" Operate entirely in memory. This will\n"
2063 " disable secondary process support\n"
2064 " --"OPT_BASE_VIRTADDR" Base virtual address\n"
2065 " --"OPT_TELEMETRY" Enable telemetry support (on by default)\n"
2066 " --"OPT_NO_TELEMETRY" Disable telemetry support\n"
2067 " --"OPT_FORCE_MAX_SIMD_BITWIDTH" Force the max SIMD bitwidth\n"
2068 "\nEAL options for DEBUG use only:\n"
2069 " --"OPT_HUGE_UNLINK" Unlink hugepage files after init\n"
2070 " --"OPT_NO_HUGE" Use malloc instead of hugetlbfs\n"
2071 " --"OPT_NO_PCI" Disable PCI\n"
2072 " --"OPT_NO_HPET" Disable HPET\n"
2073 " --"OPT_NO_SHCONF" No shared config (mmap'd files)\n"
2074 "\n", RTE_MAX_LCORE);
2075 }
2076