xref: /f-stack/dpdk/lib/librte_eal/linux/eal.c (revision 2d9fd380)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2018 Intel Corporation.
3  * Copyright(c) 2012-2014 6WIND S.A.
4  */
5 
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <stdarg.h>
11 #include <unistd.h>
12 #include <pthread.h>
13 #include <syslog.h>
14 #include <getopt.h>
15 #include <sys/file.h>
16 #include <dirent.h>
17 #include <fcntl.h>
18 #include <fnmatch.h>
19 #include <stddef.h>
20 #include <errno.h>
21 #include <limits.h>
22 #include <sys/mman.h>
23 #include <sys/queue.h>
24 #include <sys/stat.h>
25 #if defined(RTE_ARCH_X86)
26 #include <sys/io.h>
27 #endif
28 #include <linux/version.h>
29 
30 #include <rte_compat.h>
31 #include <rte_common.h>
32 #include <rte_debug.h>
33 #include <rte_memory.h>
34 #include <rte_launch.h>
35 #include <rte_eal.h>
36 #include <rte_errno.h>
37 #include <rte_per_lcore.h>
38 #include <rte_lcore.h>
39 #include <rte_service_component.h>
40 #include <rte_log.h>
41 #include <rte_random.h>
42 #include <rte_cycles.h>
43 #include <rte_string_fns.h>
44 #include <rte_cpuflags.h>
45 #include <rte_interrupts.h>
46 #include <rte_bus.h>
47 #include <rte_dev.h>
48 #include <rte_devargs.h>
49 #include <rte_version.h>
50 #include <malloc_heap.h>
51 #include <rte_vfio.h>
52 #include <rte_telemetry.h>
53 
54 #include "eal_private.h"
55 #include "eal_thread.h"
56 #include "eal_internal_cfg.h"
57 #include "eal_filesystem.h"
58 #include "eal_hugepages.h"
59 #include "eal_memcfg.h"
60 #include "eal_trace.h"
61 #include "eal_options.h"
62 #include "eal_vfio.h"
63 #include "hotplug_mp.h"
64 
65 #define MEMSIZE_IF_NO_HUGE_PAGE (64ULL * 1024ULL * 1024ULL)
66 
67 #define SOCKET_MEM_STRLEN (RTE_MAX_NUMA_NODES * 10)
68 
69 #define KERNEL_IOMMU_GROUPS_PATH "/sys/kernel/iommu_groups"
70 
71 /* define the fd variable here, because the file needs to be kept open for the
72  * duration of the program, as we hold a write lock on it in the primary process */
73 static int mem_cfg_fd = -1;
74 
75 static struct flock wr_lock = {
76 		.l_type = F_WRLCK,
77 		.l_whence = SEEK_SET,
78 		.l_start = offsetof(struct rte_mem_config, memsegs),
79 		.l_len = RTE_SIZEOF_FIELD(struct rte_mem_config, memsegs),
80 };
81 
82 /* internal configuration (per-core) */
83 struct lcore_config lcore_config[RTE_MAX_LCORE];
84 
85 /* used by rte_rdtsc() */
86 int rte_cycles_vmware_tsc_map;
87 
88 static const char *default_runtime_dir = "/var/run";
89 
90 int
91 eal_create_runtime_dir(void)
92 {
93 	const char *directory = default_runtime_dir;
94 	const char *xdg_runtime_dir = getenv("XDG_RUNTIME_DIR");
95 	const char *fallback = "/tmp";
96 	char run_dir[PATH_MAX];
97 	char tmp[PATH_MAX];
98 	int ret;
99 
100 	if (getuid() != 0) {
101 		/* try XDG path first, fall back to /tmp */
102 		if (xdg_runtime_dir != NULL)
103 			directory = xdg_runtime_dir;
104 		else
105 			directory = fallback;
106 	}
107 	/* create DPDK subdirectory under runtime dir */
108 	ret = snprintf(tmp, sizeof(tmp), "%s/dpdk", directory);
109 	if (ret < 0 || ret == sizeof(tmp)) {
110 		RTE_LOG(ERR, EAL, "Error creating DPDK runtime path name\n");
111 		return -1;
112 	}
113 
114 	/* create prefix-specific subdirectory under DPDK runtime dir */
115 	ret = snprintf(run_dir, sizeof(run_dir), "%s/%s",
116 			tmp, eal_get_hugefile_prefix());
117 	if (ret < 0 || ret == sizeof(run_dir)) {
118 		RTE_LOG(ERR, EAL, "Error creating prefix-specific runtime path name\n");
119 		return -1;
120 	}
121 
122 	/* create the path if it doesn't exist. no "mkdir -p" here, so do it
123 	 * step by step.
124 	 */
125 	ret = mkdir(tmp, 0700);
126 	if (ret < 0 && errno != EEXIST) {
127 		RTE_LOG(ERR, EAL, "Error creating '%s': %s\n",
128 			tmp, strerror(errno));
129 		return -1;
130 	}
131 
132 	ret = mkdir(run_dir, 0700);
133 	if (ret < 0 && errno != EEXIST) {
134 		RTE_LOG(ERR, EAL, "Error creating '%s': %s\n",
135 			run_dir, strerror(errno));
136 		return -1;
137 	}
138 
139 	if (eal_set_runtime_dir(run_dir, sizeof(run_dir)))
140 		return -1;
141 
142 	return 0;
143 }
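/*
 * Illustrative note: with the default file prefix ("rte"), a root user ends
 * up with /var/run/dpdk/rte/, while a non-root user with
 * XDG_RUNTIME_DIR=/run/user/1000 and a hypothetical --file-prefix=app0 gets
 * /run/user/1000/dpdk/app0/, falling back to /tmp/dpdk/app0/ when
 * XDG_RUNTIME_DIR is unset.
 */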
144 
145 int
146 eal_clean_runtime_dir(void)
147 {
148 	const char *runtime_dir = rte_eal_get_runtime_dir();
149 	DIR *dir;
150 	struct dirent *dirent;
151 	int dir_fd, fd, lck_result;
152 	static const char * const filters[] = {
153 		"fbarray_*",
154 		"mp_socket_*"
155 	};
156 
157 	/* open directory */
158 	dir = opendir(runtime_dir);
159 	if (!dir) {
160 		RTE_LOG(ERR, EAL, "Unable to open runtime directory %s\n",
161 				runtime_dir);
162 		goto error;
163 	}
164 	dir_fd = dirfd(dir);
165 
166 	/* lock the directory before doing anything, to avoid races */
167 	if (flock(dir_fd, LOCK_EX) < 0) {
168 		RTE_LOG(ERR, EAL, "Unable to lock runtime directory %s\n",
169 			runtime_dir);
170 		goto error;
171 	}
172 
173 	dirent = readdir(dir);
174 	if (!dirent) {
175 		RTE_LOG(ERR, EAL, "Unable to read runtime directory %s\n",
176 				runtime_dir);
177 		goto error;
178 	}
179 
180 	while (dirent != NULL) {
181 		unsigned int f_idx;
182 		bool skip = true;
183 
184 		/* skip files that don't match the patterns */
185 		for (f_idx = 0; f_idx < RTE_DIM(filters); f_idx++) {
186 			const char *filter = filters[f_idx];
187 
188 			if (fnmatch(filter, dirent->d_name, 0) == 0) {
189 				skip = false;
190 				break;
191 			}
192 		}
193 		if (skip) {
194 			dirent = readdir(dir);
195 			continue;
196 		}
197 
198 		/* try to lock the file */
199 		fd = openat(dir_fd, dirent->d_name, O_RDONLY);
200 
201 		/* skip to next file */
202 		if (fd == -1) {
203 			dirent = readdir(dir);
204 			continue;
205 		}
206 
207 		/* non-blocking lock */
208 		lck_result = flock(fd, LOCK_EX | LOCK_NB);
209 
210 		/* if lock succeeds, remove the file */
211 		if (lck_result != -1)
212 			unlinkat(dir_fd, dirent->d_name, 0);
213 		close(fd);
214 		dirent = readdir(dir);
215 	}
216 
217 	/* closedir closes dir_fd and drops the lock */
218 	closedir(dir);
219 	return 0;
220 
221 error:
222 	if (dir)
223 		closedir(dir);
224 
225 	RTE_LOG(ERR, EAL, "Error while clearing runtime dir: %s\n",
226 		strerror(errno));
227 
228 	return -1;
229 }
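/*
 * Note: files that are still flock()'d by a live process fail the
 * non-blocking LOCK_EX attempt above and are left untouched, so only
 * stale fbarray_* and mp_socket_* files left behind by dead processes
 * are unlinked here.
 */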
230 
231 /* parse a sysfs (or other) file containing one integer value */
232 int
233 eal_parse_sysfs_value(const char *filename, unsigned long *val)
234 {
235 	FILE *f;
236 	char buf[BUFSIZ];
237 	char *end = NULL;
238 
239 	if ((f = fopen(filename, "r")) == NULL) {
240 		RTE_LOG(ERR, EAL, "%s(): cannot open sysfs value %s\n",
241 			__func__, filename);
242 		return -1;
243 	}
244 
245 	if (fgets(buf, sizeof(buf), f) == NULL) {
246 		RTE_LOG(ERR, EAL, "%s(): cannot read sysfs value %s\n",
247 			__func__, filename);
248 		fclose(f);
249 		return -1;
250 	}
251 	*val = strtoul(buf, &end, 0);
252 	if ((buf[0] == '\0') || (end == NULL) || (*end != '\n')) {
253 		RTE_LOG(ERR, EAL, "%s(): cannot parse sysfs value %s\n",
254 				__func__, filename);
255 		fclose(f);
256 		return -1;
257 	}
258 	fclose(f);
259 	return 0;
260 }
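/*
 * Illustrative usage (hypothetical caller), e.g. reading the number of
 * reserved 2M hugepages from sysfs:
 *
 *     unsigned long nr_pages;
 *
 *     if (eal_parse_sysfs_value(
 *             "/sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages",
 *             &nr_pages) == 0)
 *         printf("2M hugepages reserved: %lu\n", nr_pages);
 */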
261 
262 
263 /* create memory configuration in shared/mmap memory. Take out
264  * a write lock on the memsegs, so we can auto-detect primary/secondary.
265  * This means we never close the file while running (auto-close on exit).
266  * We also don't lock the whole file, so that in future we can use read-locks
267  * on other parts, e.g. memzones, to detect if there are running secondary
268  * processes. */
269 static int
270 rte_eal_config_create(void)
271 {
272 	struct rte_config *config = rte_eal_get_configuration();
273 	size_t page_sz = sysconf(_SC_PAGE_SIZE);
274 	size_t cfg_len = sizeof(*config->mem_config);
275 	size_t cfg_len_aligned = RTE_ALIGN(cfg_len, page_sz);
276 	void *rte_mem_cfg_addr, *mapped_mem_cfg_addr;
277 	int retval;
278 	const struct internal_config *internal_conf =
279 		eal_get_internal_configuration();
280 
281 	const char *pathname = eal_runtime_config_path();
282 
283 	if (internal_conf->no_shconf)
284 		return 0;
285 
286 	/* map the config before hugepage address so that we don't waste a page */
287 	if (internal_conf->base_virtaddr != 0)
288 		rte_mem_cfg_addr = (void *)
289 			RTE_ALIGN_FLOOR(internal_conf->base_virtaddr -
290 			sizeof(struct rte_mem_config), page_sz);
291 	else
292 		rte_mem_cfg_addr = NULL;
293 
294 	if (mem_cfg_fd < 0){
295 		mem_cfg_fd = open(pathname, O_RDWR | O_CREAT, 0600);
296 		if (mem_cfg_fd < 0) {
297 			RTE_LOG(ERR, EAL, "Cannot open '%s' for rte_mem_config\n",
298 				pathname);
299 			return -1;
300 		}
301 	}
302 
303 	retval = ftruncate(mem_cfg_fd, cfg_len);
304 	if (retval < 0){
305 		close(mem_cfg_fd);
306 		mem_cfg_fd = -1;
307 		RTE_LOG(ERR, EAL, "Cannot resize '%s' for rte_mem_config\n",
308 			pathname);
309 		return -1;
310 	}
311 
312 	retval = fcntl(mem_cfg_fd, F_SETLK, &wr_lock);
313 	if (retval < 0){
314 		close(mem_cfg_fd);
315 		mem_cfg_fd = -1;
316 		RTE_LOG(ERR, EAL, "Cannot create lock on '%s'. Is another primary "
317 			"process running?\n", pathname);
318 		return -1;
319 	}
320 
321 	/* reserve space for config */
322 	rte_mem_cfg_addr = eal_get_virtual_area(rte_mem_cfg_addr,
323 			&cfg_len_aligned, page_sz, 0, 0);
324 	if (rte_mem_cfg_addr == NULL) {
325 		RTE_LOG(ERR, EAL, "Cannot mmap memory for rte_config\n");
326 		close(mem_cfg_fd);
327 		mem_cfg_fd = -1;
328 		return -1;
329 	}
330 
331 	/* remap the actual file into the space we've just reserved */
332 	mapped_mem_cfg_addr = mmap(rte_mem_cfg_addr,
333 			cfg_len_aligned, PROT_READ | PROT_WRITE,
334 			MAP_SHARED | MAP_FIXED, mem_cfg_fd, 0);
335 	if (mapped_mem_cfg_addr == MAP_FAILED) {
336 		munmap(rte_mem_cfg_addr, cfg_len);
337 		close(mem_cfg_fd);
338 		mem_cfg_fd = -1;
339 		RTE_LOG(ERR, EAL, "Cannot remap memory for rte_config\n");
340 		return -1;
341 	}
342 
343 	memcpy(rte_mem_cfg_addr, config->mem_config, sizeof(struct rte_mem_config));
344 	config->mem_config = rte_mem_cfg_addr;
345 
346 	/* store address of the config in the config itself so that secondary
347 	 * processes could later map the config into this exact location
348 	 */
349 	config->mem_config->mem_cfg_addr = (uintptr_t) rte_mem_cfg_addr;
350 	config->mem_config->dma_maskbits = 0;
351 
352 	return 0;
353 }
354 
355 /* attach to an existing shared memory config */
356 static int
357 rte_eal_config_attach(void)
358 {
359 	struct rte_config *config = rte_eal_get_configuration();
360 	struct rte_mem_config *mem_config;
361 	const struct internal_config *internal_conf =
362 		eal_get_internal_configuration();
363 
364 	const char *pathname = eal_runtime_config_path();
365 
366 	if (internal_conf->no_shconf)
367 		return 0;
368 
369 	if (mem_cfg_fd < 0){
370 		mem_cfg_fd = open(pathname, O_RDWR);
371 		if (mem_cfg_fd < 0) {
372 			RTE_LOG(ERR, EAL, "Cannot open '%s' for rte_mem_config\n",
373 				pathname);
374 			return -1;
375 		}
376 	}
377 
378 	/* map it as read-only first */
379 	mem_config = (struct rte_mem_config *) mmap(NULL, sizeof(*mem_config),
380 			PROT_READ, MAP_SHARED, mem_cfg_fd, 0);
381 	if (mem_config == MAP_FAILED) {
382 		close(mem_cfg_fd);
383 		mem_cfg_fd = -1;
384 		RTE_LOG(ERR, EAL, "Cannot mmap memory for rte_config! error %i (%s)\n",
385 			errno, strerror(errno));
386 		return -1;
387 	}
388 
389 	config->mem_config = mem_config;
390 
391 	return 0;
392 }
393 
394 /* reattach the shared config at the exact memory location where the primary process mapped it */
395 static int
396 rte_eal_config_reattach(void)
397 {
398 	struct rte_config *config = rte_eal_get_configuration();
399 	struct rte_mem_config *mem_config;
400 	void *rte_mem_cfg_addr;
401 	const struct internal_config *internal_conf =
402 		eal_get_internal_configuration();
403 
404 	if (internal_conf->no_shconf)
405 		return 0;
406 
407 	/* save the address primary process has mapped shared config to */
408 	rte_mem_cfg_addr =
409 		(void *) (uintptr_t) config->mem_config->mem_cfg_addr;
410 
411 	/* unmap original config */
412 	munmap(config->mem_config, sizeof(struct rte_mem_config));
413 
414 	/* remap the config at proper address */
415 	mem_config = (struct rte_mem_config *) mmap(rte_mem_cfg_addr,
416 			sizeof(*mem_config), PROT_READ | PROT_WRITE, MAP_SHARED,
417 			mem_cfg_fd, 0);
418 
419 	close(mem_cfg_fd);
420 	mem_cfg_fd = -1;
421 
422 	if (mem_config == MAP_FAILED || mem_config != rte_mem_cfg_addr) {
423 		if (mem_config != MAP_FAILED) {
424 			/* errno is stale, don't use */
425 			RTE_LOG(ERR, EAL, "Cannot mmap memory for rte_config at [%p], got [%p]"
426 				" - please use '--" OPT_BASE_VIRTADDR
427 				"' option\n", rte_mem_cfg_addr, mem_config);
428 			munmap(mem_config, sizeof(struct rte_mem_config));
429 			return -1;
430 		}
431 		RTE_LOG(ERR, EAL, "Cannot mmap memory for rte_config! error %i (%s)\n",
432 			errno, strerror(errno));
433 		return -1;
434 	}
435 
436 	config->mem_config = mem_config;
437 
438 	return 0;
439 }
440 
441 /* Detect if we are a primary or a secondary process */
442 enum rte_proc_type_t
443 eal_proc_type_detect(void)
444 {
445 	enum rte_proc_type_t ptype = RTE_PROC_PRIMARY;
446 	const char *pathname = eal_runtime_config_path();
447 	const struct internal_config *internal_conf =
448 		eal_get_internal_configuration();
449 
450 	/* if there is no shared config, there can be no secondary processes */
451 	if (!internal_conf->no_shconf) {
452 		/* if we can open the file but not get a write-lock we are a
453 		 * secondary process. NOTE: if we get a file handle back, we
454 		 * keep that open and don't close it to prevent a race condition
455 		 * between multiple opens.
456 		 */
457 		if (((mem_cfg_fd = open(pathname, O_RDWR)) >= 0) &&
458 				(fcntl(mem_cfg_fd, F_SETLK, &wr_lock) < 0))
459 			ptype = RTE_PROC_SECONDARY;
460 	}
461 
462 	RTE_LOG(INFO, EAL, "Auto-detected process type: %s\n",
463 			ptype == RTE_PROC_PRIMARY ? "PRIMARY" : "SECONDARY");
464 
465 	return ptype;
466 }
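/*
 * Illustrative example: if a primary process already holds the write
 * lock on the shared config file, a second instance launched with the
 * same --file-prefix will open() the file successfully but fail the
 * fcntl(F_SETLK) above, and is therefore auto-detected as SECONDARY.
 */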
467 
468 /* Sets up the rte_config structure with the pointer to the shared memory config. */
469 static int
470 rte_config_init(void)
471 {
472 	struct rte_config *config = rte_eal_get_configuration();
473 	const struct internal_config *internal_conf =
474 		eal_get_internal_configuration();
475 
476 	config->process_type = internal_conf->process_type;
477 
478 	switch (config->process_type) {
479 	case RTE_PROC_PRIMARY:
480 		if (rte_eal_config_create() < 0)
481 			return -1;
482 		eal_mcfg_update_from_internal();
483 		break;
484 	case RTE_PROC_SECONDARY:
485 		if (rte_eal_config_attach() < 0)
486 			return -1;
487 		eal_mcfg_wait_complete();
488 		if (eal_mcfg_check_version() < 0) {
489 			RTE_LOG(ERR, EAL, "Primary and secondary process DPDK version mismatch\n");
490 			return -1;
491 		}
492 		if (rte_eal_config_reattach() < 0)
493 			return -1;
494 		if (!__rte_mp_enable()) {
495 			RTE_LOG(ERR, EAL, "Primary process refused secondary attachment\n");
496 			return -1;
497 		}
498 		eal_mcfg_update_internal();
499 		break;
500 	case RTE_PROC_AUTO:
501 	case RTE_PROC_INVALID:
502 		RTE_LOG(ERR, EAL, "Invalid process type %d\n",
503 			config->process_type);
504 		return -1;
505 	}
506 
507 	return 0;
508 }
509 
510 /* Unlocks hugepage directories that were locked by eal_hugepage_info_init */
511 static void
512 eal_hugedirs_unlock(void)
513 {
514 	int i;
515 	struct internal_config *internal_conf =
516 		eal_get_internal_configuration();
517 
518 	for (i = 0; i < MAX_HUGEPAGE_SIZES; i++)
519 	{
520 		/* skip uninitialized */
521 		if (internal_conf->hugepage_info[i].lock_descriptor < 0)
522 			continue;
523 		/* unlock hugepage file */
524 		flock(internal_conf->hugepage_info[i].lock_descriptor, LOCK_UN);
525 		close(internal_conf->hugepage_info[i].lock_descriptor);
526 		/* reset the field */
527 		internal_conf->hugepage_info[i].lock_descriptor = -1;
528 	}
529 }
530 
531 /* display usage */
532 static void
533 eal_usage(const char *prgname)
534 {
535 	rte_usage_hook_t hook = eal_get_application_usage_hook();
536 
537 	printf("\nUsage: %s ", prgname);
538 	eal_common_usage();
539 	printf("EAL Linux options:\n"
540 	       "  --"OPT_SOCKET_MEM"        Memory to allocate on sockets (comma separated values)\n"
541 	       "  --"OPT_SOCKET_LIMIT"      Limit memory allocation on sockets (comma separated values)\n"
542 	       "  --"OPT_HUGE_DIR"          Directory where hugetlbfs is mounted\n"
543 	       "  --"OPT_FILE_PREFIX"       Prefix for hugepage filenames\n"
544 	       "  --"OPT_CREATE_UIO_DEV"    Create /dev/uioX (usually done by hotplug)\n"
545 	       "  --"OPT_VFIO_INTR"         Interrupt mode for VFIO (legacy|msi|msix)\n"
546 	       "  --"OPT_VFIO_VF_TOKEN"     VF token (UUID) shared between SR-IOV PF and VFs\n"
547 	       "  --"OPT_LEGACY_MEM"        Legacy memory mode (no dynamic allocation, contiguous segments)\n"
548 	       "  --"OPT_SINGLE_FILE_SEGMENTS" Put all hugepage memory in single files\n"
549 	       "  --"OPT_MATCH_ALLOCATIONS" Free hugepages exactly as allocated\n"
550 	       "\n");
551 	/* Allow the application to print its usage message too if hook is set */
552 	if (hook) {
553 		printf("===== Application Usage =====\n\n");
554 		(hook)(prgname);
555 	}
556 }
557 
558 static int
559 eal_parse_socket_arg(char *strval, volatile uint64_t *socket_arg)
560 {
561 	char * arg[RTE_MAX_NUMA_NODES];
562 	char *end;
563 	int arg_num, i, len;
564 	uint64_t total_mem = 0;
565 
566 	len = strnlen(strval, SOCKET_MEM_STRLEN);
567 	if (len == SOCKET_MEM_STRLEN) {
568 		RTE_LOG(ERR, EAL, "--socket-mem is too long\n");
569 		return -1;
570 	}
571 
572 	/* all other error cases will be caught later */
573 	if (!isdigit(strval[len-1]))
574 		return -1;
575 
576 	/* split the optarg into separate socket values */
577 	arg_num = rte_strsplit(strval, len,
578 			arg, RTE_MAX_NUMA_NODES, ',');
579 
580 	/* if split failed, or 0 arguments */
581 	if (arg_num <= 0)
582 		return -1;
583 
584 	/* parse each defined socket option */
585 	errno = 0;
586 	for (i = 0; i < arg_num; i++) {
587 		uint64_t val;
588 		end = NULL;
589 		val = strtoull(arg[i], &end, 10);
590 
591 		/* check for invalid input */
592 		if ((errno != 0)  ||
593 				(arg[i][0] == '\0') || (end == NULL) || (*end != '\0'))
594 			return -1;
595 		val <<= 20;
596 		total_mem += val;
597 		socket_arg[i] = val;
598 	}
599 
600 	return 0;
601 }
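/*
 * Example (illustrative): "--socket-mem=1024,0,2048" is split on commas
 * and each value is taken as megabytes (val <<= 20), so socket_arg[0]
 * becomes 1 GiB, socket_arg[1] stays 0 and socket_arg[2] becomes 2 GiB.
 */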
602 
603 static int
604 eal_parse_vfio_intr(const char *mode)
605 {
606 	struct internal_config *internal_conf =
607 		eal_get_internal_configuration();
608 	unsigned i;
609 	static struct {
610 		const char *name;
611 		enum rte_intr_mode value;
612 	} map[] = {
613 		{ "legacy", RTE_INTR_MODE_LEGACY },
614 		{ "msi", RTE_INTR_MODE_MSI },
615 		{ "msix", RTE_INTR_MODE_MSIX },
616 	};
617 
618 	for (i = 0; i < RTE_DIM(map); i++) {
619 		if (!strcmp(mode, map[i].name)) {
620 			internal_conf->vfio_intr_mode = map[i].value;
621 			return 0;
622 		}
623 	}
624 	return -1;
625 }
626 
627 static int
628 eal_parse_vfio_vf_token(const char *vf_token)
629 {
630 	struct internal_config *cfg = eal_get_internal_configuration();
631 	rte_uuid_t uuid;
632 
633 	if (!rte_uuid_parse(vf_token, uuid)) {
634 		rte_uuid_copy(cfg->vfio_vf_token, uuid);
635 		return 0;
636 	}
637 
638 	return -1;
639 }
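/*
 * Illustrative example: the VF token is expected to be a UUID string,
 * e.g. --vfio-vf-token=14d63f20-8445-11ea-8900-1f9ce7d5650d (a made-up
 * value), which rte_uuid_parse() converts to the binary rte_uuid_t
 * copied into the internal configuration above.
 */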
640 
641 /* Parse the arguments for --log-level only */
642 static void
643 eal_log_level_parse(int argc, char **argv)
644 {
645 	int opt;
646 	char **argvopt;
647 	int option_index;
648 	const int old_optind = optind;
649 	const int old_optopt = optopt;
650 	char * const old_optarg = optarg;
651 	struct internal_config *internal_conf =
652 		eal_get_internal_configuration();
653 
654 	argvopt = argv;
655 	optind = 1;
656 
657 	while ((opt = getopt_long(argc, argvopt, eal_short_options,
658 				  eal_long_options, &option_index)) != EOF) {
659 
660 		int ret;
661 
662 		/* getopt is not happy, stop right now */
663 		if (opt == '?')
664 			break;
665 
666 		ret = (opt == OPT_LOG_LEVEL_NUM) ?
667 			eal_parse_common_option(opt, optarg, internal_conf) : 0;
668 
669 		/* common parser is not happy */
670 		if (ret < 0)
671 			break;
672 	}
673 
674 	/* restore getopt lib */
675 	optind = old_optind;
676 	optopt = old_optopt;
677 	optarg = old_optarg;
678 }
679 
680 /* Parse the argument given in the command line of the application */
681 static int
682 eal_parse_args(int argc, char **argv)
683 {
684 	int opt, ret;
685 	char **argvopt;
686 	int option_index;
687 	char *prgname = argv[0];
688 	const int old_optind = optind;
689 	const int old_optopt = optopt;
690 	char * const old_optarg = optarg;
691 	struct internal_config *internal_conf =
692 		eal_get_internal_configuration();
693 
694 	argvopt = argv;
695 	optind = 1;
696 
697 	while ((opt = getopt_long(argc, argvopt, eal_short_options,
698 				  eal_long_options, &option_index)) != EOF) {
699 
700 		/* getopt didn't recognise the option */
701 		if (opt == '?') {
702 			eal_usage(prgname);
703 			ret = -1;
704 			goto out;
705 		}
706 
707 		ret = eal_parse_common_option(opt, optarg, internal_conf);
708 		/* common parser is not happy */
709 		if (ret < 0) {
710 			eal_usage(prgname);
711 			ret = -1;
712 			goto out;
713 		}
714 		/* common parser handled this option */
715 		if (ret == 0)
716 			continue;
717 
718 		switch (opt) {
719 		case 'h':
720 			eal_usage(prgname);
721 			exit(EXIT_SUCCESS);
722 
723 		case OPT_HUGE_DIR_NUM:
724 		{
725 			char *hdir = strdup(optarg);
726 			if (hdir == NULL)
727 				RTE_LOG(ERR, EAL, "Could not store hugepage directory\n");
728 			else {
729 				/* free old hugepage dir */
730 				if (internal_conf->hugepage_dir != NULL)
731 					free(internal_conf->hugepage_dir);
732 				internal_conf->hugepage_dir = hdir;
733 			}
734 			break;
735 		}
736 		case OPT_FILE_PREFIX_NUM:
737 		{
738 			char *prefix = strdup(optarg);
739 			if (prefix == NULL)
740 				RTE_LOG(ERR, EAL, "Could not store file prefix\n");
741 			else {
742 				/* free old prefix */
743 				if (internal_conf->hugefile_prefix != NULL)
744 					free(internal_conf->hugefile_prefix);
745 				internal_conf->hugefile_prefix = prefix;
746 			}
747 			break;
748 		}
749 		case OPT_SOCKET_MEM_NUM:
750 			if (eal_parse_socket_arg(optarg,
751 					internal_conf->socket_mem) < 0) {
752 				RTE_LOG(ERR, EAL, "invalid parameters for --"
753 						OPT_SOCKET_MEM "\n");
754 				eal_usage(prgname);
755 				ret = -1;
756 				goto out;
757 			}
758 			internal_conf->force_sockets = 1;
759 			break;
760 
761 		case OPT_SOCKET_LIMIT_NUM:
762 			if (eal_parse_socket_arg(optarg,
763 					internal_conf->socket_limit) < 0) {
764 				RTE_LOG(ERR, EAL, "invalid parameters for --"
765 						OPT_SOCKET_LIMIT "\n");
766 				eal_usage(prgname);
767 				ret = -1;
768 				goto out;
769 			}
770 			internal_conf->force_socket_limits = 1;
771 			break;
772 
773 		case OPT_VFIO_INTR_NUM:
774 			if (eal_parse_vfio_intr(optarg) < 0) {
775 				RTE_LOG(ERR, EAL, "invalid parameters for --"
776 						OPT_VFIO_INTR "\n");
777 				eal_usage(prgname);
778 				ret = -1;
779 				goto out;
780 			}
781 			break;
782 
783 		case OPT_VFIO_VF_TOKEN_NUM:
784 			if (eal_parse_vfio_vf_token(optarg) < 0) {
785 				RTE_LOG(ERR, EAL, "invalid parameters for --"
786 						OPT_VFIO_VF_TOKEN "\n");
787 				eal_usage(prgname);
788 				ret = -1;
789 				goto out;
790 			}
791 			break;
792 
793 		case OPT_CREATE_UIO_DEV_NUM:
794 			internal_conf->create_uio_dev = 1;
795 			break;
796 
797 		case OPT_MBUF_POOL_OPS_NAME_NUM:
798 		{
799 			char *ops_name = strdup(optarg);
800 			if (ops_name == NULL)
801 				RTE_LOG(ERR, EAL, "Could not store mbuf pool ops name\n");
802 			else {
803 				/* free old ops name */
804 				if (internal_conf->user_mbuf_pool_ops_name !=
805 						NULL)
806 					free(internal_conf->user_mbuf_pool_ops_name);
807 
808 				internal_conf->user_mbuf_pool_ops_name =
809 						ops_name;
810 			}
811 			break;
812 		}
813 		case OPT_MATCH_ALLOCATIONS_NUM:
814 			internal_conf->match_allocations = 1;
815 			break;
816 
817 		default:
818 			if (opt < OPT_LONG_MIN_NUM && isprint(opt)) {
819 				RTE_LOG(ERR, EAL, "Option %c is not supported "
820 					"on Linux\n", opt);
821 			} else if (opt >= OPT_LONG_MIN_NUM &&
822 				   opt < OPT_LONG_MAX_NUM) {
823 				RTE_LOG(ERR, EAL, "Option %s is not supported "
824 					"on Linux\n",
825 					eal_long_options[option_index].name);
826 			} else {
827 				RTE_LOG(ERR, EAL, "Option %d is not supported "
828 					"on Linux\n", opt);
829 			}
830 			eal_usage(prgname);
831 			ret = -1;
832 			goto out;
833 		}
834 	}
835 
836 	/* create runtime data directory */
837 	if (internal_conf->no_shconf == 0 &&
838 			eal_create_runtime_dir() < 0) {
839 		RTE_LOG(ERR, EAL, "Cannot create runtime directory\n");
840 		ret = -1;
841 		goto out;
842 	}
843 
844 	if (eal_adjust_config(internal_conf) != 0) {
845 		ret = -1;
846 		goto out;
847 	}
848 
849 	/* sanity checks */
850 	if (eal_check_common_options(internal_conf) != 0) {
851 		eal_usage(prgname);
852 		ret = -1;
853 		goto out;
854 	}
855 
856 	if (optind >= 0)
857 		argv[optind-1] = prgname;
858 	ret = optind-1;
859 
860 out:
861 	/* restore getopt lib */
862 	optind = old_optind;
863 	optopt = old_optopt;
864 	optarg = old_optarg;
865 
866 	return ret;
867 }
868 
869 static int
870 check_socket(const struct rte_memseg_list *msl, void *arg)
871 {
872 	int *socket_id = arg;
873 
874 	if (msl->external)
875 		return 0;
876 
877 	return *socket_id == msl->socket_id;
878 }
879 
880 static void
881 eal_check_mem_on_local_socket(void)
882 {
883 	int socket_id;
884 	const struct rte_config *config = rte_eal_get_configuration();
885 
886 	socket_id = rte_lcore_to_socket_id(config->main_lcore);
887 
888 	if (rte_memseg_list_walk(check_socket, &socket_id) == 0)
889 		RTE_LOG(WARNING, EAL, "WARNING: Main core has no memory on local socket!\n");
890 }
891 
892 static int
893 sync_func(__rte_unused void *arg)
894 {
895 	return 0;
896 }
897 
898 /*
899  * Request iopl privilege for all RPL, returns 0 on success
900  * iopl() call is mostly for the i386 architecture. For other architectures,
901  * return -1 to indicate IO privilege can't be changed in this way.
902  */
903 int
904 rte_eal_iopl_init(void)
905 {
906 #if defined(RTE_ARCH_X86)
907 	if (iopl(3) != 0)
908 		return -1;
909 #endif
910 	return 0;
911 }
912 
913 #ifdef VFIO_PRESENT
914 static int rte_eal_vfio_setup(void)
915 {
916 	if (rte_vfio_enable("vfio"))
917 		return -1;
918 
919 	return 0;
920 }
921 #endif
922 
923 static void rte_eal_init_alert(const char *msg)
924 {
925 	fprintf(stderr, "EAL: FATAL: %s\n", msg);
926 	RTE_LOG(ERR, EAL, "%s\n", msg);
927 }
928 
929 /*
930  * On Linux 3.6+, even if VFIO is not loaded, whenever IOMMU is enabled in the
931  * BIOS and in the kernel, /sys/kernel/iommu_groups path will contain kernel
932  * IOMMU groups. If IOMMU is not enabled, that path would be empty.
933  * Therefore, checking if the path is empty will tell us if IOMMU is enabled.
934  */
935 static bool
936 is_iommu_enabled(void)
937 {
938 	DIR *dir = opendir(KERNEL_IOMMU_GROUPS_PATH);
939 	struct dirent *d;
940 	int n = 0;
941 
942 	/* if directory doesn't exist, assume IOMMU is not enabled */
943 	if (dir == NULL)
944 		return false;
945 
946 	while ((d = readdir(dir)) != NULL) {
947 		/* skip dot and dot-dot */
948 		if (++n > 2)
949 			break;
950 	}
951 	closedir(dir);
952 
953 	return n > 2;
954 }
955 
956 /* Launch threads, called at application init(). */
957 int
958 rte_eal_init(int argc, char **argv)
959 {
960 	int i, fctret, ret;
961 	pthread_t thread_id;
962 	static uint32_t run_once;
963 	uint32_t has_run = 0;
964 	const char *p;
965 	static char logid[PATH_MAX];
966 	char cpuset[RTE_CPU_AFFINITY_STR_LEN];
967 	char thread_name[RTE_MAX_THREAD_NAME_LEN];
968 	bool phys_addrs;
969 	const struct rte_config *config = rte_eal_get_configuration();
970 	struct internal_config *internal_conf =
971 		eal_get_internal_configuration();
972 
973 	/* checks if the machine is adequate */
974 	if (!rte_cpu_is_supported()) {
975 		rte_eal_init_alert("unsupported cpu type.");
976 		rte_errno = ENOTSUP;
977 		return -1;
978 	}
979 
980 	if (!__atomic_compare_exchange_n(&run_once, &has_run, 1, 0,
981 					__ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
982 		rte_eal_init_alert("already called initialization.");
983 		rte_errno = EALREADY;
984 		return -1;
985 	}
986 
987 	p = strrchr(argv[0], '/');
988 	strlcpy(logid, p ? p + 1 : argv[0], sizeof(logid));
989 	thread_id = pthread_self();
990 
991 	eal_reset_internal_config(internal_conf);
992 
993 	/* set log level as early as possible */
994 	eal_log_level_parse(argc, argv);
995 
996 	/* clone argv to report out later in telemetry */
997 	eal_save_args(argc, argv);
998 
999 	if (rte_eal_cpu_init() < 0) {
1000 		rte_eal_init_alert("Cannot detect lcores.");
1001 		rte_errno = ENOTSUP;
1002 		return -1;
1003 	}
1004 
1005 	fctret = eal_parse_args(argc, argv);
1006 	if (fctret < 0) {
1007 		rte_eal_init_alert("Invalid 'command line' arguments.");
1008 		rte_errno = EINVAL;
1009 		__atomic_store_n(&run_once, 0, __ATOMIC_RELAXED);
1010 		return -1;
1011 	}
1012 
1013 	if (eal_plugins_init() < 0) {
1014 		rte_eal_init_alert("Cannot init plugins");
1015 		rte_errno = EINVAL;
1016 		__atomic_store_n(&run_once, 0, __ATOMIC_RELAXED);
1017 		return -1;
1018 	}
1019 
1020 	if (eal_trace_init() < 0) {
1021 		rte_eal_init_alert("Cannot init trace");
1022 		rte_errno = EFAULT;
1023 		return -1;
1024 	}
1025 
1026 	if (eal_option_device_parse()) {
1027 		rte_errno = ENODEV;
1028 		__atomic_store_n(&run_once, 0, __ATOMIC_RELAXED);
1029 		return -1;
1030 	}
1031 
1032 	if (rte_config_init() < 0) {
1033 		rte_eal_init_alert("Cannot init config");
1034 		return -1;
1035 	}
1036 
1037 	if (rte_eal_intr_init() < 0) {
1038 		rte_eal_init_alert("Cannot init interrupt-handling thread");
1039 		return -1;
1040 	}
1041 
1042 	if (rte_eal_alarm_init() < 0) {
1043 		rte_eal_init_alert("Cannot init alarm");
1044 		/* rte_eal_alarm_init sets rte_errno on failure. */
1045 		return -1;
1046 	}
1047 
1048 	/* Put mp channel init before bus scan so that we can init the vdev
1049 	 * bus through mp channel in the secondary process before the bus scan.
1050 	 */
1051 	if (rte_mp_channel_init() < 0 && rte_errno != ENOTSUP) {
1052 		rte_eal_init_alert("failed to init mp channel");
1053 		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1054 			rte_errno = EFAULT;
1055 			return -1;
1056 		}
1057 	}
1058 
1059 	/* register multi-process action callbacks for hotplug */
1060 	if (eal_mp_dev_hotplug_init() < 0) {
1061 		rte_eal_init_alert("failed to register mp callback for hotplug");
1062 		return -1;
1063 	}
1064 
1065 	if (rte_bus_scan()) {
1066 		rte_eal_init_alert("Cannot scan the buses for devices");
1067 		rte_errno = ENODEV;
1068 		__atomic_store_n(&run_once, 0, __ATOMIC_RELAXED);
1069 		return -1;
1070 	}
1071 
1072 	phys_addrs = rte_eal_using_phys_addrs() != 0;
1073 
1074 	/* if no EAL option "--iova-mode=<pa|va>", use bus IOVA scheme */
1075 	if (internal_conf->iova_mode == RTE_IOVA_DC) {
1076 		/* autodetect the IOVA mapping mode */
1077 		enum rte_iova_mode iova_mode = rte_bus_get_iommu_class();
1078 
1079 		if (iova_mode == RTE_IOVA_DC) {
1080 			RTE_LOG(DEBUG, EAL, "Buses did not request a specific IOVA mode.\n");
1081 
1082 			if (!phys_addrs) {
1083 				/* if we have no access to physical addresses,
1084 				 * pick IOVA as VA mode.
1085 				 */
1086 				iova_mode = RTE_IOVA_VA;
1087 				RTE_LOG(DEBUG, EAL, "Physical addresses are unavailable, selecting IOVA as VA mode.\n");
1088 #if defined(RTE_LIB_KNI) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
1089 			} else if (rte_eal_check_module("rte_kni") == 1) {
1090 				iova_mode = RTE_IOVA_PA;
1091 				RTE_LOG(DEBUG, EAL, "KNI is loaded, selecting IOVA as PA mode for better KNI performance.\n");
1092 #endif
1093 			} else if (is_iommu_enabled()) {
1094 				/* we have an IOMMU, pick IOVA as VA mode */
1095 				iova_mode = RTE_IOVA_VA;
1096 				RTE_LOG(DEBUG, EAL, "IOMMU is available, selecting IOVA as VA mode.\n");
1097 			} else {
1098 				/* physical addresses available, and no IOMMU
1099 				 * found, so pick IOVA as PA.
1100 				 */
1101 				iova_mode = RTE_IOVA_PA;
1102 				RTE_LOG(DEBUG, EAL, "IOMMU is not available, selecting IOVA as PA mode.\n");
1103 			}
1104 		}
1105 #if defined(RTE_LIB_KNI) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
1106 		/* Workaround for KNI, which requires physical addresses to work
1107 		 * in kernels < 4.10
1108 		 */
1109 		if (iova_mode == RTE_IOVA_VA &&
1110 				rte_eal_check_module("rte_kni") == 1) {
1111 			if (phys_addrs) {
1112 				iova_mode = RTE_IOVA_PA;
1113 				RTE_LOG(WARNING, EAL, "Forcing IOVA as 'PA' because KNI module is loaded\n");
1114 			} else {
1115 				RTE_LOG(DEBUG, EAL, "KNI can not work since physical addresses are unavailable\n");
1116 			}
1117 		}
1118 #endif
1119 		rte_eal_get_configuration()->iova_mode = iova_mode;
1120 	} else {
1121 		rte_eal_get_configuration()->iova_mode =
1122 			internal_conf->iova_mode;
1123 	}
1124 
1125 	if (rte_eal_iova_mode() == RTE_IOVA_PA && !phys_addrs) {
1126 		rte_eal_init_alert("Cannot use IOVA as 'PA' since physical addresses are not available");
1127 		rte_errno = EINVAL;
1128 		return -1;
1129 	}
1130 
1131 	RTE_LOG(INFO, EAL, "Selected IOVA mode '%s'\n",
1132 		rte_eal_iova_mode() == RTE_IOVA_PA ? "PA" : "VA");
1133 
1134 	if (internal_conf->no_hugetlbfs == 0) {
1135 		/* rte_config isn't initialized yet */
1136 		ret = internal_conf->process_type == RTE_PROC_PRIMARY ?
1137 				eal_hugepage_info_init() :
1138 				eal_hugepage_info_read();
1139 		if (ret < 0) {
1140 			rte_eal_init_alert("Cannot get hugepage information.");
1141 			rte_errno = EACCES;
1142 			__atomic_store_n(&run_once, 0, __ATOMIC_RELAXED);
1143 			return -1;
1144 		}
1145 	}
1146 
1147 	if (internal_conf->memory == 0 && internal_conf->force_sockets == 0) {
1148 		if (internal_conf->no_hugetlbfs)
1149 			internal_conf->memory = MEMSIZE_IF_NO_HUGE_PAGE;
1150 	}
1151 
1152 	if (internal_conf->vmware_tsc_map == 1) {
1153 #ifdef RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT
1154 		rte_cycles_vmware_tsc_map = 1;
1155 		RTE_LOG (DEBUG, EAL, "Using VMWARE TSC MAP, "
1156 				"you must have monitor_control.pseudo_perfctr = TRUE\n");
1157 #else
1158 		RTE_LOG (WARNING, EAL, "Ignoring --vmware-tsc-map because "
1159 				"RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT is not set\n");
1160 #endif
1161 	}
1162 
1163 	if (rte_eal_log_init(logid, internal_conf->syslog_facility) < 0) {
1164 		rte_eal_init_alert("Cannot init logging.");
1165 		rte_errno = ENOMEM;
1166 		__atomic_store_n(&run_once, 0, __ATOMIC_RELAXED);
1167 		return -1;
1168 	}
1169 
1170 #ifdef VFIO_PRESENT
1171 	if (rte_eal_vfio_setup() < 0) {
1172 		rte_eal_init_alert("Cannot init VFIO");
1173 		rte_errno = EAGAIN;
1174 		__atomic_store_n(&run_once, 0, __ATOMIC_RELAXED);
1175 		return -1;
1176 	}
1177 #endif
1178 	/* in secondary processes, memory init may allocate additional fbarrays
1179 	 * not present in primary processes, so to avoid any potential issues,
1180 	 * initialize memzones first.
1181 	 */
1182 	if (rte_eal_memzone_init() < 0) {
1183 		rte_eal_init_alert("Cannot init memzone");
1184 		rte_errno = ENODEV;
1185 		return -1;
1186 	}
1187 
1188 	if (rte_eal_memory_init() < 0) {
1189 		rte_eal_init_alert("Cannot init memory");
1190 		rte_errno = ENOMEM;
1191 		return -1;
1192 	}
1193 
1194 	/* the directories are locked during eal_hugepage_info_init */
1195 	eal_hugedirs_unlock();
1196 
1197 	if (rte_eal_malloc_heap_init() < 0) {
1198 		rte_eal_init_alert("Cannot init malloc heap");
1199 		rte_errno = ENODEV;
1200 		return -1;
1201 	}
1202 
1203 	if (rte_eal_tailqs_init() < 0) {
1204 		rte_eal_init_alert("Cannot init tail queues for objects");
1205 		rte_errno = EFAULT;
1206 		return -1;
1207 	}
1208 
1209 	if (rte_eal_timer_init() < 0) {
1210 		rte_eal_init_alert("Cannot init HPET or TSC timers");
1211 		rte_errno = ENOTSUP;
1212 		return -1;
1213 	}
1214 
1215 	eal_check_mem_on_local_socket();
1216 
1217 	if (pthread_setaffinity_np(pthread_self(), sizeof(rte_cpuset_t),
1218 			&lcore_config[config->main_lcore].cpuset) != 0) {
1219 		rte_eal_init_alert("Cannot set affinity");
1220 		rte_errno = EINVAL;
1221 		return -1;
1222 	}
1223 	__rte_thread_init(config->main_lcore,
1224 		&lcore_config[config->main_lcore].cpuset);
1225 
1226 	ret = eal_thread_dump_current_affinity(cpuset, sizeof(cpuset));
1227 	RTE_LOG(DEBUG, EAL, "Main lcore %u is ready (tid=%zx;cpuset=[%s%s])\n",
1228 		config->main_lcore, (uintptr_t)thread_id, cpuset,
1229 		ret == 0 ? "" : "...");
1230 
1231 	RTE_LCORE_FOREACH_WORKER(i) {
1232 
1233 		/*
1234 		 * create communication pipes between main thread
1235 		 * and children
1236 		 */
1237 		if (pipe(lcore_config[i].pipe_main2worker) < 0)
1238 			rte_panic("Cannot create pipe\n");
1239 		if (pipe(lcore_config[i].pipe_worker2main) < 0)
1240 			rte_panic("Cannot create pipe\n");
1241 
1242 		lcore_config[i].state = WAIT;
1243 
1244 		/* create a thread for each lcore */
1245 		ret = pthread_create(&lcore_config[i].thread_id, NULL,
1246 				     eal_thread_loop, NULL);
1247 		if (ret != 0)
1248 			rte_panic("Cannot create thread\n");
1249 
1250 		/* Set thread_name for aid in debugging. */
1251 		snprintf(thread_name, sizeof(thread_name),
1252 			"lcore-worker-%d", i);
1253 		ret = rte_thread_setname(lcore_config[i].thread_id,
1254 						thread_name);
1255 		if (ret != 0)
1256 			RTE_LOG(DEBUG, EAL,
1257 				"Cannot set name for lcore thread\n");
1258 
1259 		ret = pthread_setaffinity_np(lcore_config[i].thread_id,
1260 			sizeof(rte_cpuset_t), &lcore_config[i].cpuset);
1261 		if (ret != 0)
1262 			rte_panic("Cannot set affinity\n");
1263 	}
1264 
1265 	/*
1266 	 * Launch a dummy function on all worker lcores, so that main lcore
1267 	 * knows they are all ready when this function returns.
1268 	 */
1269 	rte_eal_mp_remote_launch(sync_func, NULL, SKIP_MAIN);
1270 	rte_eal_mp_wait_lcore();
1271 
1272 	/* initialize services so vdevs register service during bus_probe. */
1273 	ret = rte_service_init();
1274 	if (ret) {
1275 		rte_eal_init_alert("rte_service_init() failed");
1276 		rte_errno = ENOEXEC;
1277 		return -1;
1278 	}
1279 
1280 	/* Probe all the buses and devices/drivers on them */
1281 	if (rte_bus_probe()) {
1282 		rte_eal_init_alert("Cannot probe devices");
1283 		rte_errno = ENOTSUP;
1284 		return -1;
1285 	}
1286 
1287 #ifdef VFIO_PRESENT
1288 	/* Register mp action after probe() so that we got enough info */
1289 	if (rte_vfio_is_enabled("vfio") && vfio_mp_sync_setup() < 0)
1290 		return -1;
1291 #endif
1292 
1293 	/* initialize default service/lcore mappings and start running. Ignore
1294 	 * -ENOTSUP, as it indicates no service coremask passed to EAL.
1295 	 */
1296 	ret = rte_service_start_with_defaults();
1297 	if (ret < 0 && ret != -ENOTSUP) {
1298 		rte_errno = ENOEXEC;
1299 		return -1;
1300 	}
1301 
1302 	/*
1303 	 * Clean up unused files in runtime directory. We do this at the end of
1304 	 * init and not at the beginning because we want to clean stuff up
1305 	 * whether we are primary or secondary process, but we cannot remove
1306 	 * primary process' files because secondary should be able to run even
1307 	 * if primary process is dead.
1308 	 *
1309 	 * In no_shconf mode, no runtime directory is created in the first
1310 	 * place, so no cleanup needed.
1311 	 */
1312 	if (!internal_conf->no_shconf && eal_clean_runtime_dir() < 0) {
1313 		rte_eal_init_alert("Cannot clear runtime directory");
1314 		return -1;
1315 	}
1316 	if (!internal_conf->no_telemetry) {
1317 		const char *error_str = NULL;
1318 		if (rte_telemetry_init(rte_eal_get_runtime_dir(),
1319 				&internal_conf->ctrl_cpuset, &error_str)
1320 				!= 0) {
1321 			rte_eal_init_alert(error_str);
1322 			return -1;
1323 		}
1324 		if (error_str != NULL)
1325 			RTE_LOG(NOTICE, EAL, "%s\n", error_str);
1326 	}
1327 
1328 	eal_mcfg_complete();
1329 
1330 	return fctret;
1331 }
1332 
1333 static int
1334 mark_freeable(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
1335 		void *arg __rte_unused)
1336 {
1337 	/* ms is const, so find this memseg */
1338 	struct rte_memseg *found;
1339 
1340 	if (msl->external)
1341 		return 0;
1342 
1343 	found = rte_mem_virt2memseg(ms->addr, msl);
1344 
1345 	found->flags &= ~RTE_MEMSEG_FLAG_DO_NOT_FREE;
1346 
1347 	return 0;
1348 }
1349 
1350 int
1351 rte_eal_cleanup(void)
1352 {
1353 	/* if we're in a primary process, we need to mark hugepages as freeable
1354 	 * so that finalization can release them back to the system.
1355 	 */
1356 	struct internal_config *internal_conf =
1357 		eal_get_internal_configuration();
1358 
1359 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
1360 		rte_memseg_walk(mark_freeable, NULL);
1361 	rte_service_finalize();
1362 	rte_mp_channel_cleanup();
1363 	rte_trace_save();
1364 	eal_trace_fini();
1365 	eal_cleanup_config(internal_conf);
1366 	return 0;
1367 }
1368 
1369 int rte_eal_create_uio_dev(void)
1370 {
1371 	const struct internal_config *internal_conf =
1372 		eal_get_internal_configuration();
1373 
1374 	return internal_conf->create_uio_dev;
1375 }
1376 
1377 enum rte_intr_mode
1378 rte_eal_vfio_intr_mode(void)
1379 {
1380 	const struct internal_config *internal_conf =
1381 		eal_get_internal_configuration();
1382 
1383 	return internal_conf->vfio_intr_mode;
1384 }
1385 
1386 void
1387 rte_eal_vfio_get_vf_token(rte_uuid_t vf_token)
1388 {
1389 	struct internal_config *cfg = eal_get_internal_configuration();
1390 
1391 	rte_uuid_copy(vf_token, cfg->vfio_vf_token);
1392 }
1393 
1394 int
1395 rte_eal_check_module(const char *module_name)
1396 {
1397 	char sysfs_mod_name[PATH_MAX];
1398 	struct stat st;
1399 	int n;
1400 
1401 	if (NULL == module_name)
1402 		return -1;
1403 
1404 	/* Check if there is sysfs mounted */
1405 	if (stat("/sys/module", &st) != 0) {
1406 		RTE_LOG(DEBUG, EAL, "sysfs is not mounted! error %i (%s)\n",
1407 			errno, strerror(errno));
1408 		return -1;
1409 	}
1410 
1411 	/* A module might be built-in, therefore try sysfs */
1412 	n = snprintf(sysfs_mod_name, PATH_MAX, "/sys/module/%s", module_name);
1413 	if (n < 0 || n > PATH_MAX) {
1414 		RTE_LOG(DEBUG, EAL, "Could not format module path\n");
1415 		return -1;
1416 	}
1417 
1418 	if (stat(sysfs_mod_name, &st) != 0) {
1419 		RTE_LOG(DEBUG, EAL, "Module %s not found! error %i (%s)\n",
1420 		        sysfs_mod_name, errno, strerror(errno));
1421 		return 0;
1422 	}
1423 
1424 	/* Module has been found */
1425 	return 1;
1426 }
1427