xref: /dpdk/lib/eal/windows/eal.c (revision 8001c0dd)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019 Intel Corporation
3  */
4 
5 #include <stdarg.h>
6 
7 #include <fcntl.h>
8 #include <io.h>
9 #include <share.h>
10 #include <sys/stat.h>
11 
12 #include <rte_debug.h>
13 #include <rte_eal.h>
14 #include <eal_memcfg.h>
15 #include <rte_errno.h>
16 #include <rte_lcore.h>
17 #include <eal_thread.h>
18 #include <eal_internal_cfg.h>
19 #include <eal_filesystem.h>
20 #include <eal_options.h>
21 #include <eal_private.h>
22 #include <rte_service_component.h>
23 #include <rte_vfio.h>
24 
25 #include "eal_firmware.h"
26 #include "eal_hugepages.h"
27 #include "eal_trace.h"
28 #include "eal_log.h"
29 #include "eal_windows.h"
30 
31 #define MEMSIZE_IF_NO_HUGE_PAGE (64ULL * 1024ULL * 1024ULL)
32 
33 /* define fd variable here, because file needs to be kept open for the
34  * duration of the program, as we hold a write lock on it in the primary proc
35  */
36 static int mem_cfg_fd = -1;
37 
38 /* internal configuration (per-core) */
39 struct lcore_config lcore_config[RTE_MAX_LCORE];
40 
/* Detect if we are a primary or a secondary process.
 *
 * The primary process holds an exclusive lock on a region of the runtime
 * config file; if that lock cannot be taken, another (primary) process
 * already owns it and we must be secondary.
 */
enum rte_proc_type_t
eal_proc_type_detect(void)
{
	enum rte_proc_type_t ptype = RTE_PROC_PRIMARY;
	const char *pathname = eal_runtime_config_path();
	const struct rte_config *config = rte_eal_get_configuration();

	/* if we can open the file but not get a write-lock we are a secondary
	 * process. NOTE: if we get a file handle back, we keep that open
	 * and don't close it to prevent a race condition between multiple opens
	 */
	errno_t err = _sopen_s(&mem_cfg_fd, pathname,
		_O_RDWR, _SH_DENYNO, _S_IREAD | _S_IWRITE);
	if (err == 0) {
		/* Lock the byte range that starts right after the shared
		 * mem_config structure in the file.
		 */
		OVERLAPPED soverlapped = { 0 };
		soverlapped.Offset = sizeof(*config->mem_config);
		soverlapped.OffsetHigh = 0;

		HANDLE hwinfilehandle = (HANDLE)_get_osfhandle(mem_cfg_fd);

		/* LOCKFILE_FAIL_IMMEDIATELY: do not block waiting for the
		 * lock — immediate failure means a primary already runs.
		 */
		if (!LockFileEx(hwinfilehandle,
			LOCKFILE_EXCLUSIVE_LOCK | LOCKFILE_FAIL_IMMEDIATELY, 0,
			sizeof(*config->mem_config), 0, &soverlapped))
			ptype = RTE_PROC_SECONDARY;
	}

	RTE_LOG(INFO, EAL, "Auto-detected process type: %s\n",
		ptype == RTE_PROC_PRIMARY ? "PRIMARY" : "SECONDARY");

	return ptype;
}
73 
/* Multi-process is not available on Windows, so disabling it
 * unconditionally succeeds.
 */
bool
rte_mp_disable(void)
{
	return true;
}
79 
80 /* display usage */
81 static void
eal_usage(const char * prgname)82 eal_usage(const char *prgname)
83 {
84 	rte_usage_hook_t hook = eal_get_application_usage_hook();
85 
86 	printf("\nUsage: %s ", prgname);
87 	eal_common_usage();
88 	/* Allow the application to print its usage message too
89 	 * if hook is set
90 	 */
91 	if (hook) {
92 		printf("===== Application Usage =====\n\n");
93 		(hook)(prgname);
94 	}
95 }
96 
97 /* Parse the arguments for --log-level only */
98 static void
eal_log_level_parse(int argc,char ** argv)99 eal_log_level_parse(int argc, char **argv)
100 {
101 	int opt;
102 	char **argvopt;
103 	int option_index;
104 	struct internal_config *internal_conf =
105 		eal_get_internal_configuration();
106 
107 	argvopt = argv;
108 
109 	eal_reset_internal_config(internal_conf);
110 
111 	while ((opt = getopt_long(argc, argvopt, eal_short_options,
112 		eal_long_options, &option_index)) != EOF) {
113 
114 		int ret;
115 
116 		/* getopt is not happy, stop right now */
117 		if (opt == '?')
118 			break;
119 
120 		ret = (opt == OPT_LOG_LEVEL_NUM) ?
121 			eal_parse_common_option(opt, optarg,
122 				internal_conf) : 0;
123 
124 		/* common parser is not happy */
125 		if (ret < 0)
126 			break;
127 	}
128 
129 	optind = 0; /* reset getopt lib */
130 }
131 
132 /* Parse the argument given in the command line of the application */
133 static int
eal_parse_args(int argc,char ** argv)134 eal_parse_args(int argc, char **argv)
135 {
136 	int opt, ret;
137 	char **argvopt;
138 	int option_index;
139 	char *prgname = argv[0];
140 	struct internal_config *internal_conf =
141 		eal_get_internal_configuration();
142 
143 	argvopt = argv;
144 
145 	while ((opt = getopt_long(argc, argvopt, eal_short_options,
146 		eal_long_options, &option_index)) != EOF) {
147 
148 		int ret;
149 
150 		/* getopt is not happy, stop right now */
151 		if (opt == '?') {
152 			eal_usage(prgname);
153 			return -1;
154 		}
155 
156 		/* eal_log_level_parse() already handled this option */
157 		if (opt == OPT_LOG_LEVEL_NUM)
158 			continue;
159 
160 		ret = eal_parse_common_option(opt, optarg, internal_conf);
161 		/* common parser is not happy */
162 		if (ret < 0) {
163 			eal_usage(prgname);
164 			return -1;
165 		}
166 		/* common parser handled this option */
167 		if (ret == 0)
168 			continue;
169 
170 		switch (opt) {
171 		case 'h':
172 			eal_usage(prgname);
173 			exit(EXIT_SUCCESS);
174 		default:
175 			if (opt < OPT_LONG_MIN_NUM && isprint(opt)) {
176 				RTE_LOG(ERR, EAL, "Option %c is not supported "
177 					"on Windows\n", opt);
178 			} else if (opt >= OPT_LONG_MIN_NUM &&
179 				opt < OPT_LONG_MAX_NUM) {
180 				RTE_LOG(ERR, EAL, "Option %s is not supported "
181 					"on Windows\n",
182 					eal_long_options[option_index].name);
183 			} else {
184 				RTE_LOG(ERR, EAL, "Option %d is not supported "
185 					"on Windows\n", opt);
186 			}
187 			eal_usage(prgname);
188 			return -1;
189 		}
190 	}
191 
192 	if (eal_adjust_config(internal_conf) != 0)
193 		return -1;
194 
195 	/* sanity checks */
196 	if (eal_check_common_options(internal_conf) != 0) {
197 		eal_usage(prgname);
198 		return -1;
199 	}
200 
201 	if (optind >= 0)
202 		argv[optind - 1] = prgname;
203 	ret = optind - 1;
204 	optind = 0; /* reset getopt lib */
205 	return ret;
206 }
207 
/* Dummy worker job: rte_eal_init() launches it on every worker lcore to
 * confirm all workers are up and responsive before init returns.
 */
static int
sync_func(void *arg __rte_unused)
{
	return 0;
}
213 
/* Report a fatal initialization error both to stderr (always visible)
 * and to the EAL log stream.
 */
static void
rte_eal_init_alert(const char *msg)
{
	fprintf(stderr, "EAL: FATAL: %s\n", msg);
	RTE_LOG(ERR, EAL, "%s\n", msg);
}
220 
/* Stubs to enable EAL trace point compilation
 * until eal_common_trace.c can be compiled.
 */

/* Per-lcore storage referenced by the trace point machinery. */
RTE_DEFINE_PER_LCORE(volatile int, trace_point_sz);
RTE_DEFINE_PER_LCORE(void *, trace_mem);
227 
/* Stub: per-thread trace memory is never allocated on Windows. */
void
__rte_trace_mem_per_thread_alloc(void)
{
}
232 
/* Stub: nothing to free, since nothing is ever allocated (see the
 * alloc stub above in the original file).
 */
void
trace_mem_per_thread_free(void)
{
}
237 
/* Stub: tracing is not compiled in on Windows, so emitting a trace
 * point field is a no-op; arguments are consumed only to silence
 * unused-parameter warnings.
 */
void
__rte_trace_point_emit_field(size_t sz, const char *field,
	const char *type)
{
	(void)sz;
	(void)field;
	(void)type;
}
246 
247 int
__rte_trace_point_register(rte_trace_point_t * trace,const char * name,void (* register_fn)(void))248 __rte_trace_point_register(rte_trace_point_t *trace, const char *name,
249 	void (*register_fn)(void))
250 {
251 	RTE_SET_USED(trace);
252 	RTE_SET_USED(name);
253 	RTE_SET_USED(register_fn);
254 	return -ENOTSUP;
255 }
256 
/* Release EAL resources acquired by rte_eal_init().
 *
 * Teardown order matters: the interrupt thread is cancelled first,
 * then IOVA translation state, then the memory itself.
 */
int
rte_eal_cleanup(void)
{
	struct internal_config *internal_conf =
		eal_get_internal_configuration();

	eal_intr_thread_cancel();
	eal_mem_virt2iova_cleanup();
	/* after this point, any DPDK pointers will become dangling */
	rte_eal_memory_detach();
	eal_cleanup_config(internal_conf);
	return 0;
}
270 
/* Launch threads, called at application init().
 *
 * Runs the full Windows EAL bring-up sequence in strict order:
 * logging, CPU/NUMA discovery, argument parsing, hugepages, interrupts,
 * timers, bus scan, memory subsystems, worker lcore threads, services,
 * and finally device probe.  Returns the value of eal_parse_args()
 * (index of the last EAL argument) on success, -1 on error.
 */
int
rte_eal_init(int argc, char **argv)
{
	int i, fctret, bscan;
	const struct rte_config *config = rte_eal_get_configuration();
	struct internal_config *internal_conf =
		eal_get_internal_configuration();
	bool has_phys_addr;
	enum rte_iova_mode iova_mode;
	int ret;
	char cpuset[RTE_CPU_AFFINITY_STR_LEN];

	/* Bring logging up first so every later step can report errors. */
	eal_log_init(NULL, 0);

	/* Pre-parse --log-level before the full argument parse. */
	eal_log_level_parse(argc, argv);

	if (eal_create_cpu_map() < 0) {
		rte_eal_init_alert("Cannot discover CPU and NUMA.");
		/* rte_errno is set */
		return -1;
	}

	if (rte_eal_cpu_init() < 0) {
		rte_eal_init_alert("Cannot detect lcores.");
		rte_errno = ENOTSUP;
		return -1;
	}

	fctret = eal_parse_args(argc, argv);
	if (fctret < 0)
		exit(1);

	if (eal_option_device_parse()) {
		rte_errno = ENODEV;
		return -1;
	}

	/* Prevent creation of shared memory files. */
	if (internal_conf->in_memory == 0) {
		RTE_LOG(WARNING, EAL, "Multi-process support is requested, "
			"but not available.\n");
		internal_conf->in_memory = 1;
		internal_conf->no_shconf = 1;
	}

	if (!internal_conf->no_hugetlbfs && (eal_hugepage_info_init() < 0)) {
		rte_eal_init_alert("Cannot get hugepage information");
		rte_errno = EACCES;
		return -1;
	}

	/* Without hugepages and with no explicit size, fall back to a
	 * fixed default memory amount.
	 */
	if (internal_conf->memory == 0 && !internal_conf->force_sockets) {
		if (internal_conf->no_hugetlbfs)
			internal_conf->memory = MEMSIZE_IF_NO_HUGE_PAGE;
	}

	if (rte_eal_intr_init() < 0) {
		rte_eal_init_alert("Cannot init interrupt-handling thread");
		return -1;
	}

	if (rte_eal_timer_init() < 0) {
		rte_eal_init_alert("Cannot init TSC timer");
		rte_errno = EFAULT;
		return -1;
	}

	bscan = rte_bus_scan();
	if (bscan < 0) {
		rte_eal_init_alert("Cannot scan the buses");
		rte_errno = ENODEV;
		return -1;
	}

	if (eal_mem_win32api_init() < 0) {
		rte_eal_init_alert("Cannot access Win32 memory management");
		rte_errno = ENOTSUP;
		return -1;
	}

	/* Physical addresses come from the virt2phys driver; its absence
	 * is fatal only if IOVA-as-PA is explicitly requested below.
	 */
	has_phys_addr = true;
	if (eal_mem_virt2iova_init() < 0) {
		/* Non-fatal error if physical addresses are not required. */
		RTE_LOG(DEBUG, EAL, "Cannot access virt2phys driver, "
			"PA will not be available\n");
		has_phys_addr = false;
	}

	iova_mode = internal_conf->iova_mode;
	if (iova_mode == RTE_IOVA_PA && !has_phys_addr) {
		rte_eal_init_alert("Cannot use IOVA as 'PA' since physical addresses are not available");
		rte_errno = EINVAL;
		return -1;
	}
	if (iova_mode == RTE_IOVA_DC) {
		/* No explicit --iova-mode: prefer bus demands when PA is
		 * available, otherwise VA is the only option.
		 */
		RTE_LOG(DEBUG, EAL, "Specific IOVA mode is not requested, autodetecting\n");
		if (has_phys_addr) {
			RTE_LOG(DEBUG, EAL, "Selecting IOVA mode according to bus requests\n");
			iova_mode = rte_bus_get_iommu_class();
			if (iova_mode == RTE_IOVA_DC)
				iova_mode = RTE_IOVA_PA;
		} else {
			iova_mode = RTE_IOVA_VA;
		}
	}
	RTE_LOG(DEBUG, EAL, "Selected IOVA mode '%s'\n",
		iova_mode == RTE_IOVA_PA ? "PA" : "VA");
	rte_eal_get_configuration()->iova_mode = iova_mode;

	if (rte_eal_memzone_init() < 0) {
		rte_eal_init_alert("Cannot init memzone");
		rte_errno = ENODEV;
		return -1;
	}

	if (rte_eal_memory_init() < 0) {
		rte_eal_init_alert("Cannot init memory");
		rte_errno = ENOMEM;
		return -1;
	}

	if (rte_eal_malloc_heap_init() < 0) {
		rte_eal_init_alert("Cannot init malloc heap");
		rte_errno = ENODEV;
		return -1;
	}

	if (rte_eal_tailqs_init() < 0) {
		rte_eal_init_alert("Cannot init tail queues for objects");
		rte_errno = EFAULT;
		return -1;
	}

	/* Pin the main thread to the main lcore's CPU set. */
	if (pthread_setaffinity_np(pthread_self(), sizeof(rte_cpuset_t),
			&lcore_config[config->main_lcore].cpuset) != 0) {
		rte_eal_init_alert("Cannot set affinity");
		rte_errno = EINVAL;
		return -1;
	}
	__rte_thread_init(config->main_lcore,
		&lcore_config[config->main_lcore].cpuset);

	ret = eal_thread_dump_current_affinity(cpuset, sizeof(cpuset));
	RTE_LOG(DEBUG, EAL, "Main lcore %u is ready (tid=%zx;cpuset=[%s%s])\n",
		config->main_lcore, (uintptr_t)pthread_self(), cpuset,
		ret == 0 ? "" : "...");

	RTE_LCORE_FOREACH_WORKER(i) {

		/*
		 * create communication pipes between main thread
		 * and children
		 */
		if (_pipe(lcore_config[i].pipe_main2worker,
			sizeof(char), _O_BINARY) < 0)
			rte_panic("Cannot create pipe\n");
		if (_pipe(lcore_config[i].pipe_worker2main,
			sizeof(char), _O_BINARY) < 0)
			rte_panic("Cannot create pipe\n");

		lcore_config[i].state = WAIT;

		/* create a thread for each lcore */
		if (eal_thread_create(&lcore_config[i].thread_id, i) != 0)
			rte_panic("Cannot create thread\n");
		/* Worker affinity failure is only logged, not fatal. */
		ret = pthread_setaffinity_np(lcore_config[i].thread_id,
			sizeof(rte_cpuset_t), &lcore_config[i].cpuset);
		if (ret != 0)
			RTE_LOG(DEBUG, EAL, "Cannot set affinity\n");
	}

	/* Initialize services so drivers can register services during probe. */
	ret = rte_service_init();
	if (ret) {
		rte_eal_init_alert("rte_service_init() failed");
		rte_errno = -ret;
		return -1;
	}

	if (rte_bus_probe()) {
		rte_eal_init_alert("Cannot probe devices");
		rte_errno = ENOTSUP;
		return -1;
	}

	/*
	 * Launch a dummy function on all worker lcores, so that main lcore
	 * knows they are all ready when this function returns.
	 */
	rte_eal_mp_remote_launch(sync_func, NULL, SKIP_MAIN);
	rte_eal_mp_wait_lcore();
	return fctret;
}
465 
/* Don't use MinGW asprintf() to have identical code with all toolchains. */
/*
 * Format into a freshly malloc()'d buffer.
 *
 * On success, *buffer holds the NUL-terminated result (caller frees it)
 * and the number of characters written (excluding the NUL) is returned.
 * On failure, -1 is returned and *buffer is never left dangling.
 *
 * Fix: the second-format failure path freed *buffer but left the caller
 * holding a dangling pointer; it is now reset to NULL.
 */
int
eal_asprintf(char **buffer, const char *format, ...)
{
	int size, ret;
	va_list arg;

	/* First pass: measure the formatted length. */
	va_start(arg, format);
	size = vsnprintf(NULL, 0, format, arg);
	va_end(arg);
	if (size < 0)
		return -1;
	size++; /* room for the terminating NUL */

	*buffer = malloc(size);
	if (*buffer == NULL)
		return -1;

	/* Second pass: format into the allocated buffer. */
	va_start(arg, format);
	ret = vsnprintf(*buffer, size, format, arg);
	va_end(arg);
	if (ret != size - 1) {
		free(*buffer);
		*buffer = NULL;
		return -1;
	}
	return ret;
}
493 
/* Stub: VFIO is Linux-only, so container DMA mapping always fails
 * with rte_errno = ENOTSUP on Windows.
 */
int
rte_vfio_container_dma_map(__rte_unused int container_fd,
			__rte_unused uint64_t vaddr,
			__rte_unused uint64_t iova,
			__rte_unused uint64_t len)
{
	rte_errno = ENOTSUP;
	return -1;
}
503 
/* Stub: VFIO is Linux-only, so container DMA unmapping always fails
 * with rte_errno = ENOTSUP on Windows.
 */
int
rte_vfio_container_dma_unmap(__rte_unused int container_fd,
			__rte_unused uint64_t vaddr,
			__rte_unused uint64_t iova,
			__rte_unused uint64_t len)
{
	rte_errno = ENOTSUP;
	return -1;
}
513 
/* Stub: firmware loading is not implemented on Windows; always fails.
 * NOTE(review): this stub does not set rte_errno — confirm whether any
 * caller inspects rte_errno after a failed firmware read.
 */
int
rte_firmware_read(__rte_unused const char *name,
			__rte_unused void **buf,
			__rte_unused size_t *bufsz)
{
	return -1;
}
521