/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <stdarg.h>

#include <fcntl.h>
#include <io.h>
#include <share.h>
#include <sys/stat.h>

#include <rte_debug.h>
#include <rte_eal.h>
#include <eal_memcfg.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <eal_thread.h>
#include <eal_internal_cfg.h>
#include <eal_filesystem.h>
#include <eal_options.h>
#include <eal_private.h>
#include <rte_service_component.h>
#include <rte_vfio.h>

#include "eal_firmware.h"
#include "eal_hugepages.h"
#include "eal_trace.h"
#include "eal_log.h"
#include "eal_windows.h"

#define MEMSIZE_IF_NO_HUGE_PAGE (64ULL * 1024ULL * 1024ULL)

/* define fd variable here, because file needs to be kept open for the
 * duration of the program, as we hold a write lock on it in the primary proc
 */
static int mem_cfg_fd = -1;

/* internal configuration (per-core) */
struct lcore_config lcore_config[RTE_MAX_LCORE];

/* Detect if we are a primary or a secondary process */
enum rte_proc_type_t
eal_proc_type_detect(void)
{
	enum rte_proc_type_t ptype = RTE_PROC_PRIMARY;
	const char *pathname = eal_runtime_config_path();
	const struct rte_config *config = rte_eal_get_configuration();

	/* if we can open the file but not get a write-lock we are a secondary
	 * process. NOTE: if we get a file handle back, we keep that open
	 * and don't close it to prevent a race condition between multiple opens
	 */
	errno_t err = _sopen_s(&mem_cfg_fd, pathname,
		_O_RDWR, _SH_DENYNO, _S_IREAD | _S_IWRITE);
	if (err == 0) {
		OVERLAPPED soverlapped = { 0 };
		soverlapped.Offset = sizeof(*config->mem_config);
		soverlapped.OffsetHigh = 0;

		HANDLE hwinfilehandle = (HANDLE)_get_osfhandle(mem_cfg_fd);

		if (!LockFileEx(hwinfilehandle,
			LOCKFILE_EXCLUSIVE_LOCK | LOCKFILE_FAIL_IMMEDIATELY, 0,
			sizeof(*config->mem_config), 0, &soverlapped))
			ptype = RTE_PROC_SECONDARY;
	}

	RTE_LOG(INFO, EAL, "Auto-detected process type: %s\n",
		ptype == RTE_PROC_PRIMARY ? "PRIMARY" : "SECONDARY");

	return ptype;
}

bool
rte_mp_disable(void)
{
	return true;
}

/* display usage */
static void
eal_usage(const char *prgname)
{
	rte_usage_hook_t hook = eal_get_application_usage_hook();

	printf("\nUsage: %s ", prgname);
	eal_common_usage();
	/* Allow the application to print its usage message too
	 * if hook is set
	 */
	if (hook) {
		printf("===== Application Usage =====\n\n");
		(hook)(prgname);
	}
}

/* Parse the arguments for --log-level only */
static void
eal_log_level_parse(int argc, char **argv)
{
	int opt;
	char **argvopt;
	int option_index;
	struct internal_config *internal_conf =
		eal_get_internal_configuration();

	argvopt = argv;

	eal_reset_internal_config(internal_conf);

	while ((opt = getopt_long(argc, argvopt, eal_short_options,
		eal_long_options, &option_index)) != EOF) {

		int ret;

		/* getopt is not happy, stop right now */
		if (opt == '?')
			break;

		ret = (opt == OPT_LOG_LEVEL_NUM) ?
			eal_parse_common_option(opt, optarg,
				internal_conf) : 0;

		/* common parser is not happy */
		if (ret < 0)
			break;
	}

	optind = 0; /* reset getopt lib */
}

/* Parse the argument given in the command line of the application */
static int
eal_parse_args(int argc, char **argv)
{
	int opt, ret;
	char **argvopt;
	int option_index;
	char *prgname = argv[0];
	struct internal_config *internal_conf =
		eal_get_internal_configuration();

	argvopt = argv;

	while ((opt = getopt_long(argc, argvopt, eal_short_options,
		eal_long_options, &option_index)) != EOF) {

		int ret;

		/* getopt is not happy, stop right now */
		if (opt == '?') {
			eal_usage(prgname);
			return -1;
		}

		/* eal_log_level_parse() already handled this option */
		if (opt == OPT_LOG_LEVEL_NUM)
			continue;

		ret = eal_parse_common_option(opt, optarg, internal_conf);
		/* common parser is not happy */
		if (ret < 0) {
			eal_usage(prgname);
			return -1;
		}
		/* common parser handled this option */
		if (ret == 0)
			continue;

		switch (opt) {
		case 'h':
			eal_usage(prgname);
			exit(EXIT_SUCCESS);
		default:
			if (opt < OPT_LONG_MIN_NUM && isprint(opt)) {
				RTE_LOG(ERR, EAL, "Option %c is not supported "
					"on Windows\n", opt);
			} else if (opt >= OPT_LONG_MIN_NUM &&
				opt < OPT_LONG_MAX_NUM) {
				RTE_LOG(ERR, EAL, "Option %s is not supported "
					"on Windows\n",
					eal_long_options[option_index].name);
			} else {
				RTE_LOG(ERR, EAL, "Option %d is not supported "
					"on Windows\n", opt);
			}
			eal_usage(prgname);
			return -1;
		}
	}

	if (eal_adjust_config(internal_conf) != 0)
		return -1;

	/* sanity checks */
	if (eal_check_common_options(internal_conf) != 0) {
		eal_usage(prgname);
		return -1;
	}

	if (optind >= 0)
		argv[optind - 1] = prgname;
	ret = optind - 1;
	optind = 0; /* reset getopt lib */
	return ret;
}

static int
sync_func(void *arg __rte_unused)
{
	return 0;
}

static void
rte_eal_init_alert(const char *msg)
{
	fprintf(stderr, "EAL: FATAL: %s\n", msg);
	RTE_LOG(ERR, EAL, "%s\n", msg);
}

/* Stubs to enable EAL trace point compilation
 * until eal_common_trace.c can be compiled.
 */

RTE_DEFINE_PER_LCORE(volatile int, trace_point_sz);
RTE_DEFINE_PER_LCORE(void *, trace_mem);

void
__rte_trace_mem_per_thread_alloc(void)
{
}

void
trace_mem_per_thread_free(void)
{
}

void
__rte_trace_point_emit_field(size_t sz, const char *field,
	const char *type)
{
	RTE_SET_USED(sz);
	RTE_SET_USED(field);
	RTE_SET_USED(type);
}

int
__rte_trace_point_register(rte_trace_point_t *trace, const char *name,
	void (*register_fn)(void))
{
	RTE_SET_USED(trace);
	RTE_SET_USED(name);
	RTE_SET_USED(register_fn);
	return -ENOTSUP;
}

int
rte_eal_cleanup(void)
{
	struct internal_config *internal_conf =
		eal_get_internal_configuration();

	eal_intr_thread_cancel();
	eal_mem_virt2iova_cleanup();
	/* after this point, any DPDK pointers will become dangling */
	rte_eal_memory_detach();
	eal_cleanup_config(internal_conf);
	return 0;
}

/* Launch threads, called at application init(). */
int
rte_eal_init(int argc, char **argv)
{
	int i, fctret, bscan;
	const struct rte_config *config = rte_eal_get_configuration();
	struct internal_config *internal_conf =
		eal_get_internal_configuration();
	bool has_phys_addr;
	enum rte_iova_mode iova_mode;
	int ret;

	eal_log_init(NULL, 0);

	eal_log_level_parse(argc, argv);

	if (eal_create_cpu_map() < 0) {
		rte_eal_init_alert("Cannot discover CPU and NUMA.");
		/* rte_errno is set */
		return -1;
	}

	if (rte_eal_cpu_init() < 0) {
		rte_eal_init_alert("Cannot detect lcores.");
		rte_errno = ENOTSUP;
		return -1;
	}

	fctret = eal_parse_args(argc, argv);
	if (fctret < 0)
		exit(1);

	if (eal_option_device_parse()) {
		rte_errno = ENODEV;
		return -1;
	}

	/* Prevent creation of shared memory files. */
	if (internal_conf->in_memory == 0) {
		RTE_LOG(WARNING, EAL, "Multi-process support is requested, "
			"but not available.\n");
		internal_conf->in_memory = 1;
		internal_conf->no_shconf = 1;
	}

	if (!internal_conf->no_hugetlbfs && (eal_hugepage_info_init() < 0)) {
		rte_eal_init_alert("Cannot get hugepage information");
		rte_errno = EACCES;
		return -1;
	}

	if (internal_conf->memory == 0 && !internal_conf->force_sockets) {
		if (internal_conf->no_hugetlbfs)
			internal_conf->memory = MEMSIZE_IF_NO_HUGE_PAGE;
	}

	if (rte_eal_intr_init() < 0) {
		rte_eal_init_alert("Cannot init interrupt-handling thread");
		return -1;
	}

	if (rte_eal_timer_init() < 0) {
		rte_eal_init_alert("Cannot init TSC timer");
		rte_errno = EFAULT;
		return -1;
	}

	bscan = rte_bus_scan();
	if (bscan < 0) {
		rte_eal_init_alert("Cannot scan the buses");
		rte_errno = ENODEV;
		return -1;
	}

	if (eal_mem_win32api_init() < 0) {
		rte_eal_init_alert("Cannot access Win32 memory management");
		rte_errno = ENOTSUP;
		return -1;
	}

	has_phys_addr = true;
	if (eal_mem_virt2iova_init() < 0) {
		/* Non-fatal error if physical addresses are not required. */
		RTE_LOG(DEBUG, EAL, "Cannot access virt2phys driver, "
			"PA will not be available\n");
		has_phys_addr = false;
	}

	iova_mode = internal_conf->iova_mode;
	if (iova_mode == RTE_IOVA_PA && !has_phys_addr) {
		rte_eal_init_alert("Cannot use IOVA as 'PA' since physical addresses are not available");
		rte_errno = EINVAL;
		return -1;
	}
	if (iova_mode == RTE_IOVA_DC) {
		RTE_LOG(DEBUG, EAL, "Specific IOVA mode is not requested, autodetecting\n");
		if (has_phys_addr) {
			RTE_LOG(DEBUG, EAL, "Selecting IOVA mode according to bus requests\n");
			iova_mode = rte_bus_get_iommu_class();
			if (iova_mode == RTE_IOVA_DC)
				iova_mode = RTE_IOVA_PA;
		} else {
			iova_mode = RTE_IOVA_VA;
		}
	}
	RTE_LOG(DEBUG, EAL, "Selected IOVA mode '%s'\n",
		iova_mode == RTE_IOVA_PA ? "PA" : "VA");
	rte_eal_get_configuration()->iova_mode = iova_mode;

	if (rte_eal_memzone_init() < 0) {
		rte_eal_init_alert("Cannot init memzone");
		rte_errno = ENODEV;
		return -1;
	}

	if (rte_eal_memory_init() < 0) {
		rte_eal_init_alert("Cannot init memory");
		rte_errno = ENOMEM;
		return -1;
	}

	if (rte_eal_malloc_heap_init() < 0) {
		rte_eal_init_alert("Cannot init malloc heap");
		rte_errno = ENODEV;
		return -1;
	}

	if (rte_eal_tailqs_init() < 0) {
		rte_eal_init_alert("Cannot init tail queues for objects");
		rte_errno = EFAULT;
		return -1;
	}

	__rte_thread_init(config->main_lcore,
		&lcore_config[config->main_lcore].cpuset);

	RTE_LCORE_FOREACH_WORKER(i) {

		/*
		 * create communication pipes between main thread
		 * and children
		 */
		if (_pipe(lcore_config[i].pipe_main2worker,
			sizeof(char), _O_BINARY) < 0)
			rte_panic("Cannot create pipe\n");
		if (_pipe(lcore_config[i].pipe_worker2main,
			sizeof(char), _O_BINARY) < 0)
			rte_panic("Cannot create pipe\n");

		lcore_config[i].state = WAIT;

		/* create a thread for each lcore */
		if (eal_thread_create(&lcore_config[i].thread_id) != 0)
			rte_panic("Cannot create thread\n");
		ret = pthread_setaffinity_np(lcore_config[i].thread_id,
			sizeof(rte_cpuset_t), &lcore_config[i].cpuset);
		if (ret != 0)
			RTE_LOG(DEBUG, EAL, "Cannot set affinity\n");
	}

	/* Initialize services so drivers can register services during probe. */
	ret = rte_service_init();
	if (ret) {
		rte_eal_init_alert("rte_service_init() failed");
		rte_errno = -ret;
		return -1;
	}

	if (rte_bus_probe()) {
		rte_eal_init_alert("Cannot probe devices");
		rte_errno = ENOTSUP;
		return -1;
	}

	/*
	 * Launch a dummy function on all worker lcores, so that main lcore
	 * knows they are all ready when this function returns.
	 */
	rte_eal_mp_remote_launch(sync_func, NULL, SKIP_MAIN);
	rte_eal_mp_wait_lcore();
	return fctret;
}

/* Don't use MinGW asprintf() to have identical code with all toolchains. */
int
eal_asprintf(char **buffer, const char *format, ...)
{
	int size, ret;
	va_list arg;

	va_start(arg, format);
	size = vsnprintf(NULL, 0, format, arg);
	va_end(arg);
	if (size < 0)
		return -1;
	size++;

	*buffer = malloc(size);
	if (*buffer == NULL)
		return -1;

	va_start(arg, format);
	ret = vsnprintf(*buffer, size, format, arg);
	va_end(arg);
	if (ret != size - 1) {
		free(*buffer);
		return -1;
	}
	return ret;
}

int
rte_vfio_container_dma_map(__rte_unused int container_fd,
	__rte_unused uint64_t vaddr,
	__rte_unused uint64_t iova,
	__rte_unused uint64_t len)
{
	rte_errno = ENOTSUP;
	return -1;
}

int
rte_vfio_container_dma_unmap(__rte_unused int container_fd,
	__rte_unused uint64_t vaddr,
	__rte_unused uint64_t iova,
	__rte_unused uint64_t len)
{
	rte_errno = ENOTSUP;
	return -1;
}

int
rte_firmware_read(__rte_unused const char *name,
	__rte_unused void **buf,
	__rte_unused size_t *bufsz)
{
	return -1;
}