1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2019 Intel Corporation 3 */ 4 5 #include <stdarg.h> 6 7 #include <fcntl.h> 8 #include <io.h> 9 #include <share.h> 10 #include <sys/stat.h> 11 12 #include <rte_debug.h> 13 #include <rte_bus.h> 14 #include <rte_eal.h> 15 #include <rte_eal_memconfig.h> 16 #include <eal_memcfg.h> 17 #include <rte_errno.h> 18 #include <rte_lcore.h> 19 #include "eal_lcore_var.h" 20 #include <eal_thread.h> 21 #include <eal_internal_cfg.h> 22 #include <eal_filesystem.h> 23 #include <eal_options.h> 24 #include <eal_private.h> 25 #include <rte_service_component.h> 26 #include <rte_vfio.h> 27 28 #include "eal_firmware.h" 29 #include "eal_hugepages.h" 30 #include "eal_trace.h" 31 #include "eal_windows.h" 32 #include "log_internal.h" 33 34 #define MEMSIZE_IF_NO_HUGE_PAGE (64ULL * 1024ULL * 1024ULL) 35 36 /* define fd variable here, because file needs to be kept open for the 37 * duration of the program, as we hold a write lock on it in the primary proc 38 */ 39 static int mem_cfg_fd = -1; 40 41 /* internal configuration (per-core) */ 42 struct lcore_config lcore_config[RTE_MAX_LCORE]; 43 44 /* Detect if we are a primary or a secondary process */ 45 enum rte_proc_type_t 46 eal_proc_type_detect(void) 47 { 48 enum rte_proc_type_t ptype = RTE_PROC_PRIMARY; 49 const char *pathname = eal_runtime_config_path(); 50 const struct rte_config *config = rte_eal_get_configuration(); 51 52 /* if we can open the file but not get a write-lock we are a secondary 53 * process. NOTE: if we get a file handle back, we keep that open 54 * and don't close it to prevent a race condition between multiple opens 55 */ 56 errno_t err = _sopen_s(&mem_cfg_fd, pathname, 57 _O_RDWR, _SH_DENYNO, _S_IREAD | _S_IWRITE); 58 if (err == 0) { 59 OVERLAPPED soverlapped = { 0 }; 60 soverlapped.Offset = sizeof(*config->mem_config); 61 soverlapped.OffsetHigh = 0; 62 63 HANDLE hwinfilehandle = (HANDLE)_get_osfhandle(mem_cfg_fd); 64 65 if (!LockFileEx(hwinfilehandle, 66 LOCKFILE_EXCLUSIVE_LOCK | LOCKFILE_FAIL_IMMEDIATELY, 0, 67 sizeof(*config->mem_config), 0, &soverlapped)) 68 ptype = RTE_PROC_SECONDARY; 69 } 70 71 EAL_LOG(INFO, "Auto-detected process type: %s", 72 ptype == RTE_PROC_PRIMARY ? "PRIMARY" : "SECONDARY"); 73 74 return ptype; 75 } 76 77 bool 78 rte_mp_disable(void) 79 { 80 return true; 81 } 82 83 /* display usage */ 84 static void 85 eal_usage(const char *prgname) 86 { 87 rte_usage_hook_t hook = eal_get_application_usage_hook(); 88 89 printf("\nUsage: %s ", prgname); 90 eal_common_usage(); 91 /* Allow the application to print its usage message too 92 * if hook is set 93 */ 94 if (hook) { 95 printf("===== Application Usage =====\n\n"); 96 (hook)(prgname); 97 } 98 } 99 100 /* Parse the argument given in the command line of the application */ 101 static int 102 eal_parse_args(int argc, char **argv) 103 { 104 int opt, ret; 105 char **argvopt; 106 int option_index; 107 char *prgname = argv[0]; 108 struct internal_config *internal_conf = 109 eal_get_internal_configuration(); 110 111 argvopt = argv; 112 113 while ((opt = getopt_long(argc, argvopt, eal_short_options, 114 eal_long_options, &option_index)) != EOF) { 115 116 int ret; 117 118 /* getopt is not happy, stop right now */ 119 if (opt == '?') { 120 eal_usage(prgname); 121 return -1; 122 } 123 124 /* eal_parse_log_options() already handled this option */ 125 if (eal_option_is_log(opt)) 126 continue; 127 128 ret = eal_parse_common_option(opt, optarg, internal_conf); 129 /* common parser is not happy */ 130 if (ret < 0) { 131 eal_usage(prgname); 132 return -1; 133 } 134 /* common parser handled this option */ 135 if (ret == 0) 136 continue; 137 138 switch (opt) { 139 case OPT_HELP_NUM: 140 eal_usage(prgname); 141 exit(EXIT_SUCCESS); 142 default: 143 if (opt < OPT_LONG_MIN_NUM && isprint(opt)) { 144 EAL_LOG(ERR, "Option %c is not supported " 145 "on Windows", opt); 146 } else if (opt >= OPT_LONG_MIN_NUM && 147 opt < OPT_LONG_MAX_NUM) { 148 EAL_LOG(ERR, "Option %s is not supported " 149 "on Windows", 150 eal_long_options[option_index].name); 151 } else { 152 EAL_LOG(ERR, "Option %d is not supported " 153 "on Windows", opt); 154 } 155 eal_usage(prgname); 156 return -1; 157 } 158 } 159 160 if (eal_adjust_config(internal_conf) != 0) 161 return -1; 162 163 /* sanity checks */ 164 if (eal_check_common_options(internal_conf) != 0) { 165 eal_usage(prgname); 166 return -1; 167 } 168 169 if (optind >= 0) 170 argv[optind - 1] = prgname; 171 ret = optind - 1; 172 optind = 0; /* reset getopt lib */ 173 return ret; 174 } 175 176 static int 177 sync_func(void *arg __rte_unused) 178 { 179 return 0; 180 } 181 182 static void 183 rte_eal_init_alert(const char *msg) 184 { 185 EAL_LOG(ALERT, "%s", msg); 186 } 187 188 /* Stubs to enable EAL trace point compilation 189 * until eal_common_trace.c can be compiled. 190 */ 191 192 RTE_DEFINE_PER_LCORE(volatile int, trace_point_sz); 193 RTE_DEFINE_PER_LCORE(void *, trace_mem); 194 195 void 196 __rte_trace_mem_per_thread_alloc(void) 197 { 198 } 199 200 void 201 trace_mem_per_thread_free(void) 202 { 203 } 204 205 void 206 __rte_trace_point_emit_field(size_t sz, const char *field, 207 const char *type) 208 { 209 RTE_SET_USED(sz); 210 RTE_SET_USED(field); 211 RTE_SET_USED(type); 212 } 213 214 int 215 __rte_trace_point_register(rte_trace_point_t *trace, const char *name, 216 void (*register_fn)(void)) 217 { 218 RTE_SET_USED(trace); 219 RTE_SET_USED(name); 220 RTE_SET_USED(register_fn); 221 return -ENOTSUP; 222 } 223 224 int 225 rte_eal_cleanup(void) 226 { 227 struct internal_config *internal_conf = 228 eal_get_internal_configuration(); 229 230 eal_intr_thread_cancel(); 231 eal_mem_virt2iova_cleanup(); 232 eal_bus_cleanup(); 233 /* after this point, any DPDK pointers will become dangling */ 234 rte_eal_memory_detach(); 235 eal_cleanup_config(internal_conf); 236 eal_lcore_var_cleanup(); 237 return 0; 238 } 239 240 /* Launch threads, called at application init(). */ 241 int 242 rte_eal_init(int argc, char **argv) 243 { 244 int i, fctret, bscan; 245 const struct rte_config *config = rte_eal_get_configuration(); 246 struct internal_config *internal_conf = 247 eal_get_internal_configuration(); 248 bool has_phys_addr; 249 enum rte_iova_mode iova_mode; 250 int ret; 251 char cpuset[RTE_CPU_AFFINITY_STR_LEN]; 252 char thread_name[RTE_THREAD_NAME_SIZE]; 253 254 /* setup log as early as possible */ 255 if (eal_parse_log_options(argc, argv) < 0) { 256 rte_eal_init_alert("invalid log arguments."); 257 rte_errno = EINVAL; 258 return -1; 259 } 260 261 eal_log_init(NULL); 262 263 if (eal_create_cpu_map() < 0) { 264 rte_eal_init_alert("Cannot discover CPU and NUMA."); 265 /* rte_errno is set */ 266 return -1; 267 } 268 269 /* verify if DPDK supported on architecture MMU */ 270 if (!eal_mmu_supported()) { 271 rte_eal_init_alert("Unsupported MMU type."); 272 rte_errno = ENOTSUP; 273 return -1; 274 } 275 276 if (rte_eal_cpu_init() < 0) { 277 rte_eal_init_alert("Cannot detect lcores."); 278 rte_errno = ENOTSUP; 279 return -1; 280 } 281 282 fctret = eal_parse_args(argc, argv); 283 if (fctret < 0) 284 exit(1); 285 286 if (eal_option_device_parse()) { 287 rte_errno = ENODEV; 288 return -1; 289 } 290 291 /* Prevent creation of shared memory files. */ 292 if (internal_conf->in_memory == 0) { 293 EAL_LOG(WARNING, "Multi-process support is requested, " 294 "but not available."); 295 internal_conf->in_memory = 1; 296 internal_conf->no_shconf = 1; 297 } 298 299 if (!internal_conf->no_hugetlbfs && (eal_hugepage_info_init() < 0)) { 300 rte_eal_init_alert("Cannot get hugepage information"); 301 rte_errno = EACCES; 302 return -1; 303 } 304 305 if (internal_conf->memory == 0 && !internal_conf->force_sockets) { 306 if (internal_conf->no_hugetlbfs) 307 internal_conf->memory = MEMSIZE_IF_NO_HUGE_PAGE; 308 } 309 310 if (rte_eal_intr_init() < 0) { 311 rte_eal_init_alert("Cannot init interrupt-handling thread"); 312 return -1; 313 } 314 315 if (rte_eal_timer_init() < 0) { 316 rte_eal_init_alert("Cannot init TSC timer"); 317 rte_errno = EFAULT; 318 return -1; 319 } 320 321 bscan = rte_bus_scan(); 322 if (bscan < 0) { 323 rte_eal_init_alert("Cannot scan the buses"); 324 rte_errno = ENODEV; 325 return -1; 326 } 327 328 if (eal_mem_win32api_init() < 0) { 329 rte_eal_init_alert("Cannot access Win32 memory management"); 330 rte_errno = ENOTSUP; 331 return -1; 332 } 333 334 has_phys_addr = true; 335 if (eal_mem_virt2iova_init() < 0) { 336 /* Non-fatal error if physical addresses are not required. */ 337 EAL_LOG(DEBUG, "Cannot access virt2phys driver, " 338 "PA will not be available"); 339 has_phys_addr = false; 340 } 341 342 iova_mode = internal_conf->iova_mode; 343 if (iova_mode == RTE_IOVA_DC) { 344 EAL_LOG(DEBUG, "Specific IOVA mode is not requested, autodetecting"); 345 if (has_phys_addr) { 346 EAL_LOG(DEBUG, "Selecting IOVA mode according to bus requests"); 347 iova_mode = rte_bus_get_iommu_class(); 348 if (iova_mode == RTE_IOVA_DC) { 349 if (!RTE_IOVA_IN_MBUF) { 350 iova_mode = RTE_IOVA_VA; 351 EAL_LOG(DEBUG, "IOVA as VA mode is forced by build option."); 352 } else { 353 iova_mode = RTE_IOVA_PA; 354 } 355 } 356 } else { 357 iova_mode = RTE_IOVA_VA; 358 } 359 } 360 361 if (iova_mode == RTE_IOVA_PA && !has_phys_addr) { 362 rte_eal_init_alert("Cannot use IOVA as 'PA' since physical addresses are not available"); 363 rte_errno = EINVAL; 364 return -1; 365 } 366 367 if (iova_mode == RTE_IOVA_PA && !RTE_IOVA_IN_MBUF) { 368 rte_eal_init_alert("Cannot use IOVA as 'PA' as it is disabled during build"); 369 rte_errno = EINVAL; 370 return -1; 371 } 372 373 EAL_LOG(DEBUG, "Selected IOVA mode '%s'", 374 iova_mode == RTE_IOVA_PA ? "PA" : "VA"); 375 rte_eal_get_configuration()->iova_mode = iova_mode; 376 377 if (rte_eal_memzone_init() < 0) { 378 rte_eal_init_alert("Cannot init memzone"); 379 rte_errno = ENODEV; 380 return -1; 381 } 382 383 rte_mcfg_mem_read_lock(); 384 385 if (rte_eal_memory_init() < 0) { 386 rte_mcfg_mem_read_unlock(); 387 rte_eal_init_alert("Cannot init memory"); 388 rte_errno = ENOMEM; 389 return -1; 390 } 391 392 if (rte_eal_malloc_heap_init() < 0) { 393 rte_mcfg_mem_read_unlock(); 394 rte_eal_init_alert("Cannot init malloc heap"); 395 rte_errno = ENODEV; 396 return -1; 397 } 398 399 rte_mcfg_mem_read_unlock(); 400 401 if (rte_eal_malloc_heap_populate() < 0) { 402 rte_eal_init_alert("Cannot init malloc heap"); 403 rte_errno = ENODEV; 404 return -1; 405 } 406 407 if (rte_eal_tailqs_init() < 0) { 408 rte_eal_init_alert("Cannot init tail queues for objects"); 409 rte_errno = EFAULT; 410 return -1; 411 } 412 413 if (rte_thread_set_affinity_by_id(rte_thread_self(), 414 &lcore_config[config->main_lcore].cpuset) != 0) { 415 rte_eal_init_alert("Cannot set affinity"); 416 rte_errno = EINVAL; 417 return -1; 418 } 419 __rte_thread_init(config->main_lcore, 420 &lcore_config[config->main_lcore].cpuset); 421 422 ret = eal_thread_dump_current_affinity(cpuset, sizeof(cpuset)); 423 EAL_LOG(DEBUG, "Main lcore %u is ready (tid=%zx;cpuset=[%s%s])", 424 config->main_lcore, rte_thread_self().opaque_id, cpuset, 425 ret == 0 ? "" : "..."); 426 427 RTE_LCORE_FOREACH_WORKER(i) { 428 429 /* 430 * create communication pipes between main thread 431 * and children 432 */ 433 if (_pipe(lcore_config[i].pipe_main2worker, 434 sizeof(char), _O_BINARY) < 0) 435 rte_panic("Cannot create pipe\n"); 436 if (_pipe(lcore_config[i].pipe_worker2main, 437 sizeof(char), _O_BINARY) < 0) 438 rte_panic("Cannot create pipe\n"); 439 440 lcore_config[i].state = WAIT; 441 442 /* create a thread for each lcore */ 443 if (rte_thread_create(&lcore_config[i].thread_id, NULL, 444 eal_thread_loop, (void *)(uintptr_t)i) != 0) 445 rte_panic("Cannot create thread\n"); 446 447 /* Set thread name for aid in debugging. */ 448 snprintf(thread_name, sizeof(thread_name), 449 "dpdk-worker%d", i); 450 rte_thread_set_name(lcore_config[i].thread_id, thread_name); 451 452 ret = rte_thread_set_affinity_by_id(lcore_config[i].thread_id, 453 &lcore_config[i].cpuset); 454 if (ret != 0) 455 EAL_LOG(DEBUG, "Cannot set affinity"); 456 } 457 458 /* Initialize services so drivers can register services during probe. */ 459 ret = rte_service_init(); 460 if (ret) { 461 rte_eal_init_alert("rte_service_init() failed"); 462 rte_errno = -ret; 463 return -1; 464 } 465 466 if (rte_bus_probe()) { 467 rte_eal_init_alert("Cannot probe devices"); 468 rte_errno = ENOTSUP; 469 return -1; 470 } 471 472 /* 473 * Launch a dummy function on all worker lcores, so that main lcore 474 * knows they are all ready when this function returns. 475 */ 476 rte_eal_mp_remote_launch(sync_func, NULL, SKIP_MAIN); 477 rte_eal_mp_wait_lcore(); 478 479 eal_mcfg_complete(); 480 481 return fctret; 482 } 483 484 /* Don't use MinGW asprintf() to have identical code with all toolchains. */ 485 int 486 eal_asprintf(char **buffer, const char *format, ...) 487 { 488 int size, ret; 489 va_list arg; 490 491 va_start(arg, format); 492 size = vsnprintf(NULL, 0, format, arg); 493 va_end(arg); 494 if (size < 0) 495 return -1; 496 size++; 497 498 *buffer = malloc(size); 499 if (*buffer == NULL) 500 return -1; 501 502 va_start(arg, format); 503 ret = vsnprintf(*buffer, size, format, arg); 504 va_end(arg); 505 if (ret != size - 1) { 506 free(*buffer); 507 return -1; 508 } 509 return ret; 510 } 511 512 int 513 rte_vfio_container_dma_map(__rte_unused int container_fd, 514 __rte_unused uint64_t vaddr, 515 __rte_unused uint64_t iova, 516 __rte_unused uint64_t len) 517 { 518 rte_errno = ENOTSUP; 519 return -1; 520 } 521 522 int 523 rte_vfio_container_dma_unmap(__rte_unused int container_fd, 524 __rte_unused uint64_t vaddr, 525 __rte_unused uint64_t iova, 526 __rte_unused uint64_t len) 527 { 528 rte_errno = ENOTSUP; 529 return -1; 530 } 531 532 int 533 rte_firmware_read(__rte_unused const char *name, 534 __rte_unused void **buf, 535 __rte_unused size_t *bufsz) 536 { 537 return -1; 538 } 539