/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation.
 * Copyright(c) 2012-2014 6WIND S.A.
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <stdarg.h>
#include <ctype.h>
#include <unistd.h>
#include <pthread.h>
#include <syslog.h>
#include <getopt.h>
#include <sys/file.h>
#include <dirent.h>
#include <fcntl.h>
#include <fnmatch.h>
#include <stddef.h>
#include <errno.h>
#include <limits.h>
#include <sys/mman.h>
#include <sys/queue.h>
#include <sys/stat.h>
#if defined(RTE_ARCH_X86)
#include <sys/io.h>
#endif
#include <linux/version.h>

#include <rte_compat.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_errno.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_service_component.h>
#include <rte_log.h>
#include <rte_random.h>
#include <rte_cycles.h>
#include <rte_string_fns.h>
#include <rte_cpuflags.h>
#include <rte_interrupts.h>
#include <rte_bus.h>
#include <rte_dev.h>
#include <rte_devargs.h>
#include <rte_version.h>
#include <malloc_heap.h>
#include <rte_vfio.h>

#include <telemetry_internal.h>
#include "eal_private.h"
#include "eal_thread.h"
#include "eal_internal_cfg.h"
#include "eal_filesystem.h"
#include "eal_hugepages.h"
#include "eal_memcfg.h"
#include "eal_trace.h"
#include "eal_log.h"
#include "eal_options.h"
#include "eal_vfio.h"
#include "hotplug_mp.h"

#define MEMSIZE_IF_NO_HUGE_PAGE (64ULL * 1024ULL * 1024ULL)

#define SOCKET_MEM_STRLEN (RTE_MAX_NUMA_NODES * 10)

#define KERNEL_IOMMU_GROUPS_PATH "/sys/kernel/iommu_groups"

/* define fd variable here, because file needs to be kept open for the
 * duration of the program, as we hold a write lock on it in the primary proc */
static int mem_cfg_fd = -1;

static struct flock wr_lock = {
		.l_type = F_WRLCK,
		.l_whence = SEEK_SET,
		.l_start = offsetof(struct rte_mem_config, memsegs),
		.l_len = RTE_SIZEOF_FIELD(struct rte_mem_config, memsegs),
};

/* internal configuration (per-core) */
struct lcore_config lcore_config[RTE_MAX_LCORE];

/* used by rte_rdtsc() */
int rte_cycles_vmware_tsc_map;

static const char *default_runtime_dir = "/var/run";

int
eal_create_runtime_dir(void)
{
	const char *directory = default_runtime_dir;
	const char *xdg_runtime_dir = getenv("XDG_RUNTIME_DIR");
	const char *fallback = "/tmp";
	char run_dir[PATH_MAX];
	char tmp[PATH_MAX];
	int ret;

	if (getuid() != 0) {
		/* try XDG path first, fall back to /tmp */
		if (xdg_runtime_dir != NULL)
			directory = xdg_runtime_dir;
		else
			directory = fallback;
	}
	/* create DPDK subdirectory under runtime dir */
	ret = snprintf(tmp, sizeof(tmp), "%s/dpdk", directory);
	if (ret < 0 || ret == sizeof(tmp)) {
		RTE_LOG(ERR, EAL, "Error creating DPDK runtime path name\n");
		return -1;
	}

	/* create prefix-specific subdirectory under DPDK runtime dir */
	ret = snprintf(run_dir, sizeof(run_dir), "%s/%s",
			tmp, eal_get_hugefile_prefix());
	if (ret < 0 || ret == sizeof(run_dir)) {
		RTE_LOG(ERR, EAL, "Error creating prefix-specific runtime path name\n");
		return -1;
	}

	/* create the path if it doesn't exist. no "mkdir -p" here, so do it
	 * step by step.
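	 * Note: mkdir() failing with EEXIST is not treated as an error below;
	 * it only means the directory was already created by an earlier run
	 * or by another process.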
	 */
	ret = mkdir(tmp, 0700);
	if (ret < 0 && errno != EEXIST) {
		RTE_LOG(ERR, EAL, "Error creating '%s': %s\n",
			tmp, strerror(errno));
		return -1;
	}

	ret = mkdir(run_dir, 0700);
	if (ret < 0 && errno != EEXIST) {
		RTE_LOG(ERR, EAL, "Error creating '%s': %s\n",
			run_dir, strerror(errno));
		return -1;
	}

	if (eal_set_runtime_dir(run_dir, sizeof(run_dir)))
		return -1;

	return 0;
}

int
eal_clean_runtime_dir(void)
{
	const char *runtime_dir = rte_eal_get_runtime_dir();
	DIR *dir;
	struct dirent *dirent;
	int dir_fd, fd, lck_result;
	static const char * const filters[] = {
		"fbarray_*",
		"mp_socket_*"
	};

	/* open directory */
	dir = opendir(runtime_dir);
	if (!dir) {
		RTE_LOG(ERR, EAL, "Unable to open runtime directory %s\n",
			runtime_dir);
		goto error;
	}
	dir_fd = dirfd(dir);

	/* lock the directory before doing anything, to avoid races */
	if (flock(dir_fd, LOCK_EX) < 0) {
		RTE_LOG(ERR, EAL, "Unable to lock runtime directory %s\n",
			runtime_dir);
		goto error;
	}

	dirent = readdir(dir);
	if (!dirent) {
		RTE_LOG(ERR, EAL, "Unable to read runtime directory %s\n",
			runtime_dir);
		goto error;
	}

	while (dirent != NULL) {
		unsigned int f_idx;
		bool skip = true;

		/* skip files that don't match the patterns */
		for (f_idx = 0; f_idx < RTE_DIM(filters); f_idx++) {
			const char *filter = filters[f_idx];

			if (fnmatch(filter, dirent->d_name, 0) == 0) {
				skip = false;
				break;
			}
		}
		if (skip) {
			dirent = readdir(dir);
			continue;
		}

		/* try and lock the file */
		fd = openat(dir_fd, dirent->d_name, O_RDONLY);

		/* skip to next file */
		if (fd == -1) {
			dirent = readdir(dir);
			continue;
		}

		/* non-blocking lock */
		lck_result = flock(fd, LOCK_EX | LOCK_NB);

		/* if lock succeeds, remove the file */
		if (lck_result != -1)
			unlinkat(dir_fd, dirent->d_name, 0);
		close(fd);
		dirent = readdir(dir);
	}

	/* closedir closes dir_fd and drops the lock */
	closedir(dir);
	return 0;

error:
	if (dir)
		closedir(dir);

	RTE_LOG(ERR, EAL, "Error while clearing runtime dir: %s\n",
		strerror(errno));

	return -1;
}

/* parse a sysfs (or other) file containing one integer value */
int
eal_parse_sysfs_value(const char *filename, unsigned long *val)
{
	FILE *f;
	char buf[BUFSIZ];
	char *end = NULL;

	if ((f = fopen(filename, "r")) == NULL) {
		RTE_LOG(ERR, EAL, "%s(): cannot open sysfs value %s\n",
			__func__, filename);
		return -1;
	}

	if (fgets(buf, sizeof(buf), f) == NULL) {
		RTE_LOG(ERR, EAL, "%s(): cannot read sysfs value %s\n",
			__func__, filename);
		fclose(f);
		return -1;
	}
	*val = strtoul(buf, &end, 0);
	if ((buf[0] == '\0') || (end == NULL) || (*end != '\n')) {
		RTE_LOG(ERR, EAL, "%s(): cannot parse sysfs value %s\n",
			__func__, filename);
		fclose(f);
		return -1;
	}
	fclose(f);
	return 0;
}


/* create memory configuration in shared/mmap memory. Take out
 * a write lock on the memsegs, so we can auto-detect primary/secondary.
 * This means we never close the file while running (auto-close on exit).
 * We also don't lock the whole file, so that in future we can use read-locks
 * on other parts, e.g. memzones, to detect if there are running secondary
 * processes.
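 * The write lock is taken with fcntl(F_SETLK) and covers only the memseg
 * part of the file (see wr_lock above), so the rest of the file stays
 * available for such read locks.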
 */
static int
rte_eal_config_create(void)
{
	struct rte_config *config = rte_eal_get_configuration();
	size_t page_sz = sysconf(_SC_PAGE_SIZE);
	size_t cfg_len = sizeof(*config->mem_config);
	size_t cfg_len_aligned = RTE_ALIGN(cfg_len, page_sz);
	void *rte_mem_cfg_addr, *mapped_mem_cfg_addr;
	int retval;
	const struct internal_config *internal_conf =
		eal_get_internal_configuration();

	const char *pathname = eal_runtime_config_path();

	if (internal_conf->no_shconf)
		return 0;

	/* map the config before hugepage address so that we don't waste a page */
	if (internal_conf->base_virtaddr != 0)
		rte_mem_cfg_addr = (void *)
			RTE_ALIGN_FLOOR(internal_conf->base_virtaddr -
			sizeof(struct rte_mem_config), page_sz);
	else
		rte_mem_cfg_addr = NULL;

	if (mem_cfg_fd < 0) {
		mem_cfg_fd = open(pathname, O_RDWR | O_CREAT, 0600);
		if (mem_cfg_fd < 0) {
			RTE_LOG(ERR, EAL, "Cannot open '%s' for rte_mem_config\n",
				pathname);
			return -1;
		}
	}

	retval = ftruncate(mem_cfg_fd, cfg_len);
	if (retval < 0) {
		close(mem_cfg_fd);
		mem_cfg_fd = -1;
		RTE_LOG(ERR, EAL, "Cannot resize '%s' for rte_mem_config\n",
			pathname);
		return -1;
	}

	retval = fcntl(mem_cfg_fd, F_SETLK, &wr_lock);
	if (retval < 0) {
		close(mem_cfg_fd);
		mem_cfg_fd = -1;
		RTE_LOG(ERR, EAL, "Cannot create lock on '%s'. Is another primary "
			"process running?\n", pathname);
		return -1;
	}

	/* reserve space for config */
	rte_mem_cfg_addr = eal_get_virtual_area(rte_mem_cfg_addr,
			&cfg_len_aligned, page_sz, 0, 0);
	if (rte_mem_cfg_addr == NULL) {
		RTE_LOG(ERR, EAL, "Cannot mmap memory for rte_config\n");
		close(mem_cfg_fd);
		mem_cfg_fd = -1;
		return -1;
	}

	/* remap the actual file into the space we've just reserved */
	mapped_mem_cfg_addr = mmap(rte_mem_cfg_addr,
			cfg_len_aligned, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_FIXED, mem_cfg_fd, 0);
	if (mapped_mem_cfg_addr == MAP_FAILED) {
		munmap(rte_mem_cfg_addr, cfg_len);
		close(mem_cfg_fd);
		mem_cfg_fd = -1;
		RTE_LOG(ERR, EAL, "Cannot remap memory for rte_config\n");
		return -1;
	}

	memcpy(rte_mem_cfg_addr, config->mem_config, sizeof(struct rte_mem_config));
	config->mem_config = rte_mem_cfg_addr;

	/* store address of the config in the config itself so that secondary
	 * processes could later map the config into this exact location
	 */
	config->mem_config->mem_cfg_addr = (uintptr_t) rte_mem_cfg_addr;
	config->mem_config->dma_maskbits = 0;

	return 0;
}

/* attach to an existing shared memory config */
static int
rte_eal_config_attach(void)
{
	struct rte_config *config = rte_eal_get_configuration();
	struct rte_mem_config *mem_config;
	const struct internal_config *internal_conf =
		eal_get_internal_configuration();

	const char *pathname = eal_runtime_config_path();

	if (internal_conf->no_shconf)
		return 0;

	if (mem_cfg_fd < 0) {
		mem_cfg_fd = open(pathname, O_RDWR);
		if (mem_cfg_fd < 0) {
			RTE_LOG(ERR, EAL, "Cannot open '%s' for rte_mem_config\n",
				pathname);
			return -1;
		}
	}

	/* map it as read-only first */
	mem_config = (struct rte_mem_config *) mmap(NULL, sizeof(*mem_config),
			PROT_READ, MAP_SHARED, mem_cfg_fd, 0);
	if (mem_config == MAP_FAILED) {
		close(mem_cfg_fd);
		mem_cfg_fd = -1;
		RTE_LOG(ERR, EAL, "Cannot mmap memory for rte_config! error %i (%s)\n",
			errno, strerror(errno));
		return -1;
	}

	config->mem_config = mem_config;

	return 0;
}

/* reattach the shared config at exact memory location primary process has it */
static int
rte_eal_config_reattach(void)
{
	struct rte_config *config = rte_eal_get_configuration();
	struct rte_mem_config *mem_config;
	void *rte_mem_cfg_addr;
	const struct internal_config *internal_conf =
		eal_get_internal_configuration();

	if (internal_conf->no_shconf)
		return 0;

	/* save the address primary process has mapped shared config to */
	rte_mem_cfg_addr =
		(void *) (uintptr_t) config->mem_config->mem_cfg_addr;

	/* unmap original config */
	munmap(config->mem_config, sizeof(struct rte_mem_config));

	/* remap the config at proper address */
	mem_config = (struct rte_mem_config *) mmap(rte_mem_cfg_addr,
			sizeof(*mem_config), PROT_READ | PROT_WRITE, MAP_SHARED,
			mem_cfg_fd, 0);

	close(mem_cfg_fd);
	mem_cfg_fd = -1;

	if (mem_config == MAP_FAILED || mem_config != rte_mem_cfg_addr) {
		if (mem_config != MAP_FAILED) {
			/* errno is stale, don't use */
			RTE_LOG(ERR, EAL, "Cannot mmap memory for rte_config at [%p], got [%p]"
				" - please use '--" OPT_BASE_VIRTADDR
				"' option\n", rte_mem_cfg_addr, mem_config);
			munmap(mem_config, sizeof(struct rte_mem_config));
			return -1;
		}
		RTE_LOG(ERR, EAL, "Cannot mmap memory for rte_config! error %i (%s)\n",
			errno, strerror(errno));
		return -1;
	}

	config->mem_config = mem_config;

	return 0;
}

/* Detect if we are a primary or a secondary process */
enum rte_proc_type_t
eal_proc_type_detect(void)
{
	enum rte_proc_type_t ptype = RTE_PROC_PRIMARY;
	const char *pathname = eal_runtime_config_path();
	const struct internal_config *internal_conf =
		eal_get_internal_configuration();

	/* if there is no shared config, there can be no secondary processes */
	if (!internal_conf->no_shconf) {
		/* if we can open the file but not get a write-lock we are a
		 * secondary process. NOTE: if we get a file handle back, we
		 * keep that open and don't close it to prevent a race condition
		 * between multiple opens.
		 */
		if (((mem_cfg_fd = open(pathname, O_RDWR)) >= 0) &&
				(fcntl(mem_cfg_fd, F_SETLK, &wr_lock) < 0))
			ptype = RTE_PROC_SECONDARY;
	}

	RTE_LOG(INFO, EAL, "Auto-detected process type: %s\n",
		ptype == RTE_PROC_PRIMARY ?
"PRIMARY" : "SECONDARY"); 465 466 return ptype; 467 } 468 469 /* Sets up rte_config structure with the pointer to shared memory config.*/ 470 static int 471 rte_config_init(void) 472 { 473 struct rte_config *config = rte_eal_get_configuration(); 474 const struct internal_config *internal_conf = 475 eal_get_internal_configuration(); 476 477 config->process_type = internal_conf->process_type; 478 479 switch (config->process_type) { 480 case RTE_PROC_PRIMARY: 481 if (rte_eal_config_create() < 0) 482 return -1; 483 eal_mcfg_update_from_internal(); 484 break; 485 case RTE_PROC_SECONDARY: 486 if (rte_eal_config_attach() < 0) 487 return -1; 488 eal_mcfg_wait_complete(); 489 if (eal_mcfg_check_version() < 0) { 490 RTE_LOG(ERR, EAL, "Primary and secondary process DPDK version mismatch\n"); 491 return -1; 492 } 493 if (rte_eal_config_reattach() < 0) 494 return -1; 495 if (!__rte_mp_enable()) { 496 RTE_LOG(ERR, EAL, "Primary process refused secondary attachment\n"); 497 return -1; 498 } 499 eal_mcfg_update_internal(); 500 break; 501 case RTE_PROC_AUTO: 502 case RTE_PROC_INVALID: 503 RTE_LOG(ERR, EAL, "Invalid process type %d\n", 504 config->process_type); 505 return -1; 506 } 507 508 return 0; 509 } 510 511 /* Unlocks hugepage directories that were locked by eal_hugepage_info_init */ 512 static void 513 eal_hugedirs_unlock(void) 514 { 515 int i; 516 struct internal_config *internal_conf = 517 eal_get_internal_configuration(); 518 519 for (i = 0; i < MAX_HUGEPAGE_SIZES; i++) 520 { 521 /* skip uninitialized */ 522 if (internal_conf->hugepage_info[i].lock_descriptor < 0) 523 continue; 524 /* unlock hugepage file */ 525 flock(internal_conf->hugepage_info[i].lock_descriptor, LOCK_UN); 526 close(internal_conf->hugepage_info[i].lock_descriptor); 527 /* reset the field */ 528 internal_conf->hugepage_info[i].lock_descriptor = -1; 529 } 530 } 531 532 /* display usage */ 533 static void 534 eal_usage(const char *prgname) 535 { 536 rte_usage_hook_t hook = eal_get_application_usage_hook(); 537 538 printf("\nUsage: %s ", prgname); 539 eal_common_usage(); 540 printf("EAL Linux options:\n" 541 " --"OPT_SOCKET_MEM" Memory to allocate on sockets (comma separated values)\n" 542 " --"OPT_SOCKET_LIMIT" Limit memory allocation on sockets (comma separated values)\n" 543 " --"OPT_HUGE_DIR" Directory where hugetlbfs is mounted\n" 544 " --"OPT_FILE_PREFIX" Prefix for hugepage filenames\n" 545 " --"OPT_CREATE_UIO_DEV" Create /dev/uioX (usually done by hotplug)\n" 546 " --"OPT_VFIO_INTR" Interrupt mode for VFIO (legacy|msi|msix)\n" 547 " --"OPT_VFIO_VF_TOKEN" VF token (UUID) shared between SR-IOV PF and VFs\n" 548 " --"OPT_LEGACY_MEM" Legacy memory mode (no dynamic allocation, contiguous segments)\n" 549 " --"OPT_SINGLE_FILE_SEGMENTS" Put all hugepage memory in single files\n" 550 " --"OPT_MATCH_ALLOCATIONS" Free hugepages exactly as allocated\n" 551 "\n"); 552 /* Allow the application to print its usage message too if hook is set */ 553 if (hook) { 554 printf("===== Application Usage =====\n\n"); 555 (hook)(prgname); 556 } 557 } 558 559 static int 560 eal_parse_socket_arg(char *strval, volatile uint64_t *socket_arg) 561 { 562 char * arg[RTE_MAX_NUMA_NODES]; 563 char *end; 564 int arg_num, i, len; 565 566 len = strnlen(strval, SOCKET_MEM_STRLEN); 567 if (len == SOCKET_MEM_STRLEN) { 568 RTE_LOG(ERR, EAL, "--socket-mem is too long\n"); 569 return -1; 570 } 571 572 /* all other error cases will be caught later */ 573 if (!isdigit(strval[len-1])) 574 return -1; 575 576 /* split the optarg into separate socket values */ 577 arg_num 
			arg, RTE_MAX_NUMA_NODES, ',');

	/* if split failed, or 0 arguments */
	if (arg_num <= 0)
		return -1;

	/* parse each defined socket option */
	errno = 0;
	for (i = 0; i < arg_num; i++) {
		uint64_t val;
		end = NULL;
		val = strtoull(arg[i], &end, 10);

		/* check for invalid input */
		if ((errno != 0) ||
				(arg[i][0] == '\0') || (end == NULL) || (*end != '\0'))
			return -1;
		val <<= 20;
		socket_arg[i] = val;
	}

	return 0;
}

static int
eal_parse_vfio_intr(const char *mode)
{
	struct internal_config *internal_conf =
		eal_get_internal_configuration();
	unsigned i;
	static struct {
		const char *name;
		enum rte_intr_mode value;
	} map[] = {
		{ "legacy", RTE_INTR_MODE_LEGACY },
		{ "msi", RTE_INTR_MODE_MSI },
		{ "msix", RTE_INTR_MODE_MSIX },
	};

	for (i = 0; i < RTE_DIM(map); i++) {
		if (!strcmp(mode, map[i].name)) {
			internal_conf->vfio_intr_mode = map[i].value;
			return 0;
		}
	}
	return -1;
}

static int
eal_parse_vfio_vf_token(const char *vf_token)
{
	struct internal_config *cfg = eal_get_internal_configuration();
	rte_uuid_t uuid;

	if (!rte_uuid_parse(vf_token, uuid)) {
		rte_uuid_copy(cfg->vfio_vf_token, uuid);
		return 0;
	}

	return -1;
}

/* Parse the arguments for --log-level only */
static void
eal_log_level_parse(int argc, char **argv)
{
	int opt;
	char **argvopt;
	int option_index;
	const int old_optind = optind;
	const int old_optopt = optopt;
	char * const old_optarg = optarg;
	struct internal_config *internal_conf =
		eal_get_internal_configuration();

	argvopt = argv;
	optind = 1;

	while ((opt = getopt_long(argc, argvopt, eal_short_options,
				  eal_long_options, &option_index)) != EOF) {

		int ret;

		/* getopt is not happy, stop right now */
		if (opt == '?')
			break;

		ret = (opt == OPT_LOG_LEVEL_NUM) ?
			eal_parse_common_option(opt, optarg, internal_conf) : 0;

		/* common parser is not happy */
		if (ret < 0)
			break;
	}

	/* restore getopt lib */
	optind = old_optind;
	optopt = old_optopt;
	optarg = old_optarg;
}

/* Parse the argument given in the command line of the application */
static int
eal_parse_args(int argc, char **argv)
{
	int opt, ret;
	char **argvopt;
	int option_index;
	char *prgname = argv[0];
	const int old_optind = optind;
	const int old_optopt = optopt;
	char * const old_optarg = optarg;
	struct internal_config *internal_conf =
		eal_get_internal_configuration();

	argvopt = argv;
	optind = 1;

	while ((opt = getopt_long(argc, argvopt, eal_short_options,
				  eal_long_options, &option_index)) != EOF) {

		/* getopt didn't recognise the option */
		if (opt == '?') {
			eal_usage(prgname);
			ret = -1;
			goto out;
		}

		/* eal_log_level_parse() already handled this option */
		if (opt == OPT_LOG_LEVEL_NUM)
			continue;

		ret = eal_parse_common_option(opt, optarg, internal_conf);
		/* common parser is not happy */
		if (ret < 0) {
			eal_usage(prgname);
			ret = -1;
			goto out;
		}
		/* common parser handled this option */
		if (ret == 0)
			continue;

		switch (opt) {
		case 'h':
			eal_usage(prgname);
			exit(EXIT_SUCCESS);

		case OPT_HUGE_DIR_NUM:
		{
			char *hdir = strdup(optarg);
			if (hdir == NULL)
				RTE_LOG(ERR, EAL, "Could not store hugepage directory\n");
			else {
				/* free old hugepage dir */
				if (internal_conf->hugepage_dir != NULL)
					free(internal_conf->hugepage_dir);
				internal_conf->hugepage_dir = hdir;
			}
			break;
		}
		case OPT_FILE_PREFIX_NUM:
		{
			char *prefix = strdup(optarg);
			if (prefix == NULL)
				RTE_LOG(ERR, EAL, "Could not store file prefix\n");
			else {
				/* free old prefix */
				if (internal_conf->hugefile_prefix != NULL)
					free(internal_conf->hugefile_prefix);
				internal_conf->hugefile_prefix = prefix;
			}
			break;
		}
		case OPT_SOCKET_MEM_NUM:
			if (eal_parse_socket_arg(optarg,
					internal_conf->socket_mem) < 0) {
				RTE_LOG(ERR, EAL, "invalid parameters for --"
						OPT_SOCKET_MEM "\n");
				eal_usage(prgname);
				ret = -1;
				goto out;
			}
			internal_conf->force_sockets = 1;
			break;

		case OPT_SOCKET_LIMIT_NUM:
			if (eal_parse_socket_arg(optarg,
					internal_conf->socket_limit) < 0) {
				RTE_LOG(ERR, EAL, "invalid parameters for --"
						OPT_SOCKET_LIMIT "\n");
				eal_usage(prgname);
				ret = -1;
				goto out;
			}
			internal_conf->force_socket_limits = 1;
			break;

		case OPT_VFIO_INTR_NUM:
			if (eal_parse_vfio_intr(optarg) < 0) {
				RTE_LOG(ERR, EAL, "invalid parameters for --"
						OPT_VFIO_INTR "\n");
				eal_usage(prgname);
				ret = -1;
				goto out;
			}
			break;

		case OPT_VFIO_VF_TOKEN_NUM:
			if (eal_parse_vfio_vf_token(optarg) < 0) {
				RTE_LOG(ERR, EAL, "invalid parameters for --"
						OPT_VFIO_VF_TOKEN "\n");
				eal_usage(prgname);
				ret = -1;
				goto out;
			}
			break;

		case OPT_CREATE_UIO_DEV_NUM:
			internal_conf->create_uio_dev = 1;
			break;

		case OPT_MBUF_POOL_OPS_NAME_NUM:
		{
			char *ops_name = strdup(optarg);
			if (ops_name == NULL)
				RTE_LOG(ERR, EAL, "Could not store mbuf pool ops name\n");
			else {
				/* free old ops name */
				if (internal_conf->user_mbuf_pool_ops_name !=
						NULL)
					free(internal_conf->user_mbuf_pool_ops_name);

				internal_conf->user_mbuf_pool_ops_name =
						ops_name;
			}
			break;
		}
		case OPT_MATCH_ALLOCATIONS_NUM:
			internal_conf->match_allocations = 1;
			break;

		default:
			if (opt < OPT_LONG_MIN_NUM && isprint(opt)) {
				RTE_LOG(ERR, EAL, "Option %c is not supported "
					"on Linux\n", opt);
			} else if (opt >= OPT_LONG_MIN_NUM &&
					opt < OPT_LONG_MAX_NUM) {
				RTE_LOG(ERR, EAL, "Option %s is not supported "
					"on Linux\n",
					eal_long_options[option_index].name);
			} else {
				RTE_LOG(ERR, EAL, "Option %d is not supported "
					"on Linux\n", opt);
			}
			eal_usage(prgname);
			ret = -1;
			goto out;
		}
	}

	/* create runtime data directory. In no_shconf mode, skip any errors */
	if (eal_create_runtime_dir() < 0) {
		if (internal_conf->no_shconf == 0) {
			RTE_LOG(ERR, EAL, "Cannot create runtime directory\n");
			ret = -1;
			goto out;
		} else
			RTE_LOG(WARNING, EAL, "No DPDK runtime directory created\n");
	}

	if (eal_adjust_config(internal_conf) != 0) {
		ret = -1;
		goto out;
	}

	/* sanity checks */
	if (eal_check_common_options(internal_conf) != 0) {
		eal_usage(prgname);
		ret = -1;
		goto out;
	}

	if (optind >= 0)
		argv[optind-1] = prgname;
	ret = optind-1;

out:
	/* restore getopt lib */
	optind = old_optind;
	optopt = old_optopt;
	optarg = old_optarg;

	return ret;
}

static int
check_socket(const struct rte_memseg_list *msl, void *arg)
{
	int *socket_id = arg;

	if (msl->external)
		return 0;

	return *socket_id == msl->socket_id;
}

static void
eal_check_mem_on_local_socket(void)
{
	int socket_id;
	const struct rte_config *config = rte_eal_get_configuration();

	socket_id = rte_lcore_to_socket_id(config->main_lcore);

	if (rte_memseg_list_walk(check_socket, &socket_id) == 0)
		RTE_LOG(WARNING, EAL, "WARNING: Main core has no memory on local socket!\n");
}

static int
sync_func(__rte_unused void *arg)
{
	return 0;
}

/*
 * Request iopl privilege for all RPL, returns 0 on success
 * iopl() call is mostly for the i386 architecture. For other architectures,
 * return -1 to indicate IO privilege can't be changed in this way.
 */
int
rte_eal_iopl_init(void)
{
#if defined(RTE_ARCH_X86)
	if (iopl(3) != 0)
		return -1;
#endif
	return 0;
}

#ifdef VFIO_PRESENT
static int rte_eal_vfio_setup(void)
{
	if (rte_vfio_enable("vfio"))
		return -1;

	return 0;
}
#endif

static void rte_eal_init_alert(const char *msg)
{
	fprintf(stderr, "EAL: FATAL: %s\n", msg);
	RTE_LOG(ERR, EAL, "%s\n", msg);
}

/*
 * On Linux 3.6+, even if VFIO is not loaded, whenever IOMMU is enabled in the
 * BIOS and in the kernel, /sys/kernel/iommu_groups path will contain kernel
 * IOMMU groups. If IOMMU is not enabled, that path would be empty.
 * Therefore, checking if the path is empty will tell us if IOMMU is enabled.
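 * We only need to know whether at least one group exists, so it is enough
 * to stop reading the directory after the first entry beyond "." and "..".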
 */
static bool
is_iommu_enabled(void)
{
	DIR *dir = opendir(KERNEL_IOMMU_GROUPS_PATH);
	struct dirent *d;
	int n = 0;

	/* if directory doesn't exist, assume IOMMU is not enabled */
	if (dir == NULL)
		return false;

	while ((d = readdir(dir)) != NULL) {
		/* skip dot and dot-dot */
		if (++n > 2)
			break;
	}
	closedir(dir);

	return n > 2;
}

/* Launch threads, called at application init(). */
int
rte_eal_init(int argc, char **argv)
{
	int i, fctret, ret;
	pthread_t thread_id;
	static uint32_t run_once;
	uint32_t has_run = 0;
	const char *p;
	static char logid[PATH_MAX];
	char cpuset[RTE_CPU_AFFINITY_STR_LEN];
	char thread_name[RTE_MAX_THREAD_NAME_LEN];
	bool phys_addrs;
	const struct rte_config *config = rte_eal_get_configuration();
	struct internal_config *internal_conf =
		eal_get_internal_configuration();

	/* checks if the machine is adequate */
	if (!rte_cpu_is_supported()) {
		rte_eal_init_alert("unsupported cpu type.");
		rte_errno = ENOTSUP;
		return -1;
	}

	if (!__atomic_compare_exchange_n(&run_once, &has_run, 1, 0,
			__ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
		rte_eal_init_alert("already called initialization.");
		rte_errno = EALREADY;
		return -1;
	}

	p = strrchr(argv[0], '/');
	strlcpy(logid, p ? p + 1 : argv[0], sizeof(logid));
	thread_id = pthread_self();

	eal_reset_internal_config(internal_conf);

	/* set log level as early as possible */
	eal_log_level_parse(argc, argv);

	/* clone argv to report out later in telemetry */
	eal_save_args(argc, argv);

	if (rte_eal_cpu_init() < 0) {
		rte_eal_init_alert("Cannot detect lcores.");
		rte_errno = ENOTSUP;
		return -1;
	}

	fctret = eal_parse_args(argc, argv);
	if (fctret < 0) {
		rte_eal_init_alert("Invalid 'command line' arguments.");
		rte_errno = EINVAL;
		__atomic_store_n(&run_once, 0, __ATOMIC_RELAXED);
		return -1;
	}

	if (eal_plugins_init() < 0) {
		rte_eal_init_alert("Cannot init plugins");
		rte_errno = EINVAL;
		__atomic_store_n(&run_once, 0, __ATOMIC_RELAXED);
		return -1;
	}

	if (eal_trace_init() < 0) {
		rte_eal_init_alert("Cannot init trace");
		rte_errno = EFAULT;
		return -1;
	}

	if (eal_option_device_parse()) {
		rte_errno = ENODEV;
		__atomic_store_n(&run_once, 0, __ATOMIC_RELAXED);
		return -1;
	}

	if (rte_config_init() < 0) {
		rte_eal_init_alert("Cannot init config");
		return -1;
	}

	if (rte_eal_intr_init() < 0) {
		rte_eal_init_alert("Cannot init interrupt-handling thread");
		return -1;
	}

	if (rte_eal_alarm_init() < 0) {
		rte_eal_init_alert("Cannot init alarm");
		/* rte_eal_alarm_init sets rte_errno on failure. */
		return -1;
	}

	/* Put mp channel init before bus scan so that we can init the vdev
	 * bus through mp channel in the secondary process before the bus scan.
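	 * A failure here aborts init only in the primary process; a secondary
	 * process logs the alert and continues (ENOTSUP is ignored for both).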
	 */
	if (rte_mp_channel_init() < 0 && rte_errno != ENOTSUP) {
		rte_eal_init_alert("failed to init mp channel");
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			rte_errno = EFAULT;
			return -1;
		}
	}

	/* register multi-process action callbacks for hotplug */
	if (eal_mp_dev_hotplug_init() < 0) {
		rte_eal_init_alert("failed to register mp callback for hotplug");
		return -1;
	}

	if (rte_bus_scan()) {
		rte_eal_init_alert("Cannot scan the buses for devices");
		rte_errno = ENODEV;
		__atomic_store_n(&run_once, 0, __ATOMIC_RELAXED);
		return -1;
	}

	phys_addrs = rte_eal_using_phys_addrs() != 0;

	/* if no EAL option "--iova-mode=<pa|va>", use bus IOVA scheme */
	if (internal_conf->iova_mode == RTE_IOVA_DC) {
		/* autodetect the IOVA mapping mode */
		enum rte_iova_mode iova_mode = rte_bus_get_iommu_class();

		if (iova_mode == RTE_IOVA_DC) {
			RTE_LOG(DEBUG, EAL, "Buses did not request a specific IOVA mode.\n");

			if (!phys_addrs) {
				/* if we have no access to physical addresses,
				 * pick IOVA as VA mode.
				 */
				iova_mode = RTE_IOVA_VA;
				RTE_LOG(DEBUG, EAL, "Physical addresses are unavailable, selecting IOVA as VA mode.\n");
#if defined(RTE_LIB_KNI) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
			} else if (rte_eal_check_module("rte_kni") == 1) {
				iova_mode = RTE_IOVA_PA;
				RTE_LOG(DEBUG, EAL, "KNI is loaded, selecting IOVA as PA mode for better KNI performance.\n");
#endif
			} else if (is_iommu_enabled()) {
				/* we have an IOMMU, pick IOVA as VA mode */
				iova_mode = RTE_IOVA_VA;
				RTE_LOG(DEBUG, EAL, "IOMMU is available, selecting IOVA as VA mode.\n");
			} else {
				/* physical addresses available, and no IOMMU
				 * found, so pick IOVA as PA.
				 */
				iova_mode = RTE_IOVA_PA;
				RTE_LOG(DEBUG, EAL, "IOMMU is not available, selecting IOVA as PA mode.\n");
			}
		}
#if defined(RTE_LIB_KNI) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
		/* Workaround for KNI which requires physical address to work
		 * in kernels < 4.10
		 */
		if (iova_mode == RTE_IOVA_VA &&
				rte_eal_check_module("rte_kni") == 1) {
			if (phys_addrs) {
				iova_mode = RTE_IOVA_PA;
				RTE_LOG(WARNING, EAL, "Forcing IOVA as 'PA' because KNI module is loaded\n");
			} else {
				RTE_LOG(DEBUG, EAL, "KNI can not work since physical addresses are unavailable\n");
			}
		}
#endif
		rte_eal_get_configuration()->iova_mode = iova_mode;
	} else {
		rte_eal_get_configuration()->iova_mode =
			internal_conf->iova_mode;
	}

	if (rte_eal_iova_mode() == RTE_IOVA_PA && !phys_addrs) {
		rte_eal_init_alert("Cannot use IOVA as 'PA' since physical addresses are not available");
		rte_errno = EINVAL;
		return -1;
	}

	RTE_LOG(INFO, EAL, "Selected IOVA mode '%s'\n",
		rte_eal_iova_mode() == RTE_IOVA_PA ? "PA" : "VA");

	if (internal_conf->no_hugetlbfs == 0) {
		/* rte_config isn't initialized yet */
		ret = internal_conf->process_type == RTE_PROC_PRIMARY ?
				eal_hugepage_info_init() :
				eal_hugepage_info_read();
		if (ret < 0) {
			rte_eal_init_alert("Cannot get hugepage information.");
			rte_errno = EACCES;
			__atomic_store_n(&run_once, 0, __ATOMIC_RELAXED);
			return -1;
		}
	}

	if (internal_conf->memory == 0 && internal_conf->force_sockets == 0) {
		if (internal_conf->no_hugetlbfs)
			internal_conf->memory = MEMSIZE_IF_NO_HUGE_PAGE;
	}

	if (internal_conf->vmware_tsc_map == 1) {
#ifdef RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT
		rte_cycles_vmware_tsc_map = 1;
		RTE_LOG(DEBUG, EAL, "Using VMWARE TSC MAP, "
				"you must have monitor_control.pseudo_perfctr = TRUE\n");
#else
		RTE_LOG(WARNING, EAL, "Ignoring --vmware-tsc-map because "
				"RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT is not set\n");
#endif
	}

	if (eal_log_init(logid, internal_conf->syslog_facility) < 0) {
		rte_eal_init_alert("Cannot init logging.");
		rte_errno = ENOMEM;
		__atomic_store_n(&run_once, 0, __ATOMIC_RELAXED);
		return -1;
	}

#ifdef VFIO_PRESENT
	if (rte_eal_vfio_setup() < 0) {
		rte_eal_init_alert("Cannot init VFIO");
		rte_errno = EAGAIN;
		__atomic_store_n(&run_once, 0, __ATOMIC_RELAXED);
		return -1;
	}
#endif
	/* in secondary processes, memory init may allocate additional fbarrays
	 * not present in primary processes, so to avoid any potential issues,
	 * initialize memzones first.
	 */
	if (rte_eal_memzone_init() < 0) {
		rte_eal_init_alert("Cannot init memzone");
		rte_errno = ENODEV;
		return -1;
	}

	if (rte_eal_memory_init() < 0) {
		rte_eal_init_alert("Cannot init memory");
		rte_errno = ENOMEM;
		return -1;
	}

	/* the directories are locked during eal_hugepage_info_init */
	eal_hugedirs_unlock();

	if (rte_eal_malloc_heap_init() < 0) {
		rte_eal_init_alert("Cannot init malloc heap");
		rte_errno = ENODEV;
		return -1;
	}

	if (rte_eal_tailqs_init() < 0) {
		rte_eal_init_alert("Cannot init tail queues for objects");
		rte_errno = EFAULT;
		return -1;
	}

	if (rte_eal_timer_init() < 0) {
		rte_eal_init_alert("Cannot init HPET or TSC timers");
		rte_errno = ENOTSUP;
		return -1;
	}

	eal_check_mem_on_local_socket();

	if (pthread_setaffinity_np(pthread_self(), sizeof(rte_cpuset_t),
			&lcore_config[config->main_lcore].cpuset) != 0) {
		rte_eal_init_alert("Cannot set affinity");
		rte_errno = EINVAL;
		return -1;
	}
	__rte_thread_init(config->main_lcore,
		&lcore_config[config->main_lcore].cpuset);

	ret = eal_thread_dump_current_affinity(cpuset, sizeof(cpuset));
	RTE_LOG(DEBUG, EAL, "Main lcore %u is ready (tid=%zx;cpuset=[%s%s])\n",
		config->main_lcore, (uintptr_t)thread_id, cpuset,
		ret == 0 ? "" : "...");

	RTE_LCORE_FOREACH_WORKER(i) {

		/*
		 * create communication pipes between main thread
		 * and children
		 */
		if (pipe(lcore_config[i].pipe_main2worker) < 0)
			rte_panic("Cannot create pipe\n");
		if (pipe(lcore_config[i].pipe_worker2main) < 0)
			rte_panic("Cannot create pipe\n");

		lcore_config[i].state = WAIT;

		/* create a thread for each lcore */
		ret = pthread_create(&lcore_config[i].thread_id, NULL,
				     eal_thread_loop, NULL);
		if (ret != 0)
			rte_panic("Cannot create thread\n");

		/* Set thread_name for aid in debugging.
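		 * Naming is best-effort: if it fails, we only log a DEBUG message.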
		 */
		snprintf(thread_name, sizeof(thread_name),
			"lcore-worker-%d", i);
		ret = rte_thread_setname(lcore_config[i].thread_id,
						thread_name);
		if (ret != 0)
			RTE_LOG(DEBUG, EAL,
				"Cannot set name for lcore thread\n");

		ret = pthread_setaffinity_np(lcore_config[i].thread_id,
			sizeof(rte_cpuset_t), &lcore_config[i].cpuset);
		if (ret != 0)
			rte_panic("Cannot set affinity\n");
	}

	/*
	 * Launch a dummy function on all worker lcores, so that main lcore
	 * knows they are all ready when this function returns.
	 */
	rte_eal_mp_remote_launch(sync_func, NULL, SKIP_MAIN);
	rte_eal_mp_wait_lcore();

	/* initialize services so vdevs register service during bus_probe. */
	ret = rte_service_init();
	if (ret) {
		rte_eal_init_alert("rte_service_init() failed");
		rte_errno = -ret;
		return -1;
	}

	/* Probe all the buses and devices/drivers on them */
	if (rte_bus_probe()) {
		rte_eal_init_alert("Cannot probe devices");
		rte_errno = ENOTSUP;
		return -1;
	}

#ifdef VFIO_PRESENT
	/* Register mp action after probe() so that we got enough info */
	if (rte_vfio_is_enabled("vfio") && vfio_mp_sync_setup() < 0)
		return -1;
#endif

	/* initialize default service/lcore mappings and start running. Ignore
	 * -ENOTSUP, as it indicates no service coremask passed to EAL.
	 */
	ret = rte_service_start_with_defaults();
	if (ret < 0 && ret != -ENOTSUP) {
		rte_errno = -ret;
		return -1;
	}

	/*
	 * Clean up unused files in runtime directory. We do this at the end of
	 * init and not at the beginning because we want to clean stuff up
	 * whether we are primary or secondary process, but we cannot remove
	 * primary process' files because secondary should be able to run even
	 * if primary process is dead.
	 *
	 * In no_shconf mode, no runtime directory is created in the first
	 * place, so no cleanup needed.
	 */
	if (!internal_conf->no_shconf && eal_clean_runtime_dir() < 0) {
		rte_eal_init_alert("Cannot clear runtime directory");
		return -1;
	}
	if (rte_eal_process_type() == RTE_PROC_PRIMARY && !internal_conf->no_telemetry) {
		int tlog = rte_log_register_type_and_pick_level(
				"lib.telemetry", RTE_LOG_WARNING);
		if (tlog < 0)
			tlog = RTE_LOGTYPE_EAL;
		if (rte_telemetry_init(rte_eal_get_runtime_dir(),
				rte_version(),
				&internal_conf->ctrl_cpuset, rte_log, tlog) != 0)
			return -1;
	}

	eal_mcfg_complete();

	return fctret;
}

static int
mark_freeable(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
		void *arg __rte_unused)
{
	/* ms is const, so find this memseg */
	struct rte_memseg *found;

	if (msl->external)
		return 0;

	found = rte_mem_virt2memseg(ms->addr, msl);

	found->flags &= ~RTE_MEMSEG_FLAG_DO_NOT_FREE;

	return 0;
}

int
rte_eal_cleanup(void)
{
	/* if we're in a primary process, we need to mark hugepages as freeable
	 * so that finalization can release them back to the system.
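	 * Secondary processes skip this walk and simply detach from the
	 * shared memory below.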
	 */
	struct internal_config *internal_conf =
		eal_get_internal_configuration();

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_memseg_walk(mark_freeable, NULL);
	rte_service_finalize();
	rte_mp_channel_cleanup();
	/* after this point, any DPDK pointers will become dangling */
	rte_eal_memory_detach();
	rte_eal_alarm_cleanup();
	rte_trace_save();
	eal_trace_fini();
	eal_cleanup_config(internal_conf);
	return 0;
}

int rte_eal_create_uio_dev(void)
{
	const struct internal_config *internal_conf =
		eal_get_internal_configuration();

	return internal_conf->create_uio_dev;
}

enum rte_intr_mode
rte_eal_vfio_intr_mode(void)
{
	const struct internal_config *internal_conf =
		eal_get_internal_configuration();

	return internal_conf->vfio_intr_mode;
}

void
rte_eal_vfio_get_vf_token(rte_uuid_t vf_token)
{
	struct internal_config *cfg = eal_get_internal_configuration();

	rte_uuid_copy(vf_token, cfg->vfio_vf_token);
}

int
rte_eal_check_module(const char *module_name)
{
	char sysfs_mod_name[PATH_MAX];
	struct stat st;
	int n;

	if (NULL == module_name)
		return -1;

	/* Check if there is sysfs mounted */
	if (stat("/sys/module", &st) != 0) {
		RTE_LOG(DEBUG, EAL, "sysfs is not mounted! error %i (%s)\n",
			errno, strerror(errno));
		return -1;
	}

	/* A module might be built-in, therefore try sysfs */
	n = snprintf(sysfs_mod_name, PATH_MAX, "/sys/module/%s", module_name);
	if (n < 0 || n > PATH_MAX) {
		RTE_LOG(DEBUG, EAL, "Could not format module path\n");
		return -1;
	}

	if (stat(sysfs_mod_name, &st) != 0) {
		RTE_LOG(DEBUG, EAL, "Module %s not found! error %i (%s)\n",
			sysfs_mod_name, errno, strerror(errno));
		return 0;
	}

	/* Module has been found */
	return 1;
}