/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation.
 * Copyright(c) 2014 6WIND S.A.
 */

#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#ifndef RTE_EXEC_ENV_WINDOWS
#include <syslog.h>
#endif
#include <ctype.h>
#include <limits.h>
#include <errno.h>
#include <getopt.h>
#ifndef RTE_EXEC_ENV_WINDOWS
#include <dlfcn.h>
#include <libgen.h>
#endif
#include <sys/types.h>
#include <sys/stat.h>
#ifndef RTE_EXEC_ENV_WINDOWS
#include <dirent.h>
#endif

#include <rte_string_fns.h>
#include <rte_eal.h>
#include <rte_log.h>
#include <rte_lcore.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_version.h>
#include <rte_devargs.h>
#include <rte_memcpy.h>
#ifndef RTE_EXEC_ENV_WINDOWS
#include <rte_telemetry.h>
#endif
#include <rte_vect.h>

#include "eal_internal_cfg.h"
#include "eal_options.h"
#include "eal_filesystem.h"
#include "eal_private.h"
#include "eal_log.h"
#ifndef RTE_EXEC_ENV_WINDOWS
#include "eal_trace.h"
#endif

#define BITS_PER_HEX 4
#define LCORE_OPT_LST 1
#define LCORE_OPT_MSK 2
#define LCORE_OPT_MAP 3

const char
eal_short_options[] =
	"a:" /* allow */
	"b:" /* block */
	"c:" /* coremask */
	"s:" /* service coremask */
	"d:" /* driver */
	"h"  /* help */
	"l:" /* corelist */
	"S:" /* service corelist */
	"m:" /* memory size */
	"n:" /* memory channels */
	"r:" /* memory ranks */
	"v"  /* version */
	;

const struct option
eal_long_options[] = {
	{OPT_BASE_VIRTADDR, 1, NULL, OPT_BASE_VIRTADDR_NUM},
	{OPT_CREATE_UIO_DEV, 0, NULL, OPT_CREATE_UIO_DEV_NUM},
	{OPT_FILE_PREFIX, 1, NULL, OPT_FILE_PREFIX_NUM},
	{OPT_HELP, 0, NULL, OPT_HELP_NUM},
	{OPT_HUGE_DIR, 1, NULL, OPT_HUGE_DIR_NUM},
	{OPT_HUGE_UNLINK, 0, NULL, OPT_HUGE_UNLINK_NUM},
	{OPT_IOVA_MODE, 1, NULL, OPT_IOVA_MODE_NUM},
	{OPT_LCORES, 1, NULL, OPT_LCORES_NUM},
	{OPT_LOG_LEVEL, 1, NULL, OPT_LOG_LEVEL_NUM},
	{OPT_TRACE, 1, NULL, OPT_TRACE_NUM},
	{OPT_TRACE_DIR, 1, NULL, OPT_TRACE_DIR_NUM},
	{OPT_TRACE_BUF_SIZE, 1, NULL, OPT_TRACE_BUF_SIZE_NUM},
	{OPT_TRACE_MODE, 1, NULL, OPT_TRACE_MODE_NUM},
	{OPT_MAIN_LCORE, 1, NULL, OPT_MAIN_LCORE_NUM},
	{OPT_MBUF_POOL_OPS_NAME, 1, NULL, OPT_MBUF_POOL_OPS_NAME_NUM},
	{OPT_NO_HPET, 0, NULL, OPT_NO_HPET_NUM},
	{OPT_NO_HUGE, 0, NULL, OPT_NO_HUGE_NUM},
	{OPT_NO_PCI, 0, NULL, OPT_NO_PCI_NUM},
	{OPT_NO_SHCONF, 0, NULL, OPT_NO_SHCONF_NUM},
	{OPT_IN_MEMORY, 0, NULL, OPT_IN_MEMORY_NUM},
	{OPT_DEV_BLOCK, 1, NULL, OPT_DEV_BLOCK_NUM},
	{OPT_DEV_ALLOW, 1, NULL, OPT_DEV_ALLOW_NUM},
	{OPT_PROC_TYPE, 1, NULL, OPT_PROC_TYPE_NUM},
	{OPT_SOCKET_MEM, 1, NULL, OPT_SOCKET_MEM_NUM},
	{OPT_SOCKET_LIMIT, 1, NULL, OPT_SOCKET_LIMIT_NUM},
	{OPT_SYSLOG, 1, NULL, OPT_SYSLOG_NUM},
	{OPT_VDEV, 1, NULL, OPT_VDEV_NUM},
	{OPT_VFIO_INTR, 1, NULL, OPT_VFIO_INTR_NUM},
	{OPT_VFIO_VF_TOKEN, 1, NULL, OPT_VFIO_VF_TOKEN_NUM},
	{OPT_VMWARE_TSC_MAP, 0, NULL, OPT_VMWARE_TSC_MAP_NUM},
	{OPT_LEGACY_MEM, 0, NULL, OPT_LEGACY_MEM_NUM},
	{OPT_SINGLE_FILE_SEGMENTS, 0, NULL, OPT_SINGLE_FILE_SEGMENTS_NUM},
	{OPT_MATCH_ALLOCATIONS, 0, NULL, OPT_MATCH_ALLOCATIONS_NUM},
	{OPT_TELEMETRY, 0, NULL, OPT_TELEMETRY_NUM},
	{OPT_NO_TELEMETRY, 0, NULL, OPT_NO_TELEMETRY_NUM},
	{OPT_FORCE_MAX_SIMD_BITWIDTH, 1, NULL, OPT_FORCE_MAX_SIMD_BITWIDTH_NUM},

	{0, 0, NULL, 0}
};
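/*
 * Illustrative sketch of how these tables are consumed (the loop below is
 * not part of this file; the real parser lives in the OS-specific eal.c):
 *
 *	int opt, option_index;
 *
 *	while ((opt = getopt_long(argc, argv, eal_short_options,
 *			eal_long_options, &option_index)) != -1)
 *		eal_parse_common_option(opt, optarg, internal_conf);
 *
 * Options unknown to the common layer make eal_parse_common_option()
 * return 1, so the caller can handle (or reject) them itself.
 */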
TAILQ_HEAD(shared_driver_list, shared_driver);

/* Definition for shared object drivers. */
struct shared_driver {
	TAILQ_ENTRY(shared_driver) next;

	char name[PATH_MAX];
	void* lib_handle;
};

/* List of external loadable drivers */
static struct shared_driver_list solib_list =
		TAILQ_HEAD_INITIALIZER(solib_list);

#ifndef RTE_EXEC_ENV_WINDOWS
/* Default path of external loadable drivers */
static const char *default_solib_dir = RTE_EAL_PMD_PATH;
#endif

/*
 * Stringified version of solib path used by dpdk-pmdinfo.py
 * Note: PLEASE DO NOT ALTER THIS without making a corresponding
 * change to usertools/dpdk-pmdinfo.py
 */
static const char dpdk_solib_path[] __rte_used =
		"DPDK_PLUGIN_PATH=" RTE_EAL_PMD_PATH;

TAILQ_HEAD(device_option_list, device_option);

struct device_option {
	TAILQ_ENTRY(device_option) next;

	enum rte_devtype type;
	char arg[];
};

static struct device_option_list devopt_list =
		TAILQ_HEAD_INITIALIZER(devopt_list);

static int main_lcore_parsed;
static int mem_parsed;
static int core_parsed;

/* Allow the application to print its usage message too if set */
static rte_usage_hook_t rte_application_usage_hook;

/* Returns rte_usage_hook_t */
rte_usage_hook_t
eal_get_application_usage_hook(void)
{
	return rte_application_usage_hook;
}

/* Set a per-application usage message */
rte_usage_hook_t
rte_set_application_usage_hook(rte_usage_hook_t usage_func)
{
	rte_usage_hook_t old_func;

	/* Will be NULL on the first call to denote the last usage routine. */
	old_func = rte_application_usage_hook;
	rte_application_usage_hook = usage_func;

	return old_func;
}
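/*
 * Illustrative use of the hook above (a sketch, not code from this file):
 * an application that wants its own usage text printed alongside the EAL
 * help output is expected to register a callback before rte_eal_init(), e.g.
 *
 *	static void app_usage(const char *prgname)
 *	{
 *		printf("Usage: %s [EAL options] -- [app options]\n", prgname);
 *	}
 *	...
 *	rte_set_application_usage_hook(app_usage);
 *
 * The previous hook is returned, so callers can chain or restore it.
 */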
#ifndef RTE_EXEC_ENV_WINDOWS
static char **eal_args;
static char **eal_app_args;

#define EAL_PARAM_REQ "/eal/params"
#define EAL_APP_PARAM_REQ "/eal/app_params"

/* callback handler for telemetry library to report out EAL flags */
int
handle_eal_info_request(const char *cmd, const char *params __rte_unused,
		struct rte_tel_data *d)
{
	char **args;
	int used = 0;
	int i = 0;

	if (strcmp(cmd, EAL_PARAM_REQ) == 0)
		args = eal_args;
	else
		args = eal_app_args;

	rte_tel_data_start_array(d, RTE_TEL_STRING_VAL);
	if (args == NULL || args[0] == NULL)
		return 0;

	for ( ; args[i] != NULL; i++)
		used = rte_tel_data_add_array_string(d, args[i]);
	return used;
}

int
eal_save_args(int argc, char **argv)
{
	int i, j;

	rte_telemetry_register_cmd(EAL_PARAM_REQ, handle_eal_info_request,
			"Returns EAL commandline parameters used. Takes no parameters");
	rte_telemetry_register_cmd(EAL_APP_PARAM_REQ, handle_eal_info_request,
			"Returns app commandline parameters used. Takes no parameters");

	/* clone argv to report out later. We overprovision, but
	 * this does not waste huge amounts of memory
	 */
	eal_args = calloc(argc + 1, sizeof(*eal_args));
	if (eal_args == NULL)
		return -1;

	for (i = 0; i < argc; i++) {
		if (strcmp(argv[i], "--") == 0)
			break;
		eal_args[i] = strdup(argv[i]);
	}
	eal_args[i++] = NULL; /* always finish with NULL */

	/* allow reporting of any app args we know about too */
	if (i >= argc)
		return 0;

	eal_app_args = calloc(argc - i + 1, sizeof(*eal_args));
	if (eal_app_args == NULL)
		return -1;

	for (j = 0; i < argc; j++, i++)
		eal_app_args[j] = strdup(argv[i]);
	eal_app_args[j] = NULL;

	return 0;
}
#endif

static int
eal_option_device_add(enum rte_devtype type, const char *optarg)
{
	struct device_option *devopt;
	size_t optlen;
	int ret;

	optlen = strlen(optarg) + 1;
	devopt = calloc(1, sizeof(*devopt) + optlen);
	if (devopt == NULL) {
		RTE_LOG(ERR, EAL, "Unable to allocate device option\n");
		return -ENOMEM;
	}

	devopt->type = type;
	ret = strlcpy(devopt->arg, optarg, optlen);
	if (ret < 0) {
		RTE_LOG(ERR, EAL, "Unable to copy device option\n");
		free(devopt);
		return -EINVAL;
	}
	TAILQ_INSERT_TAIL(&devopt_list, devopt, next);
	return 0;
}

int
eal_option_device_parse(void)
{
	struct device_option *devopt;
	void *tmp;
	int ret = 0;

	RTE_TAILQ_FOREACH_SAFE(devopt, &devopt_list, next, tmp) {
		if (ret == 0) {
			ret = rte_devargs_add(devopt->type, devopt->arg);
			if (ret)
				RTE_LOG(ERR, EAL, "Unable to parse device '%s'\n",
					devopt->arg);
		}
		TAILQ_REMOVE(&devopt_list, devopt, next);
		free(devopt);
	}
	return ret;
}

const char *
eal_get_hugefile_prefix(void)
{
	const struct internal_config *internal_conf =
		eal_get_internal_configuration();

	if (internal_conf->hugefile_prefix != NULL)
		return internal_conf->hugefile_prefix;
	return HUGEFILE_PREFIX_DEFAULT;
}

void
eal_reset_internal_config(struct internal_config *internal_cfg)
{
	int i;

	internal_cfg->memory = 0;
	internal_cfg->force_nrank = 0;
	internal_cfg->force_nchannel = 0;
	internal_cfg->hugefile_prefix = NULL;
	internal_cfg->hugepage_dir = NULL;
	internal_cfg->force_sockets = 0;
	/* zero out the NUMA config */
	for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
		internal_cfg->socket_mem[i] = 0;
	internal_cfg->force_socket_limits = 0;
	/* zero out the NUMA limits config */
	for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
		internal_cfg->socket_limit[i] = 0;
	/* zero out hugedir descriptors */
	for (i = 0; i < MAX_HUGEPAGE_SIZES; i++) {
		memset(&internal_cfg->hugepage_info[i], 0,
				sizeof(internal_cfg->hugepage_info[0]));
		internal_cfg->hugepage_info[i].lock_descriptor = -1;
	}
	internal_cfg->base_virtaddr = 0;

#ifdef LOG_DAEMON
	internal_cfg->syslog_facility = LOG_DAEMON;
#endif

	/* if set to NONE, interrupt mode is determined automatically */
	internal_cfg->vfio_intr_mode = RTE_INTR_MODE_NONE;
	memset(internal_cfg->vfio_vf_token, 0,
			sizeof(internal_cfg->vfio_vf_token));

#ifdef RTE_LIBEAL_USE_HPET
	internal_cfg->no_hpet = 0;
#else
	internal_cfg->no_hpet = 1;
#endif
	internal_cfg->vmware_tsc_map = 0;
	internal_cfg->create_uio_dev = 0;
	internal_cfg->iova_mode = RTE_IOVA_DC;
	internal_cfg->user_mbuf_pool_ops_name = NULL;
	CPU_ZERO(&internal_cfg->ctrl_cpuset);
	internal_cfg->init_complete = 0;
	internal_cfg->max_simd_bitwidth.bitwidth = RTE_VECT_DEFAULT_SIMD_BITWIDTH;
	internal_cfg->max_simd_bitwidth.forced = 0;
}

static int
eal_plugin_add(const char *path)
{
	struct shared_driver *solib;

	solib = malloc(sizeof(*solib));
	if (solib == NULL) {
		RTE_LOG(ERR, EAL, "malloc(solib) failed\n");
		return -1;
	}
	memset(solib, 0, sizeof(*solib));
	strlcpy(solib->name, path, PATH_MAX);
	TAILQ_INSERT_TAIL(&solib_list, solib, next);

	return 0;
}

#ifdef RTE_EXEC_ENV_WINDOWS
int
eal_plugins_init(void)
{
	return 0;
}
#else

static int
eal_plugindir_init(const char *path)
{
	DIR *d = NULL;
	struct dirent *dent = NULL;
	char sopath[PATH_MAX];

	if (path == NULL || *path == '\0')
		return 0;

	d = opendir(path);
	if (d == NULL) {
		RTE_LOG(ERR, EAL, "failed to open directory %s: %s\n",
			path, strerror(errno));
		return -1;
	}

	while ((dent = readdir(d)) != NULL) {
		struct stat sb;
		int nlen = strnlen(dent->d_name, sizeof(dent->d_name));

		/* check if name ends in .so or .so.ABI_VERSION */
		if (strcmp(&dent->d_name[nlen - 3], ".so") != 0 &&
				strcmp(&dent->d_name[nlen - 4 - strlen(ABI_VERSION)],
					".so."ABI_VERSION) != 0)
			continue;

		snprintf(sopath, sizeof(sopath), "%s/%s", path, dent->d_name);

		/* if a regular file, add to list to load */
		if (!(stat(sopath, &sb) == 0 && S_ISREG(sb.st_mode)))
			continue;

		if (eal_plugin_add(sopath) == -1)
			break;
	}

	closedir(d);
	/* XXX this ignores failures from readdir() itself */
	return (dent == NULL) ? 0 : -1;
}

static int
verify_perms(const char *dirpath)
{
	struct stat st;

	/* if not root, check down one level first */
	if (strcmp(dirpath, "/") != 0) {
		static __thread char last_dir_checked[PATH_MAX];
		char copy[PATH_MAX];
		const char *dir;

		strlcpy(copy, dirpath, PATH_MAX);
		dir = dirname(copy);
		if (strncmp(dir, last_dir_checked, PATH_MAX) != 0) {
			if (verify_perms(dir) != 0)
				return -1;
			strlcpy(last_dir_checked, dir, PATH_MAX);
		}
	}

	/* call stat to check for permissions and ensure not world writable */
	if (stat(dirpath, &st) != 0) {
		RTE_LOG(ERR, EAL, "Error with stat on %s, %s\n",
				dirpath, strerror(errno));
		return -1;
	}
	if (st.st_mode & S_IWOTH) {
		RTE_LOG(ERR, EAL,
			"Error, directory path %s is world-writable and insecure\n",
			dirpath);
		return -1;
	}

	return 0;
}

static void *
eal_dlopen(const char *pathname)
{
	void *retval = NULL;
	char *realp = realpath(pathname, NULL);

	if (realp == NULL && errno == ENOENT) {
		/* not a full or relative path, try a load from system dirs */
		retval = dlopen(pathname, RTLD_NOW);
		if (retval == NULL)
			RTE_LOG(ERR, EAL, "%s\n", dlerror());
		return retval;
	}
	if (realp == NULL) {
		RTE_LOG(ERR, EAL, "Error with realpath for %s, %s\n",
				pathname, strerror(errno));
		goto out;
	}
	if (strnlen(realp, PATH_MAX) == PATH_MAX) {
		RTE_LOG(ERR, EAL, "Error, driver path greater than PATH_MAX\n");
		goto out;
	}

	/* do permissions checks */
	if (verify_perms(realp) != 0)
		goto out;

	retval = dlopen(realp, RTLD_NOW);
	if (retval == NULL)
		RTE_LOG(ERR, EAL, "%s\n", dlerror());
out:
	free(realp);
	return retval;
}
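/*
 * Illustrative behaviour of eal_dlopen() above (a sketch based on the code,
 * with hypothetical file names): "-d librte_foo.so" has no resolvable path,
 * so realpath() fails with ENOENT and the name is handed straight to
 * dlopen(), which searches the normal system library paths; by contrast
 * "-d /opt/dpdk/pmds/librte_foo.so" is canonicalised and its directory
 * chain is checked by verify_perms() before loading.
 */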
static int
is_shared_build(void)
{
#define EAL_SO "librte_eal.so"
	char soname[32];
	size_t len, minlen = strlen(EAL_SO);

	len = strlcpy(soname, EAL_SO"."ABI_VERSION, sizeof(soname));
	if (len > sizeof(soname)) {
		RTE_LOG(ERR, EAL, "Shared lib name too long in shared build check\n");
		len = sizeof(soname) - 1;
	}

	while (len >= minlen) {
		void *handle;

		/* check if we have this .so loaded, if so - shared build */
		RTE_LOG(DEBUG, EAL, "Checking presence of .so '%s'\n", soname);
		handle = dlopen(soname, RTLD_LAZY | RTLD_NOLOAD);
		if (handle != NULL) {
			RTE_LOG(INFO, EAL, "Detected shared linkage of DPDK\n");
			dlclose(handle);
			return 1;
		}

		/* remove any version numbers off the end to retry */
		while (len-- > 0)
			if (soname[len] == '.') {
				soname[len] = '\0';
				break;
			}
	}

	RTE_LOG(INFO, EAL, "Detected static linkage of DPDK\n");
	return 0;
}

int
eal_plugins_init(void)
{
	struct shared_driver *solib = NULL;
	struct stat sb;

	/* If we are not statically linked, add default driver loading
	 * path if it exists as a directory.
	 * (Using dlopen with NOLOAD flag on EAL, will return NULL if the EAL
	 * shared library is not already loaded i.e. it's statically linked.)
	 */
	if (is_shared_build() &&
			*default_solib_dir != '\0' &&
			stat(default_solib_dir, &sb) == 0 &&
			S_ISDIR(sb.st_mode))
		eal_plugin_add(default_solib_dir);

	TAILQ_FOREACH(solib, &solib_list, next) {

		if (stat(solib->name, &sb) == 0 && S_ISDIR(sb.st_mode)) {
			if (eal_plugindir_init(solib->name) == -1) {
				RTE_LOG(ERR, EAL,
					"Cannot init plugin directory %s\n",
					solib->name);
				return -1;
			}
		} else {
			RTE_LOG(DEBUG, EAL, "open shared lib %s\n",
				solib->name);
			solib->lib_handle = eal_dlopen(solib->name);
			if (solib->lib_handle == NULL)
				return -1;
		}

	}
	return 0;
}
#endif

/*
 * Parse the coremask given as argument (hexadecimal string) and fill
 * the global configuration (core role and core count) with the parsed
 * value.
 */
static int xdigit2val(unsigned char c)
{
	int val;

	if (isdigit(c))
		val = c - '0';
	else if (isupper(c))
		val = c - 'A' + 10;
	else
		val = c - 'a' + 10;
	return val;
}
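/*
 * Worked example of the mask walk implemented below (derived from the code,
 * not from separate documentation): "-s 0xA0" is scanned from the least
 * significant hex digit; '0' sets no bits, 'A' (binary 1010) sets bits 5
 * and 7, so lcores 5 and 7 are flagged as service cores.
 */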
static int
eal_parse_service_coremask(const char *coremask)
{
	struct rte_config *cfg = rte_eal_get_configuration();
	int i, j, idx = 0;
	unsigned int count = 0;
	char c;
	int val;
	uint32_t taken_lcore_count = 0;

	if (coremask == NULL)
		return -1;
	/* Remove leading and trailing blank characters.
	 * Remove the 0x/0X prefix if it exists.
	 */
	while (isblank(*coremask))
		coremask++;
	if (coremask[0] == '0' && ((coremask[1] == 'x')
			|| (coremask[1] == 'X')))
		coremask += 2;
	i = strlen(coremask);
	while ((i > 0) && isblank(coremask[i - 1]))
		i--;

	if (i == 0)
		return -1;

	for (i = i - 1; i >= 0 && idx < RTE_MAX_LCORE; i--) {
		c = coremask[i];
		if (isxdigit(c) == 0) {
			/* invalid characters */
			return -1;
		}
		val = xdigit2val(c);
		for (j = 0; j < BITS_PER_HEX && idx < RTE_MAX_LCORE;
				j++, idx++) {
			if ((1 << j) & val) {
				/* handle main lcore already parsed */
				uint32_t lcore = idx;
				if (main_lcore_parsed &&
						cfg->main_lcore == lcore) {
					RTE_LOG(ERR, EAL,
						"lcore %u is main lcore, cannot use as service core\n",
						idx);
					return -1;
				}

				if (eal_cpu_detected(idx) == 0) {
					RTE_LOG(ERR, EAL,
						"lcore %u unavailable\n", idx);
					return -1;
				}

				if (cfg->lcore_role[idx] == ROLE_RTE)
					taken_lcore_count++;

				lcore_config[idx].core_role = ROLE_SERVICE;
				count++;
			}
		}
	}

	for (; i >= 0; i--)
		if (coremask[i] != '0')
			return -1;

	for (; idx < RTE_MAX_LCORE; idx++)
		lcore_config[idx].core_index = -1;

	if (count == 0)
		return -1;

	if (core_parsed && taken_lcore_count != count) {
		RTE_LOG(WARNING, EAL,
			"Not all service cores are in the coremask. "
			"Please ensure -c or -l includes service cores\n");
	}

	cfg->service_lcore_count = count;
	return 0;
}

static int
eal_service_cores_parsed(void)
{
	int idx;
	for (idx = 0; idx < RTE_MAX_LCORE; idx++) {
		if (lcore_config[idx].core_role == ROLE_SERVICE)
			return 1;
	}
	return 0;
}

static int
update_lcore_config(int *cores)
{
	struct rte_config *cfg = rte_eal_get_configuration();
	unsigned int count = 0;
	unsigned int i;
	int ret = 0;

	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (cores[i] != -1) {
			if (eal_cpu_detected(i) == 0) {
				RTE_LOG(ERR, EAL, "lcore %u unavailable\n", i);
				ret = -1;
				continue;
			}
			cfg->lcore_role[i] = ROLE_RTE;
			count++;
		} else {
			cfg->lcore_role[i] = ROLE_OFF;
		}
		lcore_config[i].core_index = cores[i];
	}
	if (!ret)
		cfg->lcore_count = count;
	return ret;
}

static int
check_core_list(int *lcores, unsigned int count)
{
	char lcorestr[RTE_MAX_LCORE * 10];
	bool overflow = false;
	int len = 0, ret;
	unsigned int i;

	for (i = 0; i < count; i++) {
		if (lcores[i] < RTE_MAX_LCORE)
			continue;

		RTE_LOG(ERR, EAL, "lcore %d >= RTE_MAX_LCORE (%d)\n",
			lcores[i], RTE_MAX_LCORE);
		overflow = true;
	}
	if (!overflow)
		return 0;

	/*
	 * We've encountered a core that's greater than RTE_MAX_LCORE,
	 * suggest using --lcores option to map lcores onto physical cores
	 * greater than RTE_MAX_LCORE.
	 */
	for (i = 0; i < count; i++) {
		ret = snprintf(&lcorestr[len], sizeof(lcorestr) - len,
			"%d@%d,", i, lcores[i]);
		if (ret > 0)
			len = len + ret;
	}
	if (len > 0)
		lcorestr[len - 1] = 0;
	RTE_LOG(ERR, EAL, "To use high physical core ids, "
		"please use --lcores to map them to lcore ids below RTE_MAX_LCORE, "
		"e.g. --lcores %s\n", lcorestr);
	return -1;
}
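/*
 * Illustrative inputs for the coremask parser below (the examples follow
 * from the parsing code, not from separate documentation):
 *   -c 0x0f   selects lcores 0-3, one bit per lcore with bit 0 = lcore 0;
 *   -c f      is equivalent, since the 0x/0X prefix is optional.
 * A physical core id of RTE_MAX_LCORE or above is rejected by
 * check_core_list() above, which suggests remapping it with --lcores,
 * e.g. "--lcores 0@200" to run lcore 0 on physical cpu 200.
 */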
static int
eal_parse_coremask(const char *coremask, int *cores)
{
	const char *coremask_orig = coremask;
	int lcores[RTE_MAX_LCORE];
	unsigned int count = 0;
	int i, j, idx;
	int val;
	char c;

	for (idx = 0; idx < RTE_MAX_LCORE; idx++)
		cores[idx] = -1;
	idx = 0;

	/* Remove leading and trailing blank characters.
	 * Remove the 0x/0X prefix if it exists.
	 */
	while (isblank(*coremask))
		coremask++;
	if (coremask[0] == '0' && ((coremask[1] == 'x')
			|| (coremask[1] == 'X')))
		coremask += 2;
	i = strlen(coremask);
	while ((i > 0) && isblank(coremask[i - 1]))
		i--;
	if (i == 0) {
		RTE_LOG(ERR, EAL, "No lcores in coremask: [%s]\n",
			coremask_orig);
		return -1;
	}

	for (i = i - 1; i >= 0; i--) {
		c = coremask[i];
		if (isxdigit(c) == 0) {
			/* invalid characters */
			RTE_LOG(ERR, EAL, "invalid characters in coremask: [%s]\n",
				coremask_orig);
			return -1;
		}
		val = xdigit2val(c);
		for (j = 0; j < BITS_PER_HEX; j++, idx++)
		{
			if ((1 << j) & val) {
				if (count >= RTE_MAX_LCORE) {
					RTE_LOG(ERR, EAL, "Too many lcores provided. Cannot exceed RTE_MAX_LCORE (%d)\n",
						RTE_MAX_LCORE);
					return -1;
				}
				lcores[count++] = idx;
			}
		}
	}
	if (count == 0) {
		RTE_LOG(ERR, EAL, "No lcores in coremask: [%s]\n",
			coremask_orig);
		return -1;
	}

	if (check_core_list(lcores, count))
		return -1;

	/*
	 * Now that we've got a list of cores no longer than RTE_MAX_LCORE,
	 * and no lcore in that list is greater than RTE_MAX_LCORE, populate
	 * the cores array.
	 */
	do {
		count--;
		cores[lcores[count]] = count;
	} while (count != 0);

	return 0;
}

static int
eal_parse_service_corelist(const char *corelist)
{
	struct rte_config *cfg = rte_eal_get_configuration();
	int i;
	unsigned count = 0;
	char *end = NULL;
	uint32_t min, max, idx;
	uint32_t taken_lcore_count = 0;

	if (corelist == NULL)
		return -1;

	/* Remove leading and trailing blank characters */
	while (isblank(*corelist))
		corelist++;
	i = strlen(corelist);
	while ((i > 0) && isblank(corelist[i - 1]))
		i--;

	/* Get list of cores */
	min = RTE_MAX_LCORE;
	do {
		while (isblank(*corelist))
			corelist++;
		if (*corelist == '\0')
			return -1;
		errno = 0;
		idx = strtoul(corelist, &end, 10);
		if (errno || end == NULL)
			return -1;
		if (idx >= RTE_MAX_LCORE)
			return -1;
		while (isblank(*end))
			end++;
		if (*end == '-') {
			min = idx;
		} else if ((*end == ',') || (*end == '\0')) {
			max = idx;
			if (min == RTE_MAX_LCORE)
				min = idx;
			for (idx = min; idx <= max; idx++) {
				if (cfg->lcore_role[idx] != ROLE_SERVICE) {
					/* handle main lcore already parsed */
					uint32_t lcore = idx;
					if (cfg->main_lcore == lcore &&
							main_lcore_parsed) {
						RTE_LOG(ERR, EAL,
							"Error: lcore %u is main lcore, cannot use as service core\n",
							idx);
						return -1;
					}
					if (cfg->lcore_role[idx] == ROLE_RTE)
						taken_lcore_count++;

					lcore_config[idx].core_role =
							ROLE_SERVICE;
					count++;
				}
			}
			min = RTE_MAX_LCORE;
		} else
			return -1;
		corelist = end + 1;
	} while (*end != '\0');

	if (count == 0)
		return -1;

	if (core_parsed && taken_lcore_count != count) {
		RTE_LOG(WARNING, EAL,
			"Not all service cores were in the core list. "
			"Please ensure -c or -l includes service cores\n");
	}

	return 0;
}
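/*
 * Illustrative input for the core list parser below (derived from the
 * parsing code): "-l 1-3,6" selects lcores 1, 2, 3 and 6; blanks around
 * numbers are ignored and duplicate ids are silently collapsed.
 */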
" 890 "Please ensure -c or -l includes service cores\n"); 891 } 892 893 return 0; 894 } 895 896 static int 897 eal_parse_corelist(const char *corelist, int *cores) 898 { 899 unsigned int count = 0, i; 900 int lcores[RTE_MAX_LCORE]; 901 char *end = NULL; 902 int min, max; 903 int idx; 904 905 for (idx = 0; idx < RTE_MAX_LCORE; idx++) 906 cores[idx] = -1; 907 908 /* Remove all blank characters ahead */ 909 while (isblank(*corelist)) 910 corelist++; 911 912 /* Get list of cores */ 913 min = -1; 914 do { 915 while (isblank(*corelist)) 916 corelist++; 917 if (*corelist == '\0') 918 return -1; 919 errno = 0; 920 idx = strtol(corelist, &end, 10); 921 if (errno || end == NULL) 922 return -1; 923 if (idx < 0) 924 return -1; 925 while (isblank(*end)) 926 end++; 927 if (*end == '-') { 928 min = idx; 929 } else if ((*end == ',') || (*end == '\0')) { 930 max = idx; 931 if (min == -1) 932 min = idx; 933 for (idx = min; idx <= max; idx++) { 934 bool dup = false; 935 936 /* Check if this idx is already present */ 937 for (i = 0; i < count; i++) { 938 if (lcores[i] == idx) 939 dup = true; 940 } 941 if (dup) 942 continue; 943 if (count >= RTE_MAX_LCORE) { 944 RTE_LOG(ERR, EAL, "Too many lcores provided. Cannot exceed RTE_MAX_LCORE (%d)\n", 945 RTE_MAX_LCORE); 946 return -1; 947 } 948 lcores[count++] = idx; 949 } 950 min = -1; 951 } else 952 return -1; 953 corelist = end + 1; 954 } while (*end != '\0'); 955 956 if (count == 0) 957 return -1; 958 959 if (check_core_list(lcores, count)) 960 return -1; 961 962 /* 963 * Now that we've got a list of cores no longer than RTE_MAX_LCORE, 964 * and no lcore in that list is greater than RTE_MAX_LCORE, populate 965 * the cores array. 966 */ 967 do { 968 count--; 969 cores[lcores[count]] = count; 970 } while (count != 0); 971 972 return 0; 973 } 974 975 /* Changes the lcore id of the main thread */ 976 static int 977 eal_parse_main_lcore(const char *arg) 978 { 979 char *parsing_end; 980 struct rte_config *cfg = rte_eal_get_configuration(); 981 982 errno = 0; 983 cfg->main_lcore = (uint32_t) strtol(arg, &parsing_end, 0); 984 if (errno || parsing_end[0] != 0) 985 return -1; 986 if (cfg->main_lcore >= RTE_MAX_LCORE) 987 return -1; 988 main_lcore_parsed = 1; 989 990 /* ensure main core is not used as service core */ 991 if (lcore_config[cfg->main_lcore].core_role == ROLE_SERVICE) { 992 RTE_LOG(ERR, EAL, 993 "Error: Main lcore is used as a service core\n"); 994 return -1; 995 } 996 997 return 0; 998 } 999 1000 /* 1001 * Parse elem, the elem could be single number/range or '(' ')' group 1002 * 1) A single number elem, it's just a simple digit. e.g. 9 1003 * 2) A single range elem, two digits with a '-' between. e.g. 2-6 1004 * 3) A group elem, combines multiple 1) or 2) with '( )'. e.g (0,2-4,6) 1005 * Within group elem, '-' used for a range separator; 1006 * ',' used for a single number. 
/*
 * Parse elem, which can be a single number, a single range, or a '( )' group:
 * 1) A single number elem: a simple digit, e.g. 9
 * 2) A single range elem: two digits with a '-' between them, e.g. 2-6
 * 3) A group elem: multiple 1) or 2) combined with '( )', e.g. (0,2-4,6)
 *    Within a group elem, '-' is used as the range separator and
 *    ',' separates single numbers.
 */
static int
eal_parse_set(const char *input, rte_cpuset_t *set)
{
	unsigned idx;
	const char *str = input;
	char *end = NULL;
	unsigned min, max;

	CPU_ZERO(set);

	while (isblank(*str))
		str++;

	/* only a digit or a left parenthesis qualifies as a start point */
	if ((!isdigit(*str) && *str != '(') || *str == '\0')
		return -1;

	/* process single number or single range of number */
	if (*str != '(') {
		errno = 0;
		idx = strtoul(str, &end, 10);
		if (errno || end == NULL || idx >= CPU_SETSIZE)
			return -1;
		else {
			while (isblank(*end))
				end++;

			min = idx;
			max = idx;
			if (*end == '-') {
				/* process single <number>-<number> */
				end++;
				while (isblank(*end))
					end++;
				if (!isdigit(*end))
					return -1;

				errno = 0;
				idx = strtoul(end, &end, 10);
				if (errno || end == NULL || idx >= CPU_SETSIZE)
					return -1;
				max = idx;
				while (isblank(*end))
					end++;
				if (*end != ',' && *end != '\0')
					return -1;
			}

			if (*end != ',' && *end != '\0' &&
					*end != '@')
				return -1;

			for (idx = RTE_MIN(min, max);
					idx <= RTE_MAX(min, max); idx++)
				CPU_SET(idx, set);

			return end - input;
		}
	}

	/* process set within bracket */
	str++;
	while (isblank(*str))
		str++;
	if (*str == '\0')
		return -1;

	min = RTE_MAX_LCORE;
	do {

		/* go ahead to the first digit */
		while (isblank(*str))
			str++;
		if (!isdigit(*str))
			return -1;

		/* get the digit value */
		errno = 0;
		idx = strtoul(str, &end, 10);
		if (errno || end == NULL || idx >= CPU_SETSIZE)
			return -1;

		/* go ahead to separator '-', ',' and ')' */
		while (isblank(*end))
			end++;
		if (*end == '-') {
			if (min == RTE_MAX_LCORE)
				min = idx;
			else /* avoid continuous '-' */
				return -1;
		} else if ((*end == ',') || (*end == ')')) {
			max = idx;
			if (min == RTE_MAX_LCORE)
				min = idx;
			for (idx = RTE_MIN(min, max);
					idx <= RTE_MAX(min, max); idx++)
				CPU_SET(idx, set);

			min = RTE_MAX_LCORE;
		} else
			return -1;

		str = end + 1;
	} while (*end != '\0' && *end != ')');

	/*
	 * Skip trailing blanks so that the end-character check in
	 * eal_parse_lcores() does not fail on them.
	 */
	while (isblank(*str))
		str++;

	return str - input;
}

static int
check_cpuset(rte_cpuset_t *set)
{
	unsigned int idx;

	for (idx = 0; idx < CPU_SETSIZE; idx++) {
		if (!CPU_ISSET(idx, set))
			continue;

		if (eal_cpu_detected(idx) == 0) {
			RTE_LOG(ERR, EAL, "core %u "
				"unavailable\n", idx);
			return -1;
		}
	}
	return 0;
}
/*
 * The format pattern: --lcores='<lcores[@cpus]>[<,lcores[@cpus]>...]'
 * lcores and cpus can each be a single digit/range or a group.
 * '(' and ')' are necessary if it's a group.
 * If '@cpus' is not supplied, cpus defaults to the same value as lcores.
 * e.g. '1,2@(5-7),(3-5)@(0,2),(0,6),7-8' starts 9 EAL threads as below:
 *   lcore 0 runs on cpuset 0x41 (cpu 0,6)
 *   lcore 1 runs on cpuset 0x2 (cpu 1)
 *   lcore 2 runs on cpuset 0xe0 (cpu 5,6,7)
 *   lcore 3,4,5 runs on cpuset 0x5 (cpu 0,2)
 *   lcore 6 runs on cpuset 0x41 (cpu 0,6)
 *   lcore 7 runs on cpuset 0x80 (cpu 7)
 *   lcore 8 runs on cpuset 0x100 (cpu 8)
 */
static int
eal_parse_lcores(const char *lcores)
{
	struct rte_config *cfg = rte_eal_get_configuration();
	rte_cpuset_t lcore_set;
	unsigned int set_count;
	unsigned idx = 0;
	unsigned count = 0;
	const char *lcore_start = NULL;
	const char *end = NULL;
	int offset;
	rte_cpuset_t cpuset;
	int lflags;
	int ret = -1;

	if (lcores == NULL)
		return -1;

	/* Remove leading and trailing blank characters */
	while (isblank(*lcores))
		lcores++;

	CPU_ZERO(&cpuset);

	/* Reset lcore config */
	for (idx = 0; idx < RTE_MAX_LCORE; idx++) {
		cfg->lcore_role[idx] = ROLE_OFF;
		lcore_config[idx].core_index = -1;
		CPU_ZERO(&lcore_config[idx].cpuset);
	}

	/* Get list of cores */
	do {
		while (isblank(*lcores))
			lcores++;
		if (*lcores == '\0')
			goto err;

		lflags = 0;

		/* record lcore_set start point */
		lcore_start = lcores;

		/* go across a complete bracket */
		if (*lcore_start == '(') {
			lcores += strcspn(lcores, ")");
			if (*lcores++ == '\0')
				goto err;
		}

		/* scan the separator '@', ','(next) or '\0'(finish) */
		lcores += strcspn(lcores, "@,");

		if (*lcores == '@') {
			/* explicitly assign cpuset and update the end cursor */
			offset = eal_parse_set(lcores + 1, &cpuset);
			if (offset < 0)
				goto err;
			end = lcores + 1 + offset;
		} else { /* ',' or '\0' */
			/* no cpuset given, current loop done */
			end = lcores;

			/* go back to check <number>-<number> */
			offset = strcspn(lcore_start, "(-");
			if (offset < (end - lcore_start) &&
					*(lcore_start + offset) != '(')
				lflags = 1;
		}

		if (*end != ',' && *end != '\0')
			goto err;

		/* parse lcore_set from start point */
		if (eal_parse_set(lcore_start, &lcore_set) < 0)
			goto err;

		/* without '@', use lcore_set as the cpuset by default */
		if (*lcores != '@')
			rte_memcpy(&cpuset, &lcore_set, sizeof(cpuset));

		set_count = CPU_COUNT(&lcore_set);
		/* start to update lcore_set */
		for (idx = 0; idx < RTE_MAX_LCORE; idx++) {
			if (!CPU_ISSET(idx, &lcore_set))
				continue;
			set_count--;

			if (cfg->lcore_role[idx] != ROLE_RTE) {
				lcore_config[idx].core_index = count;
				cfg->lcore_role[idx] = ROLE_RTE;
				count++;
			}

			if (lflags) {
				CPU_ZERO(&cpuset);
				CPU_SET(idx, &cpuset);
			}

			if (check_cpuset(&cpuset) < 0)
				goto err;
			rte_memcpy(&lcore_config[idx].cpuset, &cpuset,
				sizeof(rte_cpuset_t));
		}

		/* some cores from the lcore_set can't be handled by EAL */
		if (set_count != 0)
			goto err;

		lcores = end + 1;
	} while (*end != '\0');

	if (count == 0)
		goto err;

	cfg->lcore_count = count;
	ret = 0;

err:

	return ret;
}
#ifndef RTE_EXEC_ENV_WINDOWS
static int
eal_parse_syslog(const char *facility, struct internal_config *conf)
{
	int i;
	static const struct {
		const char *name;
		int value;
	} map[] = {
		{ "auth", LOG_AUTH },
		{ "cron", LOG_CRON },
		{ "daemon", LOG_DAEMON },
		{ "ftp", LOG_FTP },
		{ "kern", LOG_KERN },
		{ "lpr", LOG_LPR },
		{ "mail", LOG_MAIL },
		{ "news", LOG_NEWS },
		{ "syslog", LOG_SYSLOG },
		{ "user", LOG_USER },
		{ "uucp", LOG_UUCP },
		{ "local0", LOG_LOCAL0 },
		{ "local1", LOG_LOCAL1 },
		{ "local2", LOG_LOCAL2 },
		{ "local3", LOG_LOCAL3 },
		{ "local4", LOG_LOCAL4 },
		{ "local5", LOG_LOCAL5 },
		{ "local6", LOG_LOCAL6 },
		{ "local7", LOG_LOCAL7 },
		{ NULL, 0 }
	};

	for (i = 0; map[i].name; i++) {
		if (!strcmp(facility, map[i].name)) {
			conf->syslog_facility = map[i].value;
			return 0;
		}
	}
	return -1;
}
#endif

static void
eal_log_usage(void)
{
	unsigned int level;

	printf("Log type is a pattern matching items of this list"
			" (plugins may be missing):\n");
	rte_log_list_types(stdout, "\t");
	printf("\n");
	printf("Syntax using globbing pattern: ");
	printf("--"OPT_LOG_LEVEL" pattern:level\n");
	printf("Syntax using regular expression: ");
	printf("--"OPT_LOG_LEVEL" regexp,level\n");
	printf("Syntax for the global level: ");
	printf("--"OPT_LOG_LEVEL" level\n");
	printf("Logs are emitted if allowed by both global and specific levels.\n");
	printf("\n");
	printf("Log level can be a number or the first letters of its name:\n");
	for (level = 1; level <= RTE_LOG_MAX; level++)
		printf("\t%d %s\n", level, eal_log_level2str(level));
}

static int
eal_parse_log_priority(const char *level)
{
	size_t len = strlen(level);
	unsigned long tmp;
	char *end;
	unsigned int i;

	if (len == 0)
		return -1;

	/* look for named values, skip 0 which is not a valid level */
	for (i = 1; i <= RTE_LOG_MAX; i++) {
		if (strncmp(eal_log_level2str(i), level, len) == 0)
			return i;
	}

	/* not a string, maybe it is numeric */
	errno = 0;
	tmp = strtoul(level, &end, 0);

	/* check for errors */
	if (errno != 0 || end == NULL || *end != '\0' ||
			tmp >= UINT32_MAX)
		return -1;

	return tmp;
}

static int
eal_parse_log_level(const char *arg)
{
	const char *pattern = NULL;
	const char *regex = NULL;
	char *str, *level;
	int priority;

	if (strcmp(arg, "help") == 0) {
		eal_log_usage();
		exit(EXIT_SUCCESS);
	}

	str = strdup(arg);
	if (str == NULL)
		return -1;

	if ((level = strchr(str, ','))) {
		regex = str;
		*level++ = '\0';
	} else if ((level = strchr(str, ':'))) {
		pattern = str;
		*level++ = '\0';
	} else {
		level = str;
	}

	priority = eal_parse_log_priority(level);
	if (priority <= 0) {
		fprintf(stderr, "Invalid log level: %s\n", level);
		goto fail;
	}
	if (priority > (int)RTE_LOG_MAX) {
		fprintf(stderr, "Log level %d higher than maximum (%d)\n",
				priority, RTE_LOG_MAX);
		priority = RTE_LOG_MAX;
	}

	if (regex) {
		if (rte_log_set_level_regexp(regex, priority) < 0) {
			fprintf(stderr, "cannot set log level %s,%d\n",
				regex, priority);
			goto fail;
		}
		if (eal_log_save_regexp(regex, priority) < 0)
			goto fail;
	} else if (pattern) {
		if (rte_log_set_level_pattern(pattern, priority) < 0) {
			fprintf(stderr, "cannot set log level %s:%d\n",
				pattern, priority);
			goto fail;
		}
		if (eal_log_save_pattern(pattern, priority) < 0)
			goto fail;
	} else {
		rte_log_set_global_level(priority);
	}

	free(str);
	return 0;

fail:
	free(str);
	return -1;
}

static enum rte_proc_type_t
eal_parse_proc_type(const char *arg)
{
	if (strncasecmp(arg, "primary", sizeof("primary")) == 0)
		return RTE_PROC_PRIMARY;
	if (strncasecmp(arg, "secondary", sizeof("secondary")) == 0)
		return RTE_PROC_SECONDARY;
	if (strncasecmp(arg, "auto", sizeof("auto")) == 0)
		return RTE_PROC_AUTO;

	return RTE_PROC_INVALID;
}

static int
eal_parse_iova_mode(const char *name)
{
	int mode;
	struct internal_config *internal_conf =
		eal_get_internal_configuration();

	if (name == NULL)
		return -1;

	if (!strcmp("pa", name))
		mode = RTE_IOVA_PA;
	else if (!strcmp("va", name))
		mode = RTE_IOVA_VA;
	else
		return -1;

	internal_conf->iova_mode = mode;
	return 0;
}

static int
eal_parse_simd_bitwidth(const char *arg)
{
	char *end;
	unsigned long bitwidth;
	int ret;
	struct internal_config *internal_conf =
		eal_get_internal_configuration();

	if (arg == NULL || arg[0] == '\0')
		return -1;

	errno = 0;
	bitwidth = strtoul(arg, &end, 0);

	/* check for errors */
	if (errno != 0 || end == NULL || *end != '\0' || bitwidth > RTE_VECT_SIMD_MAX)
		return -1;

	if (bitwidth == 0)
		bitwidth = (unsigned long) RTE_VECT_SIMD_MAX;
	ret = rte_vect_set_max_simd_bitwidth(bitwidth);
	if (ret < 0)
		return -1;
	internal_conf->max_simd_bitwidth.forced = 1;
	return 0;
}
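/*
 * Illustrative example for the option parsed below (the arithmetic follows
 * from the RTE_PTR_ALIGN_CEIL call; the address value itself is made up):
 * "--base-virtaddr=0x100200000" is rounded up to the next 16MB boundary,
 * i.e. 0x101000000, before being stored in internal_conf->base_virtaddr.
 */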
static int
eal_parse_base_virtaddr(const char *arg)
{
	char *end;
	uint64_t addr;
	struct internal_config *internal_conf =
		eal_get_internal_configuration();

	errno = 0;
	addr = strtoull(arg, &end, 16);

	/* check for errors */
	if ((errno != 0) || (arg[0] == '\0') || end == NULL || (*end != '\0'))
		return -1;

	/* make sure we don't exceed 32-bit boundary on 32-bit target */
#ifndef RTE_ARCH_64
	if (addr >= UINTPTR_MAX)
		return -1;
#endif

	/* Align the address to a 16MB boundary: 16MB is the minimum huge page
	 * size on IBM Power. An address aligned to 16MB is also 2MB-aligned
	 * for x86, so this alignment can be used on x86 and other
	 * architectures as well.
	 */
	internal_conf->base_virtaddr =
		RTE_PTR_ALIGN_CEIL((uintptr_t)addr, (size_t)RTE_PGSIZE_16M);

	return 0;
}

/* caller is responsible for freeing the returned string */
static char *
available_cores(void)
{
	char *str = NULL;
	int previous;
	int sequence;
	char *tmp;
	int idx;

	/* find the first available cpu */
	for (idx = 0; idx < RTE_MAX_LCORE; idx++) {
		if (eal_cpu_detected(idx) == 0)
			continue;
		break;
	}
	if (idx >= RTE_MAX_LCORE)
		return NULL;

	/* first sequence */
	if (asprintf(&str, "%d", idx) < 0)
		return NULL;
	previous = idx;
	sequence = 0;

	for (idx++ ; idx < RTE_MAX_LCORE; idx++) {
		if (eal_cpu_detected(idx) == 0)
			continue;

		if (idx == previous + 1) {
			previous = idx;
			sequence = 1;
			continue;
		}

		/* finish current sequence */
		if (sequence) {
			if (asprintf(&tmp, "%s-%d", str, previous) < 0) {
				free(str);
				return NULL;
			}
			free(str);
			str = tmp;
		}

		/* new sequence */
		if (asprintf(&tmp, "%s,%d", str, idx) < 0) {
			free(str);
			return NULL;
		}
		free(str);
		str = tmp;
		previous = idx;
		sequence = 0;
	}

	/* finish last sequence */
	if (sequence) {
		if (asprintf(&tmp, "%s-%d", str, previous) < 0) {
			free(str);
			return NULL;
		}
		free(str);
		str = tmp;
	}

	return str;
}

int
eal_parse_common_option(int opt, const char *optarg,
			struct internal_config *conf)
{
	static int b_used;
	static int a_used;

	switch (opt) {
	case 'b':
		if (a_used)
			goto ba_conflict;
		if (eal_option_device_add(RTE_DEVTYPE_BLOCKED, optarg) < 0)
			return -1;
		b_used = 1;
		break;

	case 'a':
		if (b_used)
			goto ba_conflict;
		if (eal_option_device_add(RTE_DEVTYPE_ALLOWED, optarg) < 0)
			return -1;
		a_used = 1;
		break;
	/* coremask */
	case 'c': {
		int lcore_indexes[RTE_MAX_LCORE];

		if (eal_service_cores_parsed())
			RTE_LOG(WARNING, EAL,
				"Service cores parsed before dataplane cores. Please ensure -c is before -s or -S\n");
		if (eal_parse_coremask(optarg, lcore_indexes) < 0) {
			RTE_LOG(ERR, EAL, "invalid coremask syntax\n");
			return -1;
		}
		if (update_lcore_config(lcore_indexes) < 0) {
			char *available = available_cores();

			RTE_LOG(ERR, EAL,
				"invalid coremask, please check specified cores are part of %s\n",
				available);
			free(available);
			return -1;
		}

		if (core_parsed) {
			RTE_LOG(ERR, EAL, "Option -c is ignored, because (%s) is set!\n",
				(core_parsed == LCORE_OPT_LST) ? "-l" :
				(core_parsed == LCORE_OPT_MAP) ? "--lcore" :
				"-c");
			return -1;
		}

		core_parsed = LCORE_OPT_MSK;
		break;
	}
	/* corelist */
	case 'l': {
		int lcore_indexes[RTE_MAX_LCORE];

		if (eal_service_cores_parsed())
			RTE_LOG(WARNING, EAL,
				"Service cores parsed before dataplane cores. Please ensure -l is before -s or -S\n");
		if (eal_parse_corelist(optarg, lcore_indexes) < 0) {
			RTE_LOG(ERR, EAL, "invalid core list syntax\n");
			return -1;
		}
		if (update_lcore_config(lcore_indexes) < 0) {
			char *available = available_cores();

			RTE_LOG(ERR, EAL,
				"invalid core list, please check specified cores are part of %s\n",
				available);
			free(available);
			return -1;
		}

		if (core_parsed) {
			RTE_LOG(ERR, EAL, "Option -l is ignored, because (%s) is set!\n",
				(core_parsed == LCORE_OPT_MSK) ? "-c" :
				(core_parsed == LCORE_OPT_MAP) ? "--lcore" :
				"-l");
			return -1;
		}

		core_parsed = LCORE_OPT_LST;
		break;
	}
	/* service coremask */
	case 's':
		if (eal_parse_service_coremask(optarg) < 0) {
			RTE_LOG(ERR, EAL, "invalid service coremask\n");
			return -1;
		}
		break;
	/* service corelist */
	case 'S':
		if (eal_parse_service_corelist(optarg) < 0) {
			RTE_LOG(ERR, EAL, "invalid service core list\n");
			return -1;
		}
		break;
	/* size of memory */
	case 'm':
		conf->memory = atoi(optarg);
		conf->memory *= 1024ULL;
		conf->memory *= 1024ULL;
		mem_parsed = 1;
		break;
	/* force number of channels */
	case 'n':
		conf->force_nchannel = atoi(optarg);
		if (conf->force_nchannel == 0) {
			RTE_LOG(ERR, EAL, "invalid channel number\n");
			return -1;
		}
		break;
	/* force number of ranks */
	case 'r':
		conf->force_nrank = atoi(optarg);
		if (conf->force_nrank == 0 ||
				conf->force_nrank > 16) {
			RTE_LOG(ERR, EAL, "invalid rank number\n");
			return -1;
		}
		break;
	/* force loading of external driver */
	case 'd':
		if (eal_plugin_add(optarg) == -1)
			return -1;
		break;
	case 'v':
		/* since message is explicitly requested by user, we
		 * write message at highest log level so it can always
		 * be seen even if info or warning messages are disabled */
		RTE_LOG(CRIT, EAL, "RTE Version: '%s'\n", rte_version());
		break;

	/* long options */
	case OPT_HUGE_UNLINK_NUM:
		conf->hugepage_unlink = 1;
		break;

	case OPT_NO_HUGE_NUM:
		conf->no_hugetlbfs = 1;
		/* no-huge is legacy mem */
		conf->legacy_mem = 1;
		break;

	case OPT_NO_PCI_NUM:
		conf->no_pci = 1;
		break;

	case OPT_NO_HPET_NUM:
		conf->no_hpet = 1;
		break;

	case OPT_VMWARE_TSC_MAP_NUM:
		conf->vmware_tsc_map = 1;
		break;

	case OPT_NO_SHCONF_NUM:
		conf->no_shconf = 1;
		break;

	case OPT_IN_MEMORY_NUM:
		conf->in_memory = 1;
		/* in-memory is a superset of noshconf and huge-unlink */
		conf->no_shconf = 1;
		conf->hugepage_unlink = 1;
		break;

	case OPT_PROC_TYPE_NUM:
		conf->process_type = eal_parse_proc_type(optarg);
		break;

	case OPT_MAIN_LCORE_NUM:
		if (eal_parse_main_lcore(optarg) < 0) {
			RTE_LOG(ERR, EAL, "invalid parameter for --"
					OPT_MAIN_LCORE "\n");
			return -1;
		}
		break;

	case OPT_VDEV_NUM:
		if (eal_option_device_add(RTE_DEVTYPE_VIRTUAL,
				optarg) < 0) {
			return -1;
		}
		break;

#ifndef RTE_EXEC_ENV_WINDOWS
	case OPT_SYSLOG_NUM:
		if (eal_parse_syslog(optarg, conf) < 0) {
			RTE_LOG(ERR, EAL, "invalid parameters for --"
					OPT_SYSLOG "\n");
			return -1;
		}
		break;
#endif

	case OPT_LOG_LEVEL_NUM: {
		if (eal_parse_log_level(optarg) < 0) {
			RTE_LOG(ERR, EAL,
				"invalid parameters for --"
				OPT_LOG_LEVEL "\n");
			return -1;
		}
		break;
	}

#ifndef RTE_EXEC_ENV_WINDOWS
	case OPT_TRACE_NUM: {
		if (eal_trace_args_save(optarg) < 0) {
			RTE_LOG(ERR, EAL, "invalid parameters for --"
					OPT_TRACE "\n");
			return -1;
		}
		break;
	}

	case OPT_TRACE_DIR_NUM: {
		if (eal_trace_dir_args_save(optarg) < 0) {
			RTE_LOG(ERR, EAL, "invalid parameters for --"
					OPT_TRACE_DIR "\n");
			return -1;
		}
		break;
	}

	case OPT_TRACE_BUF_SIZE_NUM: {
		if (eal_trace_bufsz_args_save(optarg) < 0) {
			RTE_LOG(ERR, EAL, "invalid parameters for --"
					OPT_TRACE_BUF_SIZE "\n");
			return -1;
		}
		break;
	}

	case OPT_TRACE_MODE_NUM: {
		if (eal_trace_mode_args_save(optarg) < 0) {
			RTE_LOG(ERR, EAL, "invalid parameters for --"
					OPT_TRACE_MODE "\n");
			return -1;
		}
		break;
	}
#endif /* !RTE_EXEC_ENV_WINDOWS */

	case OPT_LCORES_NUM:
		if (eal_parse_lcores(optarg) < 0) {
			RTE_LOG(ERR, EAL, "invalid parameter for --"
				OPT_LCORES "\n");
			return -1;
		}

		if (core_parsed) {
			RTE_LOG(ERR, EAL, "Option --lcore is ignored, because (%s) is set!\n",
				(core_parsed == LCORE_OPT_LST) ? "-l" :
				(core_parsed == LCORE_OPT_MSK) ? "-c" :
				"--lcore");
			return -1;
		}

		core_parsed = LCORE_OPT_MAP;
		break;
	case OPT_LEGACY_MEM_NUM:
		conf->legacy_mem = 1;
		break;
	case OPT_SINGLE_FILE_SEGMENTS_NUM:
		conf->single_file_segments = 1;
		break;
	case OPT_IOVA_MODE_NUM:
		if (eal_parse_iova_mode(optarg) < 0) {
			RTE_LOG(ERR, EAL, "invalid parameters for --"
				OPT_IOVA_MODE "\n");
			return -1;
		}
		break;
	case OPT_BASE_VIRTADDR_NUM:
		if (eal_parse_base_virtaddr(optarg) < 0) {
			RTE_LOG(ERR, EAL, "invalid parameter for --"
					OPT_BASE_VIRTADDR "\n");
			return -1;
		}
		break;
	case OPT_TELEMETRY_NUM:
		break;
	case OPT_NO_TELEMETRY_NUM:
		conf->no_telemetry = 1;
		break;
	case OPT_FORCE_MAX_SIMD_BITWIDTH_NUM:
		if (eal_parse_simd_bitwidth(optarg) < 0) {
			RTE_LOG(ERR, EAL, "invalid parameter for --"
					OPT_FORCE_MAX_SIMD_BITWIDTH "\n");
			return -1;
		}
		break;

	/* don't know what to do, leave this to caller */
	default:
		return 1;

	}

	return 0;

ba_conflict:
	RTE_LOG(ERR, EAL,
		"Options allow (-a) and block (-b) can't be used at the same time\n");
	return -1;
}

static void
eal_auto_detect_cores(struct rte_config *cfg)
{
	unsigned int lcore_id;
	unsigned int removed = 0;
	rte_cpuset_t affinity_set;

	if (pthread_getaffinity_np(pthread_self(), sizeof(rte_cpuset_t),
				&affinity_set))
		CPU_ZERO(&affinity_set);

	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (cfg->lcore_role[lcore_id] == ROLE_RTE &&
				!CPU_ISSET(lcore_id, &affinity_set)) {
			cfg->lcore_role[lcore_id] = ROLE_OFF;
			removed++;
		}
	}

	cfg->lcore_count -= removed;
}

static void
compute_ctrl_threads_cpuset(struct internal_config *internal_cfg)
{
	rte_cpuset_t *cpuset = &internal_cfg->ctrl_cpuset;
	rte_cpuset_t default_set;
	unsigned int lcore_id;

	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (rte_lcore_has_role(lcore_id, ROLE_OFF))
			continue;
		RTE_CPU_OR(cpuset, cpuset, &lcore_config[lcore_id].cpuset);
	}
	RTE_CPU_NOT(cpuset, cpuset);

	if (pthread_getaffinity_np(pthread_self(), sizeof(rte_cpuset_t),
				&default_set))
		CPU_ZERO(&default_set);

	RTE_CPU_AND(cpuset, cpuset, &default_set);

	/* if no remaining cpu, use main lcore cpu affinity */
	if (!CPU_COUNT(cpuset)) {
		memcpy(cpuset, &lcore_config[rte_get_main_lcore()].cpuset,
			sizeof(*cpuset));
	}
}

int
eal_cleanup_config(struct internal_config *internal_cfg)
{
	if (internal_cfg->hugefile_prefix != NULL)
		free(internal_cfg->hugefile_prefix);
	if (internal_cfg->hugepage_dir != NULL)
		free(internal_cfg->hugepage_dir);
	if (internal_cfg->user_mbuf_pool_ops_name != NULL)
		free(internal_cfg->user_mbuf_pool_ops_name);

	return 0;
}

int
eal_adjust_config(struct internal_config *internal_cfg)
{
	int i;
	struct rte_config *cfg = rte_eal_get_configuration();
	struct internal_config *internal_conf =
		eal_get_internal_configuration();

	if (!core_parsed)
		eal_auto_detect_cores(cfg);

	if (internal_conf->process_type == RTE_PROC_AUTO)
		internal_conf->process_type = eal_proc_type_detect();

	/* default main lcore is the first one */
	if (!main_lcore_parsed) {
		cfg->main_lcore = rte_get_next_lcore(-1, 0, 0);
		if (cfg->main_lcore >= RTE_MAX_LCORE)
			return -1;
		lcore_config[cfg->main_lcore].core_role = ROLE_RTE;
	}

	compute_ctrl_threads_cpuset(internal_cfg);

	/* if no memory amounts were requested, this will result in 0 and
	 * will be overridden later, right after eal_hugepage_info_init() */
	for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
		internal_cfg->memory += internal_cfg->socket_mem[i];

	return 0;
}

int
eal_check_common_options(struct internal_config *internal_cfg)
{
	struct rte_config *cfg = rte_eal_get_configuration();
	const struct internal_config *internal_conf =
		eal_get_internal_configuration();

	if (cfg->lcore_role[cfg->main_lcore] != ROLE_RTE) {
		RTE_LOG(ERR, EAL, "Main lcore is not enabled for DPDK\n");
		return -1;
	}

	if (internal_cfg->process_type == RTE_PROC_INVALID) {
		RTE_LOG(ERR, EAL, "Invalid process type specified\n");
		return -1;
	}
	if (internal_cfg->hugefile_prefix != NULL &&
			strlen(internal_cfg->hugefile_prefix) < 1) {
		RTE_LOG(ERR, EAL, "Invalid length of --" OPT_FILE_PREFIX " option\n");
		return -1;
	}
	if (internal_cfg->hugepage_dir != NULL &&
			strlen(internal_cfg->hugepage_dir) < 1) {
		RTE_LOG(ERR, EAL, "Invalid length of --" OPT_HUGE_DIR" option\n");
		return -1;
	}
	if (internal_cfg->user_mbuf_pool_ops_name != NULL &&
			strlen(internal_cfg->user_mbuf_pool_ops_name) < 1) {
		RTE_LOG(ERR, EAL, "Invalid length of --" OPT_MBUF_POOL_OPS_NAME" option\n");
		return -1;
	}
	if (strchr(eal_get_hugefile_prefix(), '%') != NULL) {
		RTE_LOG(ERR, EAL, "Invalid char, '%%', in --"OPT_FILE_PREFIX" "
			"option\n");
		return -1;
	}
	if (mem_parsed && internal_cfg->force_sockets == 1) {
		RTE_LOG(ERR, EAL, "Options -m and --"OPT_SOCKET_MEM" cannot "
			"be specified at the same time\n");
		return -1;
	}
	if (internal_cfg->no_hugetlbfs && internal_cfg->force_sockets == 1) {
		RTE_LOG(ERR, EAL, "Option --"OPT_SOCKET_MEM" cannot "
with --"OPT_NO_HUGE"\n"); 2051 return -1; 2052 } 2053 if (internal_cfg->no_hugetlbfs && internal_cfg->hugepage_unlink && 2054 !internal_cfg->in_memory) { 2055 RTE_LOG(ERR, EAL, "Option --"OPT_HUGE_UNLINK" cannot " 2056 "be specified together with --"OPT_NO_HUGE"\n"); 2057 return -1; 2058 } 2059 if (internal_conf->force_socket_limits && internal_conf->legacy_mem) { 2060 RTE_LOG(ERR, EAL, "Option --"OPT_SOCKET_LIMIT 2061 " is only supported in non-legacy memory mode\n"); 2062 } 2063 if (internal_cfg->single_file_segments && 2064 internal_cfg->hugepage_unlink && 2065 !internal_cfg->in_memory) { 2066 RTE_LOG(ERR, EAL, "Option --"OPT_SINGLE_FILE_SEGMENTS" is " 2067 "not compatible with --"OPT_HUGE_UNLINK"\n"); 2068 return -1; 2069 } 2070 if (internal_cfg->legacy_mem && 2071 internal_cfg->in_memory) { 2072 RTE_LOG(ERR, EAL, "Option --"OPT_LEGACY_MEM" is not compatible " 2073 "with --"OPT_IN_MEMORY"\n"); 2074 return -1; 2075 } 2076 if (internal_cfg->legacy_mem && internal_cfg->match_allocations) { 2077 RTE_LOG(ERR, EAL, "Option --"OPT_LEGACY_MEM" is not compatible " 2078 "with --"OPT_MATCH_ALLOCATIONS"\n"); 2079 return -1; 2080 } 2081 if (internal_cfg->no_hugetlbfs && internal_cfg->match_allocations) { 2082 RTE_LOG(ERR, EAL, "Option --"OPT_NO_HUGE" is not compatible " 2083 "with --"OPT_MATCH_ALLOCATIONS"\n"); 2084 return -1; 2085 } 2086 if (internal_cfg->legacy_mem && internal_cfg->memory == 0) { 2087 RTE_LOG(NOTICE, EAL, "Static memory layout is selected, " 2088 "amount of reserved memory can be adjusted with " 2089 "-m or --"OPT_SOCKET_MEM"\n"); 2090 } 2091 2092 return 0; 2093 } 2094 2095 uint16_t 2096 rte_vect_get_max_simd_bitwidth(void) 2097 { 2098 const struct internal_config *internal_conf = 2099 eal_get_internal_configuration(); 2100 return internal_conf->max_simd_bitwidth.bitwidth; 2101 } 2102 2103 int 2104 rte_vect_set_max_simd_bitwidth(uint16_t bitwidth) 2105 { 2106 struct internal_config *internal_conf = 2107 eal_get_internal_configuration(); 2108 if (internal_conf->max_simd_bitwidth.forced) { 2109 RTE_LOG(NOTICE, EAL, "Cannot set max SIMD bitwidth - user runtime override enabled"); 2110 return -EPERM; 2111 } 2112 2113 if (bitwidth < RTE_VECT_SIMD_DISABLED || !rte_is_power_of_2(bitwidth)) { 2114 RTE_LOG(ERR, EAL, "Invalid bitwidth value!\n"); 2115 return -EINVAL; 2116 } 2117 internal_conf->max_simd_bitwidth.bitwidth = bitwidth; 2118 return 0; 2119 } 2120 2121 void 2122 eal_common_usage(void) 2123 { 2124 printf("[options]\n\n" 2125 "EAL common options:\n" 2126 " -c COREMASK Hexadecimal bitmask of cores to run on\n" 2127 " -l CORELIST List of cores to run on\n" 2128 " The argument format is <c1>[-c2][,c3[-c4],...]\n" 2129 " where c1, c2, etc are core indexes between 0 and %d\n" 2130 " --"OPT_LCORES" COREMAP Map lcore set to physical cpu set\n" 2131 " The argument format is\n" 2132 " '<lcores[@cpus]>[<,lcores[@cpus]>...]'\n" 2133 " lcores and cpus list are grouped by '(' and ')'\n" 2134 " Within the group, '-' is used for range separator,\n" 2135 " ',' is used for single number separator.\n" 2136 " '( )' can be omitted for single element group,\n" 2137 " '@' can be omitted if cpus and lcores have the same value\n" 2138 " -s SERVICE COREMASK Hexadecimal bitmask of cores to be used as service cores\n" 2139 " --"OPT_MAIN_LCORE" ID Core ID that is used as main\n" 2140 " --"OPT_MBUF_POOL_OPS_NAME" Pool ops name for mbuf to use\n" 2141 " -n CHANNELS Number of memory channels\n" 2142 " -m MB Memory to allocate (see also --"OPT_SOCKET_MEM")\n" 2143 " -r RANKS Force number of memory ranks (don't 
detect)\n" 2144 " -b, --block Add a device to the blocked list.\n" 2145 " Prevent EAL from using this device. The argument\n" 2146 " format for PCI devices is <domain:bus:devid.func>.\n" 2147 " -a, --allow Add a device to the allow list.\n" 2148 " Only use the specified devices. The argument format\n" 2149 " for PCI devices is <[domain:]bus:devid.func>.\n" 2150 " This option can be present several times.\n" 2151 " [NOTE: " OPT_DEV_ALLOW " cannot be used with "OPT_DEV_BLOCK" option]\n" 2152 " --"OPT_VDEV" Add a virtual device.\n" 2153 " The argument format is <driver><id>[,key=val,...]\n" 2154 " (ex: --vdev=net_pcap0,iface=eth2).\n" 2155 " --"OPT_IOVA_MODE" Set IOVA mode. 'pa' for IOVA_PA\n" 2156 " 'va' for IOVA_VA\n" 2157 " -d LIB.so|DIR Add a driver or driver directory\n" 2158 " (can be used multiple times)\n" 2159 " --"OPT_VMWARE_TSC_MAP" Use VMware TSC map instead of native RDTSC\n" 2160 " --"OPT_PROC_TYPE" Type of this process (primary|secondary|auto)\n" 2161 #ifndef RTE_EXEC_ENV_WINDOWS 2162 " --"OPT_SYSLOG" Set syslog facility\n" 2163 #endif 2164 " --"OPT_LOG_LEVEL"=<level> Set global log level\n" 2165 " --"OPT_LOG_LEVEL"=<type-match>:<level>\n" 2166 " Set specific log level\n" 2167 " --"OPT_LOG_LEVEL"=help Show log types and levels\n" 2168 #ifndef RTE_EXEC_ENV_WINDOWS 2169 " --"OPT_TRACE"=<regex-match>\n" 2170 " Enable trace based on regular expression trace name.\n" 2171 " By default, the trace is disabled.\n" 2172 " User must specify this option to enable trace.\n" 2173 " --"OPT_TRACE_DIR"=<directory path>\n" 2174 " Specify trace directory for trace output.\n" 2175 " By default, trace output will created at\n" 2176 " $HOME directory and parameter must be\n" 2177 " specified once only.\n" 2178 " --"OPT_TRACE_BUF_SIZE"=<int>\n" 2179 " Specify maximum size of allocated memory\n" 2180 " for trace output for each thread. Valid\n" 2181 " unit can be either 'B|K|M' for 'Bytes',\n" 2182 " 'KBytes' and 'MBytes' respectively.\n" 2183 " Default is 1MB and parameter must be\n" 2184 " specified once only.\n" 2185 " --"OPT_TRACE_MODE"=<o[verwrite] | d[iscard]>\n" 2186 " Specify the mode of update of trace\n" 2187 " output file. Either update on a file can\n" 2188 " be wrapped or discarded when file size\n" 2189 " reaches its maximum limit.\n" 2190 " Default mode is 'overwrite' and parameter\n" 2191 " must be specified once only.\n" 2192 #endif /* !RTE_EXEC_ENV_WINDOWS */ 2193 " -v Display version information on startup\n" 2194 " -h, --help This help\n" 2195 " --"OPT_IN_MEMORY" Operate entirely in memory. This will\n" 2196 " disable secondary process support\n" 2197 " --"OPT_BASE_VIRTADDR" Base virtual address\n" 2198 " --"OPT_TELEMETRY" Enable telemetry support (on by default)\n" 2199 " --"OPT_NO_TELEMETRY" Disable telemetry support\n" 2200 " --"OPT_FORCE_MAX_SIMD_BITWIDTH" Force the max SIMD bitwidth\n" 2201 "\nEAL options for DEBUG use only:\n" 2202 " --"OPT_HUGE_UNLINK" Unlink hugepage files after init\n" 2203 " --"OPT_NO_HUGE" Use malloc instead of hugetlbfs\n" 2204 " --"OPT_NO_PCI" Disable PCI\n" 2205 " --"OPT_NO_HPET" Disable HPET\n" 2206 " --"OPT_NO_SHCONF" No shared config (mmap'd files)\n" 2207 "\n", RTE_MAX_LCORE); 2208 } 2209