1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2014 Intel Corporation. 3 * Copyright(c) 2014 6WIND S.A. 4 */ 5 6 #include <stdlib.h> 7 #include <string.h> 8 #ifndef RTE_EXEC_ENV_WINDOWS 9 #include <syslog.h> 10 #endif 11 #include <ctype.h> 12 #include <limits.h> 13 #include <errno.h> 14 #include <getopt.h> 15 #ifndef RTE_EXEC_ENV_WINDOWS 16 #include <dlfcn.h> 17 #include <libgen.h> 18 #endif 19 #include <sys/stat.h> 20 #ifndef RTE_EXEC_ENV_WINDOWS 21 #include <dirent.h> 22 #endif 23 24 #include <rte_string_fns.h> 25 #include <rte_eal.h> 26 #include <rte_log.h> 27 #include <rte_lcore.h> 28 #include <rte_memory.h> 29 #include <rte_tailq.h> 30 #include <rte_version.h> 31 #include <rte_devargs.h> 32 #include <rte_memcpy.h> 33 #ifndef RTE_EXEC_ENV_WINDOWS 34 #include <rte_telemetry.h> 35 #endif 36 #include <rte_vect.h> 37 38 #include "eal_internal_cfg.h" 39 #include "eal_options.h" 40 #include "eal_filesystem.h" 41 #include "eal_private.h" 42 #include "eal_log.h" 43 #ifndef RTE_EXEC_ENV_WINDOWS 44 #include "eal_trace.h" 45 #endif 46 47 #define BITS_PER_HEX 4 48 #define LCORE_OPT_LST 1 49 #define LCORE_OPT_MSK 2 50 #define LCORE_OPT_MAP 3 51 52 const char 53 eal_short_options[] = 54 "a:" /* allow */ 55 "b:" /* block */ 56 "c:" /* coremask */ 57 "s:" /* service coremask */ 58 "d:" /* driver */ 59 "h" /* help */ 60 "l:" /* corelist */ 61 "S:" /* service corelist */ 62 "m:" /* memory size */ 63 "n:" /* memory channels */ 64 "r:" /* memory ranks */ 65 "v" /* version */ 66 ; 67 68 const struct option 69 eal_long_options[] = { 70 {OPT_BASE_VIRTADDR, 1, NULL, OPT_BASE_VIRTADDR_NUM }, 71 {OPT_CREATE_UIO_DEV, 0, NULL, OPT_CREATE_UIO_DEV_NUM }, 72 {OPT_FILE_PREFIX, 1, NULL, OPT_FILE_PREFIX_NUM }, 73 {OPT_HELP, 0, NULL, OPT_HELP_NUM }, 74 {OPT_HUGE_DIR, 1, NULL, OPT_HUGE_DIR_NUM }, 75 {OPT_HUGE_UNLINK, 2, NULL, OPT_HUGE_UNLINK_NUM }, 76 {OPT_IOVA_MODE, 1, NULL, OPT_IOVA_MODE_NUM }, 77 {OPT_LCORES, 1, NULL, OPT_LCORES_NUM }, 78 {OPT_LOG_LEVEL, 1, NULL, 
OPT_LOG_LEVEL_NUM }, 79 {OPT_TRACE, 1, NULL, OPT_TRACE_NUM }, 80 {OPT_TRACE_DIR, 1, NULL, OPT_TRACE_DIR_NUM }, 81 {OPT_TRACE_BUF_SIZE, 1, NULL, OPT_TRACE_BUF_SIZE_NUM }, 82 {OPT_TRACE_MODE, 1, NULL, OPT_TRACE_MODE_NUM }, 83 {OPT_MAIN_LCORE, 1, NULL, OPT_MAIN_LCORE_NUM }, 84 {OPT_MBUF_POOL_OPS_NAME, 1, NULL, OPT_MBUF_POOL_OPS_NAME_NUM}, 85 {OPT_NO_HPET, 0, NULL, OPT_NO_HPET_NUM }, 86 {OPT_NO_HUGE, 0, NULL, OPT_NO_HUGE_NUM }, 87 {OPT_NO_PCI, 0, NULL, OPT_NO_PCI_NUM }, 88 {OPT_NO_SHCONF, 0, NULL, OPT_NO_SHCONF_NUM }, 89 {OPT_IN_MEMORY, 0, NULL, OPT_IN_MEMORY_NUM }, 90 {OPT_DEV_BLOCK, 1, NULL, OPT_DEV_BLOCK_NUM }, 91 {OPT_DEV_ALLOW, 1, NULL, OPT_DEV_ALLOW_NUM }, 92 {OPT_PROC_TYPE, 1, NULL, OPT_PROC_TYPE_NUM }, 93 {OPT_SOCKET_MEM, 1, NULL, OPT_SOCKET_MEM_NUM }, 94 {OPT_SOCKET_LIMIT, 1, NULL, OPT_SOCKET_LIMIT_NUM }, 95 {OPT_SYSLOG, 1, NULL, OPT_SYSLOG_NUM }, 96 {OPT_VDEV, 1, NULL, OPT_VDEV_NUM }, 97 {OPT_VFIO_INTR, 1, NULL, OPT_VFIO_INTR_NUM }, 98 {OPT_VFIO_VF_TOKEN, 1, NULL, OPT_VFIO_VF_TOKEN_NUM }, 99 {OPT_VMWARE_TSC_MAP, 0, NULL, OPT_VMWARE_TSC_MAP_NUM }, 100 {OPT_LEGACY_MEM, 0, NULL, OPT_LEGACY_MEM_NUM }, 101 {OPT_SINGLE_FILE_SEGMENTS, 0, NULL, OPT_SINGLE_FILE_SEGMENTS_NUM}, 102 {OPT_MATCH_ALLOCATIONS, 0, NULL, OPT_MATCH_ALLOCATIONS_NUM}, 103 {OPT_TELEMETRY, 0, NULL, OPT_TELEMETRY_NUM }, 104 {OPT_NO_TELEMETRY, 0, NULL, OPT_NO_TELEMETRY_NUM }, 105 {OPT_FORCE_MAX_SIMD_BITWIDTH, 1, NULL, OPT_FORCE_MAX_SIMD_BITWIDTH_NUM}, 106 {OPT_HUGE_WORKER_STACK, 2, NULL, OPT_HUGE_WORKER_STACK_NUM }, 107 108 {0, 0, NULL, 0 } 109 }; 110 111 TAILQ_HEAD(shared_driver_list, shared_driver); 112 113 /* Definition for shared object drivers. 
*/ 114 struct shared_driver { 115 TAILQ_ENTRY(shared_driver) next; 116 117 char name[PATH_MAX]; 118 void* lib_handle; 119 }; 120 121 /* List of external loadable drivers */ 122 static struct shared_driver_list solib_list = 123 TAILQ_HEAD_INITIALIZER(solib_list); 124 125 #ifndef RTE_EXEC_ENV_WINDOWS 126 /* Default path of external loadable drivers */ 127 static const char *default_solib_dir = RTE_EAL_PMD_PATH; 128 #endif 129 130 /* 131 * Stringified version of solib path used by dpdk-pmdinfo.py 132 * Note: PLEASE DO NOT ALTER THIS without making a corresponding 133 * change to usertools/dpdk-pmdinfo.py 134 */ 135 static const char dpdk_solib_path[] __rte_used = 136 "DPDK_PLUGIN_PATH=" RTE_EAL_PMD_PATH; 137 138 TAILQ_HEAD(device_option_list, device_option); 139 140 struct device_option { 141 TAILQ_ENTRY(device_option) next; 142 143 enum rte_devtype type; 144 char arg[]; 145 }; 146 147 static struct device_option_list devopt_list = 148 TAILQ_HEAD_INITIALIZER(devopt_list); 149 150 static int main_lcore_parsed; 151 static int mem_parsed; 152 static int core_parsed; 153 154 /* Allow the application to print its usage message too if set */ 155 static rte_usage_hook_t rte_application_usage_hook; 156 157 /* Returns rte_usage_hook_t */ 158 rte_usage_hook_t 159 eal_get_application_usage_hook(void) 160 { 161 return rte_application_usage_hook; 162 } 163 164 /* Set a per-application usage message */ 165 rte_usage_hook_t 166 rte_set_application_usage_hook(rte_usage_hook_t usage_func) 167 { 168 rte_usage_hook_t old_func; 169 170 /* Will be NULL on the first call to denote the last usage routine. 
*/ 171 old_func = rte_application_usage_hook; 172 rte_application_usage_hook = usage_func; 173 174 return old_func; 175 } 176 177 #ifndef RTE_EXEC_ENV_WINDOWS 178 static char **eal_args; 179 static char **eal_app_args; 180 181 #define EAL_PARAM_REQ "/eal/params" 182 #define EAL_APP_PARAM_REQ "/eal/app_params" 183 184 /* callback handler for telemetry library to report out EAL flags */ 185 int 186 handle_eal_info_request(const char *cmd, const char *params __rte_unused, 187 struct rte_tel_data *d) 188 { 189 char **args; 190 int used = 0; 191 int i = 0; 192 193 if (strcmp(cmd, EAL_PARAM_REQ) == 0) 194 args = eal_args; 195 else 196 args = eal_app_args; 197 198 rte_tel_data_start_array(d, RTE_TEL_STRING_VAL); 199 if (args == NULL || args[0] == NULL) 200 return 0; 201 202 for ( ; args[i] != NULL; i++) 203 used = rte_tel_data_add_array_string(d, args[i]); 204 return used; 205 } 206 207 int 208 eal_save_args(int argc, char **argv) 209 { 210 int i, j; 211 212 rte_telemetry_register_cmd(EAL_PARAM_REQ, handle_eal_info_request, 213 "Returns EAL commandline parameters used. Takes no parameters"); 214 rte_telemetry_register_cmd(EAL_APP_PARAM_REQ, handle_eal_info_request, 215 "Returns app commandline parameters used. Takes no parameters"); 216 217 /* clone argv to report out later. 
We overprovision, but 218 * this does not waste huge amounts of memory 219 */ 220 eal_args = calloc(argc + 1, sizeof(*eal_args)); 221 if (eal_args == NULL) 222 return -1; 223 224 for (i = 0; i < argc; i++) { 225 if (strcmp(argv[i], "--") == 0) 226 break; 227 eal_args[i] = strdup(argv[i]); 228 } 229 eal_args[i++] = NULL; /* always finish with NULL */ 230 231 /* allow reporting of any app args we know about too */ 232 if (i >= argc) 233 return 0; 234 235 eal_app_args = calloc(argc - i + 1, sizeof(*eal_args)); 236 if (eal_app_args == NULL) 237 return -1; 238 239 for (j = 0; i < argc; j++, i++) 240 eal_app_args[j] = strdup(argv[i]); 241 eal_app_args[j] = NULL; 242 243 return 0; 244 } 245 #endif 246 247 static int 248 eal_option_device_add(enum rte_devtype type, const char *optarg) 249 { 250 struct device_option *devopt; 251 size_t optlen; 252 int ret; 253 254 optlen = strlen(optarg) + 1; 255 devopt = calloc(1, sizeof(*devopt) + optlen); 256 if (devopt == NULL) { 257 RTE_LOG(ERR, EAL, "Unable to allocate device option\n"); 258 return -ENOMEM; 259 } 260 261 devopt->type = type; 262 ret = strlcpy(devopt->arg, optarg, optlen); 263 if (ret < 0) { 264 RTE_LOG(ERR, EAL, "Unable to copy device option\n"); 265 free(devopt); 266 return -EINVAL; 267 } 268 TAILQ_INSERT_TAIL(&devopt_list, devopt, next); 269 return 0; 270 } 271 272 int 273 eal_option_device_parse(void) 274 { 275 struct device_option *devopt; 276 void *tmp; 277 int ret = 0; 278 279 RTE_TAILQ_FOREACH_SAFE(devopt, &devopt_list, next, tmp) { 280 if (ret == 0) { 281 ret = rte_devargs_add(devopt->type, devopt->arg); 282 if (ret) 283 RTE_LOG(ERR, EAL, "Unable to parse device '%s'\n", 284 devopt->arg); 285 } 286 TAILQ_REMOVE(&devopt_list, devopt, next); 287 free(devopt); 288 } 289 return ret; 290 } 291 292 const char * 293 eal_get_hugefile_prefix(void) 294 { 295 const struct internal_config *internal_conf = 296 eal_get_internal_configuration(); 297 298 if (internal_conf->hugefile_prefix != NULL) 299 return 
internal_conf->hugefile_prefix; 300 return HUGEFILE_PREFIX_DEFAULT; 301 } 302 303 void 304 eal_reset_internal_config(struct internal_config *internal_cfg) 305 { 306 int i; 307 308 internal_cfg->memory = 0; 309 internal_cfg->force_nrank = 0; 310 internal_cfg->force_nchannel = 0; 311 internal_cfg->hugefile_prefix = NULL; 312 internal_cfg->hugepage_dir = NULL; 313 internal_cfg->hugepage_file.unlink_before_mapping = false; 314 internal_cfg->hugepage_file.unlink_existing = true; 315 internal_cfg->force_sockets = 0; 316 /* zero out the NUMA config */ 317 for (i = 0; i < RTE_MAX_NUMA_NODES; i++) 318 internal_cfg->socket_mem[i] = 0; 319 internal_cfg->force_socket_limits = 0; 320 /* zero out the NUMA limits config */ 321 for (i = 0; i < RTE_MAX_NUMA_NODES; i++) 322 internal_cfg->socket_limit[i] = 0; 323 /* zero out hugedir descriptors */ 324 for (i = 0; i < MAX_HUGEPAGE_SIZES; i++) { 325 memset(&internal_cfg->hugepage_info[i], 0, 326 sizeof(internal_cfg->hugepage_info[0])); 327 internal_cfg->hugepage_info[i].lock_descriptor = -1; 328 } 329 internal_cfg->base_virtaddr = 0; 330 331 #ifdef LOG_DAEMON 332 internal_cfg->syslog_facility = LOG_DAEMON; 333 #endif 334 335 /* if set to NONE, interrupt mode is determined automatically */ 336 internal_cfg->vfio_intr_mode = RTE_INTR_MODE_NONE; 337 memset(internal_cfg->vfio_vf_token, 0, 338 sizeof(internal_cfg->vfio_vf_token)); 339 340 #ifdef RTE_LIBEAL_USE_HPET 341 internal_cfg->no_hpet = 0; 342 #else 343 internal_cfg->no_hpet = 1; 344 #endif 345 internal_cfg->vmware_tsc_map = 0; 346 internal_cfg->create_uio_dev = 0; 347 internal_cfg->iova_mode = RTE_IOVA_DC; 348 internal_cfg->user_mbuf_pool_ops_name = NULL; 349 CPU_ZERO(&internal_cfg->ctrl_cpuset); 350 internal_cfg->init_complete = 0; 351 internal_cfg->max_simd_bitwidth.bitwidth = RTE_VECT_DEFAULT_SIMD_BITWIDTH; 352 internal_cfg->max_simd_bitwidth.forced = 0; 353 } 354 355 static int 356 eal_plugin_add(const char *path) 357 { 358 struct shared_driver *solib; 359 360 solib = 
malloc(sizeof(*solib)); 361 if (solib == NULL) { 362 RTE_LOG(ERR, EAL, "malloc(solib) failed\n"); 363 return -1; 364 } 365 memset(solib, 0, sizeof(*solib)); 366 strlcpy(solib->name, path, PATH_MAX); 367 TAILQ_INSERT_TAIL(&solib_list, solib, next); 368 369 return 0; 370 } 371 372 #ifdef RTE_EXEC_ENV_WINDOWS 373 int 374 eal_plugins_init(void) 375 { 376 return 0; 377 } 378 #else 379 380 static int 381 eal_plugindir_init(const char *path) 382 { 383 DIR *d = NULL; 384 struct dirent *dent = NULL; 385 char sopath[PATH_MAX]; 386 387 if (path == NULL || *path == '\0') 388 return 0; 389 390 d = opendir(path); 391 if (d == NULL) { 392 RTE_LOG(ERR, EAL, "failed to open directory %s: %s\n", 393 path, strerror(errno)); 394 return -1; 395 } 396 397 while ((dent = readdir(d)) != NULL) { 398 struct stat sb; 399 int nlen = strnlen(dent->d_name, sizeof(dent->d_name)); 400 401 /* check if name ends in .so or .so.ABI_VERSION */ 402 if (strcmp(&dent->d_name[nlen - 3], ".so") != 0 && 403 strcmp(&dent->d_name[nlen - 4 - strlen(ABI_VERSION)], 404 ".so."ABI_VERSION) != 0) 405 continue; 406 407 snprintf(sopath, sizeof(sopath), "%s/%s", path, dent->d_name); 408 409 /* if a regular file, add to list to load */ 410 if (!(stat(sopath, &sb) == 0 && S_ISREG(sb.st_mode))) 411 continue; 412 413 if (eal_plugin_add(sopath) == -1) 414 break; 415 } 416 417 closedir(d); 418 /* XXX this ignores failures from readdir() itself */ 419 return (dent == NULL) ? 
0 : -1; 420 } 421 422 static int 423 verify_perms(const char *dirpath) 424 { 425 struct stat st; 426 427 /* if not root, check down one level first */ 428 if (strcmp(dirpath, "/") != 0) { 429 static __thread char last_dir_checked[PATH_MAX]; 430 char copy[PATH_MAX]; 431 const char *dir; 432 433 strlcpy(copy, dirpath, PATH_MAX); 434 dir = dirname(copy); 435 if (strncmp(dir, last_dir_checked, PATH_MAX) != 0) { 436 if (verify_perms(dir) != 0) 437 return -1; 438 strlcpy(last_dir_checked, dir, PATH_MAX); 439 } 440 } 441 442 /* call stat to check for permissions and ensure not world writable */ 443 if (stat(dirpath, &st) != 0) { 444 RTE_LOG(ERR, EAL, "Error with stat on %s, %s\n", 445 dirpath, strerror(errno)); 446 return -1; 447 } 448 if (st.st_mode & S_IWOTH) { 449 RTE_LOG(ERR, EAL, 450 "Error, directory path %s is world-writable and insecure\n", 451 dirpath); 452 return -1; 453 } 454 455 return 0; 456 } 457 458 static void * 459 eal_dlopen(const char *pathname) 460 { 461 void *retval = NULL; 462 char *realp = realpath(pathname, NULL); 463 464 if (realp == NULL && errno == ENOENT) { 465 /* not a full or relative path, try a load from system dirs */ 466 retval = dlopen(pathname, RTLD_NOW); 467 if (retval == NULL) 468 RTE_LOG(ERR, EAL, "%s\n", dlerror()); 469 return retval; 470 } 471 if (realp == NULL) { 472 RTE_LOG(ERR, EAL, "Error with realpath for %s, %s\n", 473 pathname, strerror(errno)); 474 goto out; 475 } 476 if (strnlen(realp, PATH_MAX) == PATH_MAX) { 477 RTE_LOG(ERR, EAL, "Error, driver path greater than PATH_MAX\n"); 478 goto out; 479 } 480 481 /* do permissions checks */ 482 if (verify_perms(realp) != 0) 483 goto out; 484 485 retval = dlopen(realp, RTLD_NOW); 486 if (retval == NULL) 487 RTE_LOG(ERR, EAL, "%s\n", dlerror()); 488 out: 489 free(realp); 490 return retval; 491 } 492 493 static int 494 is_shared_build(void) 495 { 496 #define EAL_SO "librte_eal.so" 497 char soname[32]; 498 size_t len, minlen = strlen(EAL_SO); 499 500 len = strlcpy(soname, 
EAL_SO"."ABI_VERSION, sizeof(soname)); 501 if (len > sizeof(soname)) { 502 RTE_LOG(ERR, EAL, "Shared lib name too long in shared build check\n"); 503 len = sizeof(soname) - 1; 504 } 505 506 while (len >= minlen) { 507 void *handle; 508 509 /* check if we have this .so loaded, if so - shared build */ 510 RTE_LOG(DEBUG, EAL, "Checking presence of .so '%s'\n", soname); 511 handle = dlopen(soname, RTLD_LAZY | RTLD_NOLOAD); 512 if (handle != NULL) { 513 RTE_LOG(INFO, EAL, "Detected shared linkage of DPDK\n"); 514 dlclose(handle); 515 return 1; 516 } 517 518 /* remove any version numbers off the end to retry */ 519 while (len-- > 0) 520 if (soname[len] == '.') { 521 soname[len] = '\0'; 522 break; 523 } 524 } 525 526 RTE_LOG(INFO, EAL, "Detected static linkage of DPDK\n"); 527 return 0; 528 } 529 530 int 531 eal_plugins_init(void) 532 { 533 struct shared_driver *solib = NULL; 534 struct stat sb; 535 536 /* If we are not statically linked, add default driver loading 537 * path if it exists as a directory. 538 * (Using dlopen with NOLOAD flag on EAL, will return NULL if the EAL 539 * shared library is not already loaded i.e. it's statically linked.) 540 */ 541 if (is_shared_build() && 542 *default_solib_dir != '\0' && 543 stat(default_solib_dir, &sb) == 0 && 544 S_ISDIR(sb.st_mode)) 545 eal_plugin_add(default_solib_dir); 546 547 TAILQ_FOREACH(solib, &solib_list, next) { 548 549 if (stat(solib->name, &sb) == 0 && S_ISDIR(sb.st_mode)) { 550 if (eal_plugindir_init(solib->name) == -1) { 551 RTE_LOG(ERR, EAL, 552 "Cannot init plugin directory %s\n", 553 solib->name); 554 return -1; 555 } 556 } else { 557 RTE_LOG(DEBUG, EAL, "open shared lib %s\n", 558 solib->name); 559 solib->lib_handle = eal_dlopen(solib->name); 560 if (solib->lib_handle == NULL) 561 return -1; 562 } 563 564 } 565 return 0; 566 } 567 #endif 568 569 /* 570 * Parse the coremask given as argument (hexadecimal string) and fill 571 * the global configuration (core role and core count) with the parsed 572 * value. 
573 */ 574 static int xdigit2val(unsigned char c) 575 { 576 int val; 577 578 if (isdigit(c)) 579 val = c - '0'; 580 else if (isupper(c)) 581 val = c - 'A' + 10; 582 else 583 val = c - 'a' + 10; 584 return val; 585 } 586 587 static int 588 eal_parse_service_coremask(const char *coremask) 589 { 590 struct rte_config *cfg = rte_eal_get_configuration(); 591 int i, j, idx = 0; 592 unsigned int count = 0; 593 char c; 594 int val; 595 uint32_t taken_lcore_count = 0; 596 597 if (coremask == NULL) 598 return -1; 599 /* Remove all blank characters ahead and after . 600 * Remove 0x/0X if exists. 601 */ 602 while (isblank(*coremask)) 603 coremask++; 604 if (coremask[0] == '0' && ((coremask[1] == 'x') 605 || (coremask[1] == 'X'))) 606 coremask += 2; 607 i = strlen(coremask); 608 while ((i > 0) && isblank(coremask[i - 1])) 609 i--; 610 611 if (i == 0) 612 return -1; 613 614 for (i = i - 1; i >= 0 && idx < RTE_MAX_LCORE; i--) { 615 c = coremask[i]; 616 if (isxdigit(c) == 0) { 617 /* invalid characters */ 618 return -1; 619 } 620 val = xdigit2val(c); 621 for (j = 0; j < BITS_PER_HEX && idx < RTE_MAX_LCORE; 622 j++, idx++) { 623 if ((1 << j) & val) { 624 /* handle main lcore already parsed */ 625 uint32_t lcore = idx; 626 if (main_lcore_parsed && 627 cfg->main_lcore == lcore) { 628 RTE_LOG(ERR, EAL, 629 "lcore %u is main lcore, cannot use as service core\n", 630 idx); 631 return -1; 632 } 633 634 if (eal_cpu_detected(idx) == 0) { 635 RTE_LOG(ERR, EAL, 636 "lcore %u unavailable\n", idx); 637 return -1; 638 } 639 640 if (cfg->lcore_role[idx] == ROLE_RTE) 641 taken_lcore_count++; 642 643 lcore_config[idx].core_role = ROLE_SERVICE; 644 count++; 645 } 646 } 647 } 648 649 for (; i >= 0; i--) 650 if (coremask[i] != '0') 651 return -1; 652 653 for (; idx < RTE_MAX_LCORE; idx++) 654 lcore_config[idx].core_index = -1; 655 656 if (count == 0) 657 return -1; 658 659 if (core_parsed && taken_lcore_count != count) { 660 RTE_LOG(WARNING, EAL, 661 "Not all service cores are in the coremask. 
" 662 "Please ensure -c or -l includes service cores\n"); 663 } 664 665 cfg->service_lcore_count = count; 666 return 0; 667 } 668 669 static int 670 eal_service_cores_parsed(void) 671 { 672 int idx; 673 for (idx = 0; idx < RTE_MAX_LCORE; idx++) { 674 if (lcore_config[idx].core_role == ROLE_SERVICE) 675 return 1; 676 } 677 return 0; 678 } 679 680 static int 681 update_lcore_config(int *cores) 682 { 683 struct rte_config *cfg = rte_eal_get_configuration(); 684 unsigned int count = 0; 685 unsigned int i; 686 int ret = 0; 687 688 for (i = 0; i < RTE_MAX_LCORE; i++) { 689 if (cores[i] != -1) { 690 if (eal_cpu_detected(i) == 0) { 691 RTE_LOG(ERR, EAL, "lcore %u unavailable\n", i); 692 ret = -1; 693 continue; 694 } 695 cfg->lcore_role[i] = ROLE_RTE; 696 count++; 697 } else { 698 cfg->lcore_role[i] = ROLE_OFF; 699 } 700 lcore_config[i].core_index = cores[i]; 701 } 702 if (!ret) 703 cfg->lcore_count = count; 704 return ret; 705 } 706 707 static int 708 check_core_list(int *lcores, unsigned int count) 709 { 710 char lcorestr[RTE_MAX_LCORE * 10]; 711 bool overflow = false; 712 int len = 0, ret; 713 unsigned int i; 714 715 for (i = 0; i < count; i++) { 716 if (lcores[i] < RTE_MAX_LCORE) 717 continue; 718 719 RTE_LOG(ERR, EAL, "lcore %d >= RTE_MAX_LCORE (%d)\n", 720 lcores[i], RTE_MAX_LCORE); 721 overflow = true; 722 } 723 if (!overflow) 724 return 0; 725 726 /* 727 * We've encountered a core that's greater than RTE_MAX_LCORE, 728 * suggest using --lcores option to map lcores onto physical cores 729 * greater than RTE_MAX_LCORE. 730 */ 731 for (i = 0; i < count; i++) { 732 ret = snprintf(&lcorestr[len], sizeof(lcorestr) - len, 733 "%d@%d,", i, lcores[i]); 734 if (ret > 0) 735 len = len + ret; 736 } 737 if (len > 0) 738 lcorestr[len - 1] = 0; 739 RTE_LOG(ERR, EAL, "To use high physical core ids, " 740 "please use --lcores to map them to lcore ids below RTE_MAX_LCORE, " 741 "e.g. 
--lcores %s\n", lcorestr); 742 return -1; 743 } 744 745 int 746 rte_eal_parse_coremask(const char *coremask, int *cores) 747 { 748 const char *coremask_orig = coremask; 749 int lcores[RTE_MAX_LCORE]; 750 unsigned int count = 0; 751 int i, j, idx; 752 int val; 753 char c; 754 755 for (idx = 0; idx < RTE_MAX_LCORE; idx++) 756 cores[idx] = -1; 757 idx = 0; 758 759 /* Remove all blank characters ahead and after . 760 * Remove 0x/0X if exists. 761 */ 762 while (isblank(*coremask)) 763 coremask++; 764 if (coremask[0] == '0' && ((coremask[1] == 'x') 765 || (coremask[1] == 'X'))) 766 coremask += 2; 767 i = strlen(coremask); 768 while ((i > 0) && isblank(coremask[i - 1])) 769 i--; 770 if (i == 0) { 771 RTE_LOG(ERR, EAL, "No lcores in coremask: [%s]\n", 772 coremask_orig); 773 return -1; 774 } 775 776 for (i = i - 1; i >= 0; i--) { 777 c = coremask[i]; 778 if (isxdigit(c) == 0) { 779 /* invalid characters */ 780 RTE_LOG(ERR, EAL, "invalid characters in coremask: [%s]\n", 781 coremask_orig); 782 return -1; 783 } 784 val = xdigit2val(c); 785 for (j = 0; j < BITS_PER_HEX; j++, idx++) 786 { 787 if ((1 << j) & val) { 788 if (count >= RTE_MAX_LCORE) { 789 RTE_LOG(ERR, EAL, "Too many lcores provided. Cannot exceed RTE_MAX_LCORE (%d)\n", 790 RTE_MAX_LCORE); 791 return -1; 792 } 793 lcores[count++] = idx; 794 } 795 } 796 } 797 if (count == 0) { 798 RTE_LOG(ERR, EAL, "No lcores in coremask: [%s]\n", 799 coremask_orig); 800 return -1; 801 } 802 803 if (check_core_list(lcores, count)) 804 return -1; 805 806 /* 807 * Now that we've got a list of cores no longer than RTE_MAX_LCORE, 808 * and no lcore in that list is greater than RTE_MAX_LCORE, populate 809 * the cores array. 
810 */ 811 do { 812 count--; 813 cores[lcores[count]] = count; 814 } while (count != 0); 815 816 return 0; 817 } 818 819 static int 820 eal_parse_service_corelist(const char *corelist) 821 { 822 struct rte_config *cfg = rte_eal_get_configuration(); 823 int i; 824 unsigned count = 0; 825 char *end = NULL; 826 uint32_t min, max, idx; 827 uint32_t taken_lcore_count = 0; 828 829 if (corelist == NULL) 830 return -1; 831 832 /* Remove all blank characters ahead and after */ 833 while (isblank(*corelist)) 834 corelist++; 835 i = strlen(corelist); 836 while ((i > 0) && isblank(corelist[i - 1])) 837 i--; 838 839 /* Get list of cores */ 840 min = RTE_MAX_LCORE; 841 do { 842 while (isblank(*corelist)) 843 corelist++; 844 if (*corelist == '\0') 845 return -1; 846 errno = 0; 847 idx = strtoul(corelist, &end, 10); 848 if (errno || end == NULL) 849 return -1; 850 if (idx >= RTE_MAX_LCORE) 851 return -1; 852 while (isblank(*end)) 853 end++; 854 if (*end == '-') { 855 min = idx; 856 } else if ((*end == ',') || (*end == '\0')) { 857 max = idx; 858 if (min == RTE_MAX_LCORE) 859 min = idx; 860 for (idx = min; idx <= max; idx++) { 861 if (cfg->lcore_role[idx] != ROLE_SERVICE) { 862 /* handle main lcore already parsed */ 863 uint32_t lcore = idx; 864 if (cfg->main_lcore == lcore && 865 main_lcore_parsed) { 866 RTE_LOG(ERR, EAL, 867 "Error: lcore %u is main lcore, cannot use as service core\n", 868 idx); 869 return -1; 870 } 871 if (cfg->lcore_role[idx] == ROLE_RTE) 872 taken_lcore_count++; 873 874 lcore_config[idx].core_role = 875 ROLE_SERVICE; 876 count++; 877 } 878 } 879 min = RTE_MAX_LCORE; 880 } else 881 return -1; 882 corelist = end + 1; 883 } while (*end != '\0'); 884 885 if (count == 0) 886 return -1; 887 888 if (core_parsed && taken_lcore_count != count) { 889 RTE_LOG(WARNING, EAL, 890 "Not all service cores were in the coremask. 
" 891 "Please ensure -c or -l includes service cores\n"); 892 } 893 894 return 0; 895 } 896 897 static int 898 eal_parse_corelist(const char *corelist, int *cores) 899 { 900 unsigned int count = 0, i; 901 int lcores[RTE_MAX_LCORE]; 902 char *end = NULL; 903 int min, max; 904 int idx; 905 906 for (idx = 0; idx < RTE_MAX_LCORE; idx++) 907 cores[idx] = -1; 908 909 /* Remove all blank characters ahead */ 910 while (isblank(*corelist)) 911 corelist++; 912 913 /* Get list of cores */ 914 min = -1; 915 do { 916 while (isblank(*corelist)) 917 corelist++; 918 if (*corelist == '\0') 919 return -1; 920 errno = 0; 921 idx = strtol(corelist, &end, 10); 922 if (errno || end == NULL) 923 return -1; 924 if (idx < 0) 925 return -1; 926 while (isblank(*end)) 927 end++; 928 if (*end == '-') { 929 min = idx; 930 } else if ((*end == ',') || (*end == '\0')) { 931 max = idx; 932 if (min == -1) 933 min = idx; 934 for (idx = min; idx <= max; idx++) { 935 bool dup = false; 936 937 /* Check if this idx is already present */ 938 for (i = 0; i < count; i++) { 939 if (lcores[i] == idx) 940 dup = true; 941 } 942 if (dup) 943 continue; 944 if (count >= RTE_MAX_LCORE) { 945 RTE_LOG(ERR, EAL, "Too many lcores provided. Cannot exceed RTE_MAX_LCORE (%d)\n", 946 RTE_MAX_LCORE); 947 return -1; 948 } 949 lcores[count++] = idx; 950 } 951 min = -1; 952 } else 953 return -1; 954 corelist = end + 1; 955 } while (*end != '\0'); 956 957 if (count == 0) 958 return -1; 959 960 if (check_core_list(lcores, count)) 961 return -1; 962 963 /* 964 * Now that we've got a list of cores no longer than RTE_MAX_LCORE, 965 * and no lcore in that list is greater than RTE_MAX_LCORE, populate 966 * the cores array. 
967 */ 968 do { 969 count--; 970 cores[lcores[count]] = count; 971 } while (count != 0); 972 973 return 0; 974 } 975 976 /* Changes the lcore id of the main thread */ 977 static int 978 eal_parse_main_lcore(const char *arg) 979 { 980 char *parsing_end; 981 struct rte_config *cfg = rte_eal_get_configuration(); 982 983 errno = 0; 984 cfg->main_lcore = (uint32_t) strtol(arg, &parsing_end, 0); 985 if (errno || parsing_end[0] != 0) 986 return -1; 987 if (cfg->main_lcore >= RTE_MAX_LCORE) 988 return -1; 989 main_lcore_parsed = 1; 990 991 /* ensure main core is not used as service core */ 992 if (lcore_config[cfg->main_lcore].core_role == ROLE_SERVICE) { 993 RTE_LOG(ERR, EAL, 994 "Error: Main lcore is used as a service core\n"); 995 return -1; 996 } 997 998 return 0; 999 } 1000 1001 /* 1002 * Parse elem, the elem could be single number/range or '(' ')' group 1003 * 1) A single number elem, it's just a simple digit. e.g. 9 1004 * 2) A single range elem, two digits with a '-' between. e.g. 2-6 1005 * 3) A group elem, combines multiple 1) or 2) with '( )'. e.g (0,2-4,6) 1006 * Within group elem, '-' used for a range separator; 1007 * ',' used for a single number. 
1008 */ 1009 static int 1010 eal_parse_set(const char *input, rte_cpuset_t *set) 1011 { 1012 unsigned idx; 1013 const char *str = input; 1014 char *end = NULL; 1015 unsigned min, max; 1016 1017 CPU_ZERO(set); 1018 1019 while (isblank(*str)) 1020 str++; 1021 1022 /* only digit or left bracket is qualify for start point */ 1023 if ((!isdigit(*str) && *str != '(') || *str == '\0') 1024 return -1; 1025 1026 /* process single number or single range of number */ 1027 if (*str != '(') { 1028 errno = 0; 1029 idx = strtoul(str, &end, 10); 1030 if (errno || end == NULL || idx >= CPU_SETSIZE) 1031 return -1; 1032 else { 1033 while (isblank(*end)) 1034 end++; 1035 1036 min = idx; 1037 max = idx; 1038 if (*end == '-') { 1039 /* process single <number>-<number> */ 1040 end++; 1041 while (isblank(*end)) 1042 end++; 1043 if (!isdigit(*end)) 1044 return -1; 1045 1046 errno = 0; 1047 idx = strtoul(end, &end, 10); 1048 if (errno || end == NULL || idx >= CPU_SETSIZE) 1049 return -1; 1050 max = idx; 1051 while (isblank(*end)) 1052 end++; 1053 if (*end != ',' && *end != '\0') 1054 return -1; 1055 } 1056 1057 if (*end != ',' && *end != '\0' && 1058 *end != '@') 1059 return -1; 1060 1061 for (idx = RTE_MIN(min, max); 1062 idx <= RTE_MAX(min, max); idx++) 1063 CPU_SET(idx, set); 1064 1065 return end - input; 1066 } 1067 } 1068 1069 /* process set within bracket */ 1070 str++; 1071 while (isblank(*str)) 1072 str++; 1073 if (*str == '\0') 1074 return -1; 1075 1076 min = RTE_MAX_LCORE; 1077 do { 1078 1079 /* go ahead to the first digit */ 1080 while (isblank(*str)) 1081 str++; 1082 if (!isdigit(*str)) 1083 return -1; 1084 1085 /* get the digit value */ 1086 errno = 0; 1087 idx = strtoul(str, &end, 10); 1088 if (errno || end == NULL || idx >= CPU_SETSIZE) 1089 return -1; 1090 1091 /* go ahead to separator '-',',' and ')' */ 1092 while (isblank(*end)) 1093 end++; 1094 if (*end == '-') { 1095 if (min == RTE_MAX_LCORE) 1096 min = idx; 1097 else /* avoid continuous '-' */ 1098 return -1; 1099 } 
else if ((*end == ',') || (*end == ')')) { 1100 max = idx; 1101 if (min == RTE_MAX_LCORE) 1102 min = idx; 1103 for (idx = RTE_MIN(min, max); 1104 idx <= RTE_MAX(min, max); idx++) 1105 CPU_SET(idx, set); 1106 1107 min = RTE_MAX_LCORE; 1108 } else 1109 return -1; 1110 1111 str = end + 1; 1112 } while (*end != '\0' && *end != ')'); 1113 1114 /* 1115 * to avoid failure that tail blank makes end character check fail 1116 * in eal_parse_lcores( ) 1117 */ 1118 while (isblank(*str)) 1119 str++; 1120 1121 return str - input; 1122 } 1123 1124 static int 1125 check_cpuset(rte_cpuset_t *set) 1126 { 1127 unsigned int idx; 1128 1129 for (idx = 0; idx < CPU_SETSIZE; idx++) { 1130 if (!CPU_ISSET(idx, set)) 1131 continue; 1132 1133 if (eal_cpu_detected(idx) == 0) { 1134 RTE_LOG(ERR, EAL, "core %u " 1135 "unavailable\n", idx); 1136 return -1; 1137 } 1138 } 1139 return 0; 1140 } 1141 1142 /* 1143 * The format pattern: --lcores='<lcores[@cpus]>[<,lcores[@cpus]>...]' 1144 * lcores, cpus could be a single digit/range or a group. 1145 * '(' and ')' are necessary if it's a group. 1146 * If not supply '@cpus', the value of cpus uses the same as lcores. 1147 * e.g. 
'1,2@(5-7),(3-5)@(0,2),(0,6),7-8' means start 9 EAL thread as below 1148 * lcore 0 runs on cpuset 0x41 (cpu 0,6) 1149 * lcore 1 runs on cpuset 0x2 (cpu 1) 1150 * lcore 2 runs on cpuset 0xe0 (cpu 5,6,7) 1151 * lcore 3,4,5 runs on cpuset 0x5 (cpu 0,2) 1152 * lcore 6 runs on cpuset 0x41 (cpu 0,6) 1153 * lcore 7 runs on cpuset 0x80 (cpu 7) 1154 * lcore 8 runs on cpuset 0x100 (cpu 8) 1155 */ 1156 static int 1157 eal_parse_lcores(const char *lcores) 1158 { 1159 struct rte_config *cfg = rte_eal_get_configuration(); 1160 rte_cpuset_t lcore_set; 1161 unsigned int set_count; 1162 unsigned idx = 0; 1163 unsigned count = 0; 1164 const char *lcore_start = NULL; 1165 const char *end = NULL; 1166 int offset; 1167 rte_cpuset_t cpuset; 1168 int lflags; 1169 int ret = -1; 1170 1171 if (lcores == NULL) 1172 return -1; 1173 1174 /* Remove all blank characters ahead and after */ 1175 while (isblank(*lcores)) 1176 lcores++; 1177 1178 CPU_ZERO(&cpuset); 1179 1180 /* Reset lcore config */ 1181 for (idx = 0; idx < RTE_MAX_LCORE; idx++) { 1182 cfg->lcore_role[idx] = ROLE_OFF; 1183 lcore_config[idx].core_index = -1; 1184 CPU_ZERO(&lcore_config[idx].cpuset); 1185 } 1186 1187 /* Get list of cores */ 1188 do { 1189 while (isblank(*lcores)) 1190 lcores++; 1191 if (*lcores == '\0') 1192 goto err; 1193 1194 lflags = 0; 1195 1196 /* record lcore_set start point */ 1197 lcore_start = lcores; 1198 1199 /* go across a complete bracket */ 1200 if (*lcore_start == '(') { 1201 lcores += strcspn(lcores, ")"); 1202 if (*lcores++ == '\0') 1203 goto err; 1204 } 1205 1206 /* scan the separator '@', ','(next) or '\0'(finish) */ 1207 lcores += strcspn(lcores, "@,"); 1208 1209 if (*lcores == '@') { 1210 /* explicit assign cpuset and update the end cursor */ 1211 offset = eal_parse_set(lcores + 1, &cpuset); 1212 if (offset < 0) 1213 goto err; 1214 end = lcores + 1 + offset; 1215 } else { /* ',' or '\0' */ 1216 /* haven't given cpuset, current loop done */ 1217 end = lcores; 1218 1219 /* go back to check 
			<number>-<number> */
			offset = strcspn(lcore_start, "(-");
			if (offset < (end - lcore_start) &&
			    *(lcore_start + offset) != '(')
				lflags = 1;
		}

		/* lcore set must be followed by ',' (next mapping) or end */
		if (*end != ',' && *end != '\0')
			goto err;

		/* parse lcore_set from start point */
		if (eal_parse_set(lcore_start, &lcore_set) < 0)
			goto err;

		/* without '@', by default using lcore_set as cpuset */
		if (*lcores != '@')
			rte_memcpy(&cpuset, &lcore_set, sizeof(cpuset));

		set_count = CPU_COUNT(&lcore_set);
		/* start to update lcore_set */
		for (idx = 0; idx < RTE_MAX_LCORE; idx++) {
			if (!CPU_ISSET(idx, &lcore_set))
				continue;
			set_count--;

			/* count each lcore only once, even if repeated */
			if (cfg->lcore_role[idx] != ROLE_RTE) {
				lcore_config[idx].core_index = count;
				cfg->lcore_role[idx] = ROLE_RTE;
				count++;
			}

			/* list form (no group): each lcore pinned to one cpu */
			if (lflags) {
				CPU_ZERO(&cpuset);
				CPU_SET(idx, &cpuset);
			}

			if (check_cpuset(&cpuset) < 0)
				goto err;
			rte_memcpy(&lcore_config[idx].cpuset, &cpuset,
				   sizeof(rte_cpuset_t));
		}

		/* some cores from the lcore_set can't be handled by EAL */
		if (set_count != 0)
			goto err;

		lcores = end + 1;
	} while (*end != '\0');

	if (count == 0)
		goto err;

	cfg->lcore_count = count;
	ret = 0;

err:

	return ret;
}

#ifndef RTE_EXEC_ENV_WINDOWS
/*
 * Map a syslog facility name (e.g. "daemon", "local0") to its LOG_*
 * constant and store it in conf->syslog_facility.
 * Returns 0 on success, -1 if the name is unknown.
 */
static int
eal_parse_syslog(const char *facility, struct internal_config *conf)
{
	int i;
	static const struct {
		const char *name;
		int value;
	} map[] = {
		{ "auth", LOG_AUTH },
		{ "cron", LOG_CRON },
		{ "daemon", LOG_DAEMON },
		{ "ftp", LOG_FTP },
		{ "kern", LOG_KERN },
		{ "lpr", LOG_LPR },
		{ "mail", LOG_MAIL },
		{ "news", LOG_NEWS },
		{ "syslog", LOG_SYSLOG },
		{ "user", LOG_USER },
		{ "uucp", LOG_UUCP },
		{ "local0", LOG_LOCAL0 },
		{ "local1", LOG_LOCAL1 },
		{ "local2", LOG_LOCAL2 },
		{ "local3", LOG_LOCAL3 },
		{ "local4", LOG_LOCAL4 },
		{ "local5", LOG_LOCAL5 },
		{ "local6", LOG_LOCAL6 },
		{ "local7", LOG_LOCAL7 },
		{ NULL, 0 }
	};

	for (i = 0; map[i].name; i++) {
		if (!strcmp(facility, map[i].name)) {
			conf->syslog_facility = map[i].value;
			return 0;
		}
	}
	return -1;
}
#endif

/* Print the help text for --log-level (shown for --log-level=help). */
static void
eal_log_usage(void)
{
	unsigned int level;

	printf("Log type is a pattern matching items of this list"
			" (plugins may be missing):\n");
	rte_log_list_types(stdout, "\t");
	printf("\n");
	printf("Syntax using globbing pattern: ");
	printf("--"OPT_LOG_LEVEL" pattern:level\n");
	printf("Syntax using regular expression: ");
	printf("--"OPT_LOG_LEVEL" regexp,level\n");
	printf("Syntax for the global level: ");
	printf("--"OPT_LOG_LEVEL" level\n");
	printf("Logs are emitted if allowed by both global and specific levels.\n");
	printf("\n");
	printf("Log level can be a number or the first letters of its name:\n");
	for (level = 1; level <= RTE_LOG_MAX; level++)
		printf("\t%d   %s\n", level, eal_log_level2str(level));
}

/*
 * Convert a log level argument (a name prefix such as "deb", or a
 * number) to its numeric priority.
 * Returns the priority (>= 1) on success, -1 on failure.
 *
 * NOTE(review): name matching is a prefix match (strncmp with the
 * argument's length), so the first level whose name starts with the
 * argument wins — presumably intentional to allow abbreviations.
 */
static int
eal_parse_log_priority(const char *level)
{
	size_t len = strlen(level);
	unsigned long tmp;
	char *end;
	unsigned int i;

	if (len == 0)
		return -1;

	/* look for named values, skip 0 which is not a valid level */
	for (i = 1; i <= RTE_LOG_MAX; i++) {
		if (strncmp(eal_log_level2str(i), level, len) == 0)
			return i;
	}

	/* not a string, maybe it is numeric */
	errno = 0;
	tmp = strtoul(level, &end, 0);

	/* check for errors */
	if (errno != 0 || end == NULL || *end != '\0' ||
	    tmp >= UINT32_MAX)
		return -1;

	return tmp;
}

/*
 * Parse a --log-level argument. Three forms are accepted:
 *   "<level>"           — set the global log level;
 *   "<pattern>:<level>" — set level for types matching a glob pattern;
 *   "<regexp>,<level>"  — set level for types matching a regexp.
 * "help" prints usage and exits. Returns 0 on success, -1 on failure.
 */
static int
eal_parse_log_level(const char *arg)
{
	const char *pattern = NULL;
	const char *regex = NULL;
	char *str, *level;
	int priority;

	if (strcmp(arg, "help") == 0) {
		eal_log_usage();
		exit(EXIT_SUCCESS);
	}

	str = strdup(arg);
	if (str == NULL)
		return -1;

	/* ',' selects the regexp form, ':' the glob-pattern form */
	if ((level = strchr(str, ','))) {
		regex = str;
		*level++ = '\0';
	} else if ((level = strchr(str, ':'))) {
		pattern = str;
		*level++ = '\0';
	} else {
		level = str;
	}

	priority = eal_parse_log_priority(level);
	if (priority <= 0) {
		fprintf(stderr, "Invalid log level: %s\n", level);
		goto fail;
	}
	if (priority > (int)RTE_LOG_MAX) {
		fprintf(stderr, "Log level %d higher than maximum (%d)\n",
				priority, RTE_LOG_MAX);
		priority = RTE_LOG_MAX;
	}

	if (regex) {
		if (rte_log_set_level_regexp(regex, priority) < 0) {
			fprintf(stderr, "cannot set log level %s,%d\n",
					regex, priority);
			goto fail;
		}
		/* saved so the setting can be re-applied to late-registered types */
		if (eal_log_save_regexp(regex, priority) < 0)
			goto fail;
	} else if (pattern) {
		if (rte_log_set_level_pattern(pattern, priority) < 0) {
			fprintf(stderr, "cannot set log level %s:%d\n",
					pattern, priority);
			goto fail;
		}
		if (eal_log_save_pattern(pattern, priority) < 0)
			goto fail;
	} else {
		rte_log_set_global_level(priority);
	}

	free(str);
	return 0;

fail:
	free(str);
	return -1;
}

/*
 * Parse the --proc-type argument ("primary", "secondary" or "auto",
 * case-insensitive). Returns the matching rte_proc_type_t, or
 * RTE_PROC_INVALID if the argument matches none of them.
 */
static enum rte_proc_type_t
eal_parse_proc_type(const char *arg)
{
	/* sizeof("...") includes the NUL, so only exact names match */
	if (strncasecmp(arg, "primary", sizeof("primary")) == 0)
		return RTE_PROC_PRIMARY;
	if (strncasecmp(arg, "secondary", sizeof("secondary")) == 0)
		return RTE_PROC_SECONDARY;
	if (strncasecmp(arg, "auto", sizeof("auto")) == 0)
		return RTE_PROC_AUTO;

	return RTE_PROC_INVALID;
}

/*
 * Parse the --iova-mode argument ("pa" or "va") and record it in the
 * internal configuration. Returns 0 on success, -1 on failure.
 */
static int
eal_parse_iova_mode(const char *name)
{
	int mode;
	struct internal_config *internal_conf =
		eal_get_internal_configuration();

	if (name == NULL)
		return -1;

	if (!strcmp("pa", name))
		mode = RTE_IOVA_PA;
	else if (!strcmp("va", name))
		mode = RTE_IOVA_VA;
	else
		return -1;

	internal_conf->iova_mode = mode;
	return 0;
}

/*
 * Parse the --force-max-simd-bitwidth argument and force the max SIMD
 * bitwidth accordingly. "0" means "no limit" (RTE_VECT_SIMD_MAX).
 * Returns 0 on success, -1 on failure.
 */
static int
eal_parse_simd_bitwidth(const char *arg)
{
	char *end;
	unsigned long bitwidth;
	int ret;
	struct internal_config *internal_conf =
		eal_get_internal_configuration();

	if (arg == NULL || arg[0] == '\0')
		return -1;

	errno = 0;
	bitwidth = strtoul(arg, &end, 0);

	/* check for errors */
	if (errno != 0 || end == NULL || *end != '\0' || bitwidth > RTE_VECT_SIMD_MAX)
		return -1;

	if (bitwidth == 0)
		bitwidth = (unsigned long) RTE_VECT_SIMD_MAX;
	ret = rte_vect_set_max_simd_bitwidth(bitwidth);
	if (ret < 0)
		return -1;
	/* mark as a user override so later programmatic changes are refused */
	internal_conf->max_simd_bitwidth.forced = 1;
	return 0;
}

/*
 * Parse the --base-virtaddr argument (a hexadecimal address) and store
 * it, aligned up to 16MB, in the internal configuration.
 * Returns 0 on success, -1 on failure.
 */
static int
eal_parse_base_virtaddr(const char *arg)
{
	char *end;
	uint64_t addr;
	struct internal_config *internal_conf =
		eal_get_internal_configuration();

	errno = 0;
	addr = strtoull(arg, &end, 16);

	/* check for errors */
	if ((errno != 0) || (arg[0] == '\0') || end == NULL || (*end != '\0'))
		return -1;

	/* make sure we don't exceed 32-bit boundary on 32-bit target */
#ifndef RTE_ARCH_64
	if (addr >= UINTPTR_MAX)
		return -1;
#endif

	/* align the addr on 16M boundary, 16MB is the minimum huge page
	 * size on IBM Power architecture. If the addr is aligned to 16MB,
	 * it can align to 2MB for x86. So this alignment can also be used
	 * on x86 and other architectures.
	 */
	internal_conf->base_virtaddr =
		RTE_PTR_ALIGN_CEIL((uintptr_t)addr, (size_t)RTE_PGSIZE_16M);

	return 0;
}

/*
 * Build a human-readable list of the detected CPUs, compressing
 * consecutive ids into ranges (e.g. "0-3,8,10-11"). Used in error
 * messages when -c/-l reference unavailable cores.
 * Returns NULL if no CPU is detected or on allocation failure.
 * caller is responsible for freeing the returned string
 */
static char *
available_cores(void)
{
	char *str = NULL;
	int previous;
	int sequence;
	char *tmp;
	int idx;

	/* find the first available cpu */
	for (idx = 0; idx < RTE_MAX_LCORE; idx++) {
		if (eal_cpu_detected(idx) == 0)
			continue;
		break;
	}
	if (idx >= RTE_MAX_LCORE)
		return NULL;

	/* first sequence */
	if (asprintf(&str, "%d", idx) < 0)
		return NULL;
	previous = idx;
	sequence = 0;

	for (idx++ ; idx < RTE_MAX_LCORE; idx++) {
		if (eal_cpu_detected(idx) == 0)
			continue;

		/* contiguous with the previous cpu: extend current range */
		if (idx == previous + 1) {
			previous = idx;
			sequence = 1;
			continue;
		}

		/* finish current sequence */
		if (sequence) {
			if (asprintf(&tmp, "%s-%d", str, previous) < 0) {
				free(str);
				return NULL;
			}
			free(str);
			str = tmp;
		}

		/* new sequence */
		if (asprintf(&tmp, "%s,%d", str, idx) < 0) {
			free(str);
			return NULL;
		}
		free(str);
		str = tmp;
		previous = idx;
		sequence = 0;
	}

	/* finish last sequence */
	if (sequence) {
		if (asprintf(&tmp, "%s-%d", str, previous) < 0) {
			free(str);
			return NULL;
		}
		free(str);
		str = tmp;
	}

	return str;
}

#define HUGE_UNLINK_NEVER "never"

/*
 * Parse the --huge-unlink argument into the hugepage file discipline.
 * No argument (NULL) or "always": unlink files right after mapping;
 * "existing": default behavior (no change); "never": keep files on exit
 * (may leak hugepage contents, hence the warning).
 * Returns 0 on success, -1 on an unknown argument.
 */
static int
eal_parse_huge_unlink(const char *arg, struct hugepage_file_discipline *out)
{
	if (arg == NULL || strcmp(arg, "always") == 0) {
		out->unlink_before_mapping = true;
		return 0;
	}
	if (strcmp(arg, "existing") == 0) {
		/* same as not specifying the option */
		return 0;
	}
	if (strcmp(arg, HUGE_UNLINK_NEVER) == 0) {
		RTE_LOG(WARNING, EAL, "Using --"OPT_HUGE_UNLINK"="
			HUGE_UNLINK_NEVER" may create data leaks.\n");
		out->unlink_existing = false;
		return 0;
	}
	return -1;
}

/*
 * Parse one EAL command-line option common to all OSes.
 *
 * @param opt
 *   Short option character, or an OPT_*_NUM value for long options.
 * @param optarg
 *   The option's argument (may be NULL for flag options).
 * @param conf
 *   Internal configuration updated in place.
 * @return
 *   0 on success, -1 on error, 1 when the option is not handled here
 *   and must be processed by the OS-specific caller.
 */
int
eal_parse_common_option(int opt, const char *optarg,
			struct internal_config *conf)
{
	/* static: -a/-b exclusivity is tracked across repeated calls */
	static int b_used;
	static int a_used;

	switch (opt) {
	case 'b':
		if (a_used)
			goto ba_conflict;
		if (eal_option_device_add(RTE_DEVTYPE_BLOCKED, optarg) < 0)
			return -1;
		b_used = 1;
		break;

	case 'a':
		if (b_used)
			goto ba_conflict;
		if (eal_option_device_add(RTE_DEVTYPE_ALLOWED, optarg) < 0)
			return -1;
		a_used = 1;
		break;
	/* coremask */
	case 'c': {
		int lcore_indexes[RTE_MAX_LCORE];

		if (eal_service_cores_parsed())
			RTE_LOG(WARNING, EAL,
				"Service cores parsed before dataplane cores. Please ensure -c is before -s or -S\n");
		if (rte_eal_parse_coremask(optarg, lcore_indexes) < 0) {
			RTE_LOG(ERR, EAL, "invalid coremask syntax\n");
			return -1;
		}
		if (update_lcore_config(lcore_indexes) < 0) {
			char *available = available_cores();

			RTE_LOG(ERR, EAL,
				"invalid coremask, please check specified cores are part of %s\n",
				available);
			free(available);
			return -1;
		}

		/* -c, -l and --lcores are mutually exclusive */
		if (core_parsed) {
			RTE_LOG(ERR, EAL, "Option -c is ignored, because (%s) is set!\n",
				(core_parsed == LCORE_OPT_LST) ? "-l" :
				(core_parsed == LCORE_OPT_MAP) ? "--lcore" :
				"-c");
			return -1;
		}

		core_parsed = LCORE_OPT_MSK;
		break;
	}
	/* corelist */
	case 'l': {
		int lcore_indexes[RTE_MAX_LCORE];

		if (eal_service_cores_parsed())
			RTE_LOG(WARNING, EAL,
				"Service cores parsed before dataplane cores. Please ensure -l is before -s or -S\n");

		if (eal_parse_corelist(optarg, lcore_indexes) < 0) {
			RTE_LOG(ERR, EAL, "invalid core list syntax\n");
			return -1;
		}
		if (update_lcore_config(lcore_indexes) < 0) {
			char *available = available_cores();

			RTE_LOG(ERR, EAL,
				"invalid core list, please check specified cores are part of %s\n",
				available);
			free(available);
			return -1;
		}

		if (core_parsed) {
			RTE_LOG(ERR, EAL, "Option -l is ignored, because (%s) is set!\n",
				(core_parsed == LCORE_OPT_MSK) ? "-c" :
				(core_parsed == LCORE_OPT_MAP) ? "--lcore" :
				"-l");
			return -1;
		}

		core_parsed = LCORE_OPT_LST;
		break;
	}
	/* service coremask */
	case 's':
		if (eal_parse_service_coremask(optarg) < 0) {
			RTE_LOG(ERR, EAL, "invalid service coremask\n");
			return -1;
		}
		break;
	/* service corelist */
	case 'S':
		if (eal_parse_service_corelist(optarg) < 0) {
			RTE_LOG(ERR, EAL, "invalid service core list\n");
			return -1;
		}
		break;
	/* size of memory */
	case 'm':
		/* argument is in megabytes; stored in bytes */
		conf->memory = atoi(optarg);
		conf->memory *= 1024ULL;
		conf->memory *= 1024ULL;
		mem_parsed = 1;
		break;
	/* force number of channels */
	case 'n':
		conf->force_nchannel = atoi(optarg);
		if (conf->force_nchannel == 0) {
			RTE_LOG(ERR, EAL, "invalid channel number\n");
			return -1;
		}
		break;
	/* force number of ranks */
	case 'r':
		conf->force_nrank = atoi(optarg);
		if (conf->force_nrank == 0 ||
		    conf->force_nrank > 16) {
			RTE_LOG(ERR, EAL, "invalid rank number\n");
			return -1;
		}
		break;
	/* force loading of external driver */
	case 'd':
		if (eal_plugin_add(optarg) == -1)
			return -1;
		break;
	case 'v':
		/* since message is explicitly requested by user, we
		 * write message at highest log level so it can always
		 * be seen
		 * even if info or warning messages are disabled */
		RTE_LOG(CRIT, EAL, "RTE Version: '%s'\n", rte_version());
		break;

	/* long options */
	case OPT_HUGE_UNLINK_NUM:
		if (eal_parse_huge_unlink(optarg, &conf->hugepage_file) < 0) {
			RTE_LOG(ERR, EAL, "invalid --"OPT_HUGE_UNLINK" option\n");
			return -1;
		}
		break;

	case OPT_NO_HUGE_NUM:
		conf->no_hugetlbfs = 1;
		/* no-huge is legacy mem */
		conf->legacy_mem = 1;
		break;

	case OPT_NO_PCI_NUM:
		conf->no_pci = 1;
		break;

	case OPT_NO_HPET_NUM:
		conf->no_hpet = 1;
		break;

	case OPT_VMWARE_TSC_MAP_NUM:
		conf->vmware_tsc_map = 1;
		break;

	case OPT_NO_SHCONF_NUM:
		conf->no_shconf = 1;
		break;

	case OPT_IN_MEMORY_NUM:
		conf->in_memory = 1;
		/* in-memory is a superset of noshconf and huge-unlink */
		conf->no_shconf = 1;
		conf->hugepage_file.unlink_before_mapping = true;
		break;

	case OPT_PROC_TYPE_NUM:
		conf->process_type = eal_parse_proc_type(optarg);
		break;

	case OPT_MAIN_LCORE_NUM:
		if (eal_parse_main_lcore(optarg) < 0) {
			RTE_LOG(ERR, EAL, "invalid parameter for --"
				OPT_MAIN_LCORE "\n");
			return -1;
		}
		break;

	case OPT_VDEV_NUM:
		if (eal_option_device_add(RTE_DEVTYPE_VIRTUAL,
				optarg) < 0) {
			return -1;
		}
		break;

#ifndef RTE_EXEC_ENV_WINDOWS
	case OPT_SYSLOG_NUM:
		if (eal_parse_syslog(optarg, conf) < 0) {
			RTE_LOG(ERR, EAL, "invalid parameters for --"
				OPT_SYSLOG "\n");
			return -1;
		}
		break;
#endif

	case OPT_LOG_LEVEL_NUM: {
		if (eal_parse_log_level(optarg) < 0) {
			RTE_LOG(ERR, EAL,
				"invalid parameters for --"
				OPT_LOG_LEVEL "\n");
			return -1;
		}
		break;
	}

#ifndef RTE_EXEC_ENV_WINDOWS
	case OPT_TRACE_NUM: {
		if (eal_trace_args_save(optarg) < 0) {
			RTE_LOG(ERR, EAL, "invalid parameters for --"
				OPT_TRACE "\n");
			return -1;
		}
		break;
	}

	case OPT_TRACE_DIR_NUM: {
		if (eal_trace_dir_args_save(optarg) < 0) {
			RTE_LOG(ERR, EAL, "invalid parameters for --"
				OPT_TRACE_DIR "\n");
			return -1;
		}
		break;
	}

	case OPT_TRACE_BUF_SIZE_NUM: {
		if (eal_trace_bufsz_args_save(optarg) < 0) {
			RTE_LOG(ERR, EAL, "invalid parameters for --"
				OPT_TRACE_BUF_SIZE "\n");
			return -1;
		}
		break;
	}

	case OPT_TRACE_MODE_NUM: {
		if (eal_trace_mode_args_save(optarg) < 0) {
			RTE_LOG(ERR, EAL, "invalid parameters for --"
				OPT_TRACE_MODE "\n");
			return -1;
		}
		break;
	}
#endif /* !RTE_EXEC_ENV_WINDOWS */

	case OPT_LCORES_NUM:
		if (eal_parse_lcores(optarg) < 0) {
			RTE_LOG(ERR, EAL, "invalid parameter for --"
				OPT_LCORES "\n");
			return -1;
		}

		if (core_parsed) {
			RTE_LOG(ERR, EAL, "Option --lcore is ignored, because (%s) is set!\n",
				(core_parsed == LCORE_OPT_LST) ? "-l" :
				(core_parsed == LCORE_OPT_MSK) ? "-c" :
				"--lcore");
			return -1;
		}

		core_parsed = LCORE_OPT_MAP;
		break;
	case OPT_LEGACY_MEM_NUM:
		conf->legacy_mem = 1;
		break;
	case OPT_SINGLE_FILE_SEGMENTS_NUM:
		conf->single_file_segments = 1;
		break;
	case OPT_IOVA_MODE_NUM:
		if (eal_parse_iova_mode(optarg) < 0) {
			RTE_LOG(ERR, EAL, "invalid parameters for --"
				OPT_IOVA_MODE "\n");
			return -1;
		}
		break;
	case OPT_BASE_VIRTADDR_NUM:
		if (eal_parse_base_virtaddr(optarg) < 0) {
			RTE_LOG(ERR, EAL, "invalid parameter for --"
				OPT_BASE_VIRTADDR "\n");
			return -1;
		}
		break;
	case OPT_TELEMETRY_NUM:
		/* telemetry is on by default: nothing to record */
		break;
	case OPT_NO_TELEMETRY_NUM:
		conf->no_telemetry = 1;
		break;
	case OPT_FORCE_MAX_SIMD_BITWIDTH_NUM:
		if (eal_parse_simd_bitwidth(optarg) < 0) {
			RTE_LOG(ERR, EAL, "invalid parameter for --"
				OPT_FORCE_MAX_SIMD_BITWIDTH "\n");
			return -1;
		}
		break;

	/* don't know what to do, leave this to caller */
	default:
		return 1;

	}

	return 0;

ba_conflict:
	RTE_LOG(ERR, EAL,
		"Options allow (-a) and block (-b) can't be used at the same time\n");
	return -1;
}

/*
 * When no -c/-l/--lcores option was given: keep only the lcores that
 * are in the process's startup CPU affinity, turning the rest off.
 */
static void
eal_auto_detect_cores(struct rte_config *cfg)
{
	unsigned int lcore_id;
	unsigned int removed = 0;
	rte_cpuset_t affinity_set;

	/* on failure fall back to an empty set: all lcores get removed */
	if (rte_thread_get_affinity_by_id(rte_thread_self(), &affinity_set) != 0)
		CPU_ZERO(&affinity_set);

	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (cfg->lcore_role[lcore_id] == ROLE_RTE &&
		    !CPU_ISSET(lcore_id, &affinity_set)) {
			cfg->lcore_role[lcore_id] = ROLE_OFF;
			removed++;
		}
	}

	cfg->lcore_count -= removed;
}

/*
 * Compute the cpuset used for control threads: the CPUs of the process
 * affinity that are NOT used by any active lcore. If that set is empty,
 * fall back to the main lcore's cpuset.
 */
static void
compute_ctrl_threads_cpuset(struct internal_config *internal_cfg)
{
	rte_cpuset_t *cpuset = &internal_cfg->ctrl_cpuset;
	rte_cpuset_t default_set;
	unsigned int lcore_id;

	/* collect every CPU used by an lcore, then invert */
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (rte_lcore_has_role(lcore_id, ROLE_OFF))
			continue;
		RTE_CPU_OR(cpuset, cpuset, &lcore_config[lcore_id].cpuset);
	}
	RTE_CPU_NOT(cpuset, cpuset);

	if (rte_thread_get_affinity_by_id(rte_thread_self(), &default_set) != 0)
		CPU_ZERO(&default_set);

	RTE_CPU_AND(cpuset, cpuset, &default_set);

	/* if no remaining cpu, use main lcore cpu affinity */
	if (!CPU_COUNT(cpuset)) {
		memcpy(cpuset, &lcore_config[rte_get_main_lcore()].cpuset,
			sizeof(*cpuset));
	}
}

/* Free the heap-allocated strings owned by the internal configuration. */
int
eal_cleanup_config(struct internal_config *internal_cfg)
{
	free(internal_cfg->hugefile_prefix);
	free(internal_cfg->hugepage_dir);
	free(internal_cfg->user_mbuf_pool_ops_name);

	return 0;
}

/*
 * Finalize the configuration after option parsing: auto-detect cores
 * if none were specified, resolve the "auto" process type, pick the
 * default main lcore, compute the control-thread cpuset, and sum the
 * per-socket memory requests. Returns 0 on success, -1 on failure.
 */
int
eal_adjust_config(struct internal_config *internal_cfg)
{
	int i;
	struct rte_config *cfg = rte_eal_get_configuration();
	struct internal_config *internal_conf =
		eal_get_internal_configuration();

	if (!core_parsed)
		eal_auto_detect_cores(cfg);

	if (internal_conf->process_type == RTE_PROC_AUTO)
		internal_conf->process_type = eal_proc_type_detect();

	/* default main lcore is the first one */
	if (!main_lcore_parsed) {
		cfg->main_lcore = rte_get_next_lcore(-1, 0, 0);
		if (cfg->main_lcore >= RTE_MAX_LCORE)
			return -1;
		lcore_config[cfg->main_lcore].core_role = ROLE_RTE;
	}

	compute_ctrl_threads_cpuset(internal_cfg);

	/* if no memory amounts were requested, this will result in 0 and
	 * will be overridden later, right after eal_hugepage_info_init() */
	for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
		internal_cfg->memory += internal_cfg->socket_mem[i];

	return 0;
}

/*
 * Cross-check the parsed options for inconsistent or unsupported
 * combinations. Returns 0 when the configuration is acceptable,
 * -1 on a fatal conflict.
 */
int
eal_check_common_options(struct internal_config *internal_cfg)
{
	struct rte_config *cfg = rte_eal_get_configuration();
	const struct internal_config *internal_conf =
		eal_get_internal_configuration();

	if (cfg->lcore_role[cfg->main_lcore] != ROLE_RTE) {
		RTE_LOG(ERR, EAL, "Main lcore is not enabled for DPDK\n");
		return -1;
	}

	if (internal_cfg->process_type == RTE_PROC_INVALID) {
		RTE_LOG(ERR, EAL, "Invalid process type specified\n");
		return -1;
	}
	if (internal_cfg->hugefile_prefix != NULL &&
			strlen(internal_cfg->hugefile_prefix) < 1) {
		RTE_LOG(ERR, EAL, "Invalid length of --" OPT_FILE_PREFIX " option\n");
		return -1;
	}
	if (internal_cfg->hugepage_dir != NULL &&
			strlen(internal_cfg->hugepage_dir) < 1) {
		RTE_LOG(ERR, EAL, "Invalid length of --" OPT_HUGE_DIR" option\n");
		return -1;
	}
	if (internal_cfg->user_mbuf_pool_ops_name != NULL &&
			strlen(internal_cfg->user_mbuf_pool_ops_name) < 1) {
		RTE_LOG(ERR, EAL, "Invalid length of --" OPT_MBUF_POOL_OPS_NAME" option\n");
		return -1;
	}
	/* '%' would break the per-process hugefile path format strings */
	if (strchr(eal_get_hugefile_prefix(), '%') != NULL) {
		RTE_LOG(ERR, EAL, "Invalid char, '%%', in --"OPT_FILE_PREFIX" "
			"option\n");
		return -1;
	}
	if (mem_parsed && internal_cfg->force_sockets == 1) {
		RTE_LOG(ERR, EAL, "Options -m and --"OPT_SOCKET_MEM" cannot "
			"be specified at the same time\n");
		return -1;
	}
	if (internal_cfg->no_hugetlbfs && internal_cfg->force_sockets == 1) {
		RTE_LOG(ERR, EAL, "Option --"OPT_SOCKET_MEM" cannot "
			"be specified together with --"OPT_NO_HUGE"\n");
		return -1;
	}
	if (internal_cfg->no_hugetlbfs &&
			internal_cfg->hugepage_file.unlink_before_mapping &&
			!internal_cfg->in_memory) {
		RTE_LOG(ERR, EAL, "Option --"OPT_HUGE_UNLINK" cannot "
			"be specified together with --"OPT_NO_HUGE"\n");
		return -1;
	}
	if (internal_cfg->no_hugetlbfs &&
			internal_cfg->huge_worker_stack_size != 0) {
		RTE_LOG(ERR, EAL, "Option --"OPT_HUGE_WORKER_STACK" cannot "
			"be specified together with --"OPT_NO_HUGE"\n");
		return -1;
	}
	/* NOTE(review): this combination only logs an error but does not
	 * fail (no return -1) — presumably intentional soft rejection;
	 * confirm against upstream intent before changing. */
	if (internal_conf->force_socket_limits && internal_conf->legacy_mem) {
		RTE_LOG(ERR, EAL, "Option --"OPT_SOCKET_LIMIT
			" is only supported in non-legacy memory mode\n");
	}
	if (internal_cfg->single_file_segments &&
			internal_cfg->hugepage_file.unlink_before_mapping &&
			!internal_cfg->in_memory) {
		RTE_LOG(ERR, EAL, "Option --"OPT_SINGLE_FILE_SEGMENTS" is "
			"not compatible with --"OPT_HUGE_UNLINK"\n");
		return -1;
	}
	if (!internal_cfg->hugepage_file.unlink_existing &&
			internal_cfg->in_memory) {
		RTE_LOG(ERR, EAL, "Option --"OPT_IN_MEMORY" is not compatible "
			"with --"OPT_HUGE_UNLINK"="HUGE_UNLINK_NEVER"\n");
		return -1;
	}
	if (internal_cfg->legacy_mem &&
			internal_cfg->in_memory) {
		RTE_LOG(ERR, EAL, "Option --"OPT_LEGACY_MEM" is not compatible "
			"with --"OPT_IN_MEMORY"\n");
		return -1;
	}
	if (internal_cfg->legacy_mem && internal_cfg->match_allocations) {
		RTE_LOG(ERR, EAL, "Option --"OPT_LEGACY_MEM" is not compatible "
			"with --"OPT_MATCH_ALLOCATIONS"\n");
		return -1;
	}
	if (internal_cfg->no_hugetlbfs && internal_cfg->match_allocations) {
		RTE_LOG(ERR, EAL, "Option --"OPT_NO_HUGE" is not compatible "
			"with --"OPT_MATCH_ALLOCATIONS"\n");
		return -1;
	}
	if (internal_cfg->legacy_mem && internal_cfg->memory == 0) {
		RTE_LOG(NOTICE, EAL, "Static memory layout is selected, "
			"amount of reserved memory can be adjusted with "
			"-m or --"OPT_SOCKET_MEM"\n");
	}

	return 0;
}

/* Return the currently configured maximum SIMD bitwidth. */
uint16_t
rte_vect_get_max_simd_bitwidth(void)
{
	const struct internal_config *internal_conf =
		eal_get_internal_configuration();
	return internal_conf->max_simd_bitwidth.bitwidth;
}

/*
 * Set the maximum SIMD bitwidth. Refused (-EPERM) when the user forced
 * a value on the command line; -EINVAL for values below
 * RTE_VECT_SIMD_DISABLED or not a power of two.
 * NOTE(review): the NOTICE message lacks a trailing '\n' — kept
 * byte-identical here; verify against upstream before "fixing".
 */
int
rte_vect_set_max_simd_bitwidth(uint16_t bitwidth)
{
	struct internal_config *internal_conf =
		eal_get_internal_configuration();
	if (internal_conf->max_simd_bitwidth.forced) {
		RTE_LOG(NOTICE, EAL, "Cannot set max SIMD bitwidth - user runtime override enabled");
		return -EPERM;
	}

	if (bitwidth < RTE_VECT_SIMD_DISABLED || !rte_is_power_of_2(bitwidth)) {
		RTE_LOG(ERR, EAL, "Invalid bitwidth value!\n");
		return -EINVAL;
	}
	internal_conf->max_simd_bitwidth.bitwidth = bitwidth;
	return 0;
}

/* Print the usage text for the EAL options common to all OSes. */
void
eal_common_usage(void)
{
	printf("[options]\n\n"
	       "EAL common options:\n"
	       "  -c COREMASK         Hexadecimal bitmask of cores to run on\n"
	       "  -l CORELIST         List of cores to run on\n"
	       "                      The argument format is <c1>[-c2][,c3[-c4],...]\n"
	       "                      where c1, c2, etc are core indexes between 0 and %d\n"
	       "  --"OPT_LCORES" COREMAP    Map lcore set to physical cpu set\n"
	       "                      The argument format is\n"
	       "                            '<lcores[@cpus]>[<,lcores[@cpus]>...]'\n"
	       "                      lcores and cpus list are grouped by '(' and ')'\n"
	       "                      Within the group, '-' is used for range separator,\n"
	       "                      ',' is used for single number separator.\n"
	       "                      '( )' can be omitted for single element group,\n"
	       "                      '@' can be omitted if cpus and lcores have the same value\n"
	       "  -s SERVICE COREMASK Hexadecimal bitmask of cores to be used as service cores\n"
	       "  --"OPT_MAIN_LCORE" ID   Core ID that is used as main\n"
	       "  --"OPT_MBUF_POOL_OPS_NAME" Pool ops name for mbuf to use\n"
	       "  -n CHANNELS         Number of memory channels\n"
	       "  -m MB               Memory to allocate (see also --"OPT_SOCKET_MEM")\n"
	       "  -r RANKS            Force number of memory ranks (don't detect)\n"
	       "  -b, --block         Add a device to the blocked list.\n"
	       "                      Prevent EAL from using this device. The argument\n"
	       "                      format for PCI devices is <domain:bus:devid.func>.\n"
	       "  -a, --allow         Add a device to the allow list.\n"
	       "                      Only use the specified devices. The argument format\n"
	       "                      for PCI devices is <[domain:]bus:devid.func>.\n"
	       "                      This option can be present several times.\n"
	       "                      [NOTE: " OPT_DEV_ALLOW " cannot be used with "OPT_DEV_BLOCK" option]\n"
	       "  --"OPT_VDEV"              Add a virtual device.\n"
	       "                      The argument format is <driver><id>[,key=val,...]\n"
	       "                      (ex: --vdev=net_pcap0,iface=eth2).\n"
	       "  --"OPT_IOVA_MODE"   Set IOVA mode. 'pa' for IOVA_PA\n"
	       "                      'va' for IOVA_VA\n"
	       "  -d LIB.so|DIR       Add a driver or driver directory\n"
	       "                      (can be used multiple times)\n"
	       "  --"OPT_VMWARE_TSC_MAP"    Use VMware TSC map instead of native RDTSC\n"
	       "  --"OPT_PROC_TYPE"         Type of this process (primary|secondary|auto)\n"
#ifndef RTE_EXEC_ENV_WINDOWS
	       "  --"OPT_SYSLOG"            Set syslog facility\n"
#endif
	       "  --"OPT_LOG_LEVEL"=<level> Set global log level\n"
	       "  --"OPT_LOG_LEVEL"=<type-match>:<level>\n"
	       "                      Set specific log level\n"
	       "  --"OPT_LOG_LEVEL"=help    Show log types and levels\n"
#ifndef RTE_EXEC_ENV_WINDOWS
	       "  --"OPT_TRACE"=<regex-match>\n"
	       "                      Enable trace based on regular expression trace name.\n"
	       "                      By default, the trace is disabled.\n"
	       "                      User must specify this option to enable trace.\n"
	       "  --"OPT_TRACE_DIR"=<directory path>\n"
	       "                      Specify trace directory for trace output.\n"
	       "                      By default, trace output will created at\n"
	       "                      $HOME directory and parameter must be\n"
	       "                      specified once only.\n"
	       "  --"OPT_TRACE_BUF_SIZE"=<int>\n"
	       "                      Specify maximum size of allocated memory\n"
	       "                      for trace output for each thread. Valid\n"
	       "                      unit can be either 'B|K|M' for 'Bytes',\n"
	       "                      'KBytes' and 'MBytes' respectively.\n"
	       "                      Default is 1MB and parameter must be\n"
	       "                      specified once only.\n"
	       "  --"OPT_TRACE_MODE"=<o[verwrite] | d[iscard]>\n"
	       "                      Specify the mode of update of trace\n"
	       "                      output file. Either update on a file can\n"
	       "                      be wrapped or discarded when file size\n"
	       "                      reaches its maximum limit.\n"
	       "                      Default mode is 'overwrite' and parameter\n"
	       "                      must be specified once only.\n"
#endif /* !RTE_EXEC_ENV_WINDOWS */
	       "  -v                  Display version information on startup\n"
	       "  -h, --help          This help\n"
	       "  --"OPT_IN_MEMORY"   Operate entirely in memory. This will\n"
	       "                      disable secondary process support\n"
	       "  --"OPT_BASE_VIRTADDR"     Base virtual address\n"
	       "  --"OPT_TELEMETRY"   Enable telemetry support (on by default)\n"
	       "  --"OPT_NO_TELEMETRY"   Disable telemetry support\n"
	       "  --"OPT_FORCE_MAX_SIMD_BITWIDTH" Force the max SIMD bitwidth\n"
	       "\nEAL options for DEBUG use only:\n"
	       "  --"OPT_HUGE_UNLINK"[=existing|always|never]\n"
	       "                      When to unlink files in hugetlbfs\n"
	       "                      ('existing' by default, no value means 'always')\n"
	       "  --"OPT_NO_HUGE"           Use malloc instead of hugetlbfs\n"
	       "  --"OPT_NO_PCI"            Disable PCI\n"
	       "  --"OPT_NO_HPET"           Disable HPET\n"
	       "  --"OPT_NO_SHCONF"         No shared config (mmap'd files)\n"
	       "\n", RTE_MAX_LCORE);
}