/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation.
 * Copyright(c) 2014 6WIND S.A.
 */

#include <stdlib.h>
#include <string.h>
#ifndef RTE_EXEC_ENV_WINDOWS
#include <syslog.h>
#endif
#include <ctype.h>
#include <limits.h>
#include <errno.h>
#include <getopt.h>
#ifndef RTE_EXEC_ENV_WINDOWS
#include <dlfcn.h>
#include <libgen.h>
#endif
#include <sys/stat.h>
#ifndef RTE_EXEC_ENV_WINDOWS
#include <dirent.h>
#endif

#include <rte_string_fns.h>
#include <rte_eal.h>
#include <rte_log.h>
#include <rte_lcore.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_version.h>
#include <rte_devargs.h>
#include <rte_memcpy.h>
#ifndef RTE_EXEC_ENV_WINDOWS
#include <rte_telemetry.h>
#endif
#include <rte_vect.h>

#include "eal_internal_cfg.h"
#include "eal_options.h"
#include "eal_filesystem.h"
#include "eal_private.h"
#include "eal_log.h"
#ifndef RTE_EXEC_ENV_WINDOWS
#include "eal_trace.h"
#endif

#define BITS_PER_HEX 4
#define LCORE_OPT_LST 1
#define LCORE_OPT_MSK 2
#define LCORE_OPT_MAP 3

const char
eal_short_options[] =
	"a:" /* allow */
	"b:" /* block */
	"c:" /* coremask */
	"s:" /* service coremask */
	"d:" /* driver */
	"h"  /* help */
	"l:" /* corelist */
	"S:" /* service corelist */
	"m:" /* memory size */
	"n:" /* memory channels */
	"r:" /* memory ranks */
	"v"  /* version */
	;

const struct option
eal_long_options[] = {
	{OPT_BASE_VIRTADDR, 1, NULL, OPT_BASE_VIRTADDR_NUM},
	{OPT_CREATE_UIO_DEV, 0, NULL, OPT_CREATE_UIO_DEV_NUM},
	{OPT_FILE_PREFIX, 1, NULL, OPT_FILE_PREFIX_NUM},
	{OPT_HELP, 0, NULL, OPT_HELP_NUM},
	{OPT_HUGE_DIR, 1, NULL, OPT_HUGE_DIR_NUM},
	{OPT_HUGE_UNLINK, 2, NULL, OPT_HUGE_UNLINK_NUM},
	{OPT_IOVA_MODE, 1, NULL, OPT_IOVA_MODE_NUM},
	{OPT_LCORES, 1, NULL, OPT_LCORES_NUM},
	{OPT_LOG_LEVEL, 1, NULL, OPT_LOG_LEVEL_NUM},
	{OPT_TRACE, 1, NULL, OPT_TRACE_NUM},
	{OPT_TRACE_DIR, 1, NULL, OPT_TRACE_DIR_NUM},
	{OPT_TRACE_BUF_SIZE, 1, NULL, OPT_TRACE_BUF_SIZE_NUM},
	{OPT_TRACE_MODE, 1, NULL, OPT_TRACE_MODE_NUM},
	{OPT_MAIN_LCORE, 1, NULL, OPT_MAIN_LCORE_NUM},
	{OPT_MBUF_POOL_OPS_NAME, 1, NULL, OPT_MBUF_POOL_OPS_NAME_NUM},
	{OPT_NO_HPET, 0, NULL, OPT_NO_HPET_NUM},
	{OPT_NO_HUGE, 0, NULL, OPT_NO_HUGE_NUM},
	{OPT_NO_PCI, 0, NULL, OPT_NO_PCI_NUM},
	{OPT_NO_SHCONF, 0, NULL, OPT_NO_SHCONF_NUM},
	{OPT_IN_MEMORY, 0, NULL, OPT_IN_MEMORY_NUM},
	{OPT_DEV_BLOCK, 1, NULL, OPT_DEV_BLOCK_NUM},
	{OPT_DEV_ALLOW, 1, NULL, OPT_DEV_ALLOW_NUM},
	{OPT_PROC_TYPE, 1, NULL, OPT_PROC_TYPE_NUM},
	{OPT_SOCKET_MEM, 1, NULL, OPT_SOCKET_MEM_NUM},
	{OPT_SOCKET_LIMIT, 1, NULL, OPT_SOCKET_LIMIT_NUM},
	{OPT_SYSLOG, 1, NULL, OPT_SYSLOG_NUM},
	{OPT_VDEV, 1, NULL, OPT_VDEV_NUM},
	{OPT_VFIO_INTR, 1, NULL, OPT_VFIO_INTR_NUM},
	{OPT_VFIO_VF_TOKEN, 1, NULL, OPT_VFIO_VF_TOKEN_NUM},
	{OPT_VMWARE_TSC_MAP, 0, NULL, OPT_VMWARE_TSC_MAP_NUM},
	{OPT_LEGACY_MEM, 0, NULL, OPT_LEGACY_MEM_NUM},
	{OPT_SINGLE_FILE_SEGMENTS, 0, NULL, OPT_SINGLE_FILE_SEGMENTS_NUM},
	{OPT_MATCH_ALLOCATIONS, 0, NULL, OPT_MATCH_ALLOCATIONS_NUM},
	{OPT_TELEMETRY, 0, NULL, OPT_TELEMETRY_NUM},
	{OPT_NO_TELEMETRY, 0, NULL, OPT_NO_TELEMETRY_NUM},
	{OPT_FORCE_MAX_SIMD_BITWIDTH, 1, NULL, OPT_FORCE_MAX_SIMD_BITWIDTH_NUM},

	{0, 0, NULL, 0}
};

TAILQ_HEAD(shared_driver_list, shared_driver);

/* Definition for shared object
drivers. */ 113 struct shared_driver { 114 TAILQ_ENTRY(shared_driver) next; 115 116 char name[PATH_MAX]; 117 void* lib_handle; 118 }; 119 120 /* List of external loadable drivers */ 121 static struct shared_driver_list solib_list = 122 TAILQ_HEAD_INITIALIZER(solib_list); 123 124 #ifndef RTE_EXEC_ENV_WINDOWS 125 /* Default path of external loadable drivers */ 126 static const char *default_solib_dir = RTE_EAL_PMD_PATH; 127 #endif 128 129 /* 130 * Stringified version of solib path used by dpdk-pmdinfo.py 131 * Note: PLEASE DO NOT ALTER THIS without making a corresponding 132 * change to usertools/dpdk-pmdinfo.py 133 */ 134 static const char dpdk_solib_path[] __rte_used = 135 "DPDK_PLUGIN_PATH=" RTE_EAL_PMD_PATH; 136 137 TAILQ_HEAD(device_option_list, device_option); 138 139 struct device_option { 140 TAILQ_ENTRY(device_option) next; 141 142 enum rte_devtype type; 143 char arg[]; 144 }; 145 146 static struct device_option_list devopt_list = 147 TAILQ_HEAD_INITIALIZER(devopt_list); 148 149 static int main_lcore_parsed; 150 static int mem_parsed; 151 static int core_parsed; 152 153 /* Allow the application to print its usage message too if set */ 154 static rte_usage_hook_t rte_application_usage_hook; 155 156 /* Returns rte_usage_hook_t */ 157 rte_usage_hook_t 158 eal_get_application_usage_hook(void) 159 { 160 return rte_application_usage_hook; 161 } 162 163 /* Set a per-application usage message */ 164 rte_usage_hook_t 165 rte_set_application_usage_hook(rte_usage_hook_t usage_func) 166 { 167 rte_usage_hook_t old_func; 168 169 /* Will be NULL on the first call to denote the last usage routine. */ 170 old_func = rte_application_usage_hook; 171 rte_application_usage_hook = usage_func; 172 173 return old_func; 174 } 175 176 #ifndef RTE_EXEC_ENV_WINDOWS 177 static char **eal_args; 178 static char **eal_app_args; 179 180 #define EAL_PARAM_REQ "/eal/params" 181 #define EAL_APP_PARAM_REQ "/eal/app_params" 182 183 /* callback handler for telemetry library to report out EAL flags */ 184 int 185 handle_eal_info_request(const char *cmd, const char *params __rte_unused, 186 struct rte_tel_data *d) 187 { 188 char **args; 189 int used = 0; 190 int i = 0; 191 192 if (strcmp(cmd, EAL_PARAM_REQ) == 0) 193 args = eal_args; 194 else 195 args = eal_app_args; 196 197 rte_tel_data_start_array(d, RTE_TEL_STRING_VAL); 198 if (args == NULL || args[0] == NULL) 199 return 0; 200 201 for ( ; args[i] != NULL; i++) 202 used = rte_tel_data_add_array_string(d, args[i]); 203 return used; 204 } 205 206 int 207 eal_save_args(int argc, char **argv) 208 { 209 int i, j; 210 211 rte_telemetry_register_cmd(EAL_PARAM_REQ, handle_eal_info_request, 212 "Returns EAL commandline parameters used. Takes no parameters"); 213 rte_telemetry_register_cmd(EAL_APP_PARAM_REQ, handle_eal_info_request, 214 "Returns app commandline parameters used. Takes no parameters"); 215 216 /* clone argv to report out later. 
We overprovision, but 217 * this does not waste huge amounts of memory 218 */ 219 eal_args = calloc(argc + 1, sizeof(*eal_args)); 220 if (eal_args == NULL) 221 return -1; 222 223 for (i = 0; i < argc; i++) { 224 if (strcmp(argv[i], "--") == 0) 225 break; 226 eal_args[i] = strdup(argv[i]); 227 } 228 eal_args[i++] = NULL; /* always finish with NULL */ 229 230 /* allow reporting of any app args we know about too */ 231 if (i >= argc) 232 return 0; 233 234 eal_app_args = calloc(argc - i + 1, sizeof(*eal_args)); 235 if (eal_app_args == NULL) 236 return -1; 237 238 for (j = 0; i < argc; j++, i++) 239 eal_app_args[j] = strdup(argv[i]); 240 eal_app_args[j] = NULL; 241 242 return 0; 243 } 244 #endif 245 246 static int 247 eal_option_device_add(enum rte_devtype type, const char *optarg) 248 { 249 struct device_option *devopt; 250 size_t optlen; 251 int ret; 252 253 optlen = strlen(optarg) + 1; 254 devopt = calloc(1, sizeof(*devopt) + optlen); 255 if (devopt == NULL) { 256 RTE_LOG(ERR, EAL, "Unable to allocate device option\n"); 257 return -ENOMEM; 258 } 259 260 devopt->type = type; 261 ret = strlcpy(devopt->arg, optarg, optlen); 262 if (ret < 0) { 263 RTE_LOG(ERR, EAL, "Unable to copy device option\n"); 264 free(devopt); 265 return -EINVAL; 266 } 267 TAILQ_INSERT_TAIL(&devopt_list, devopt, next); 268 return 0; 269 } 270 271 int 272 eal_option_device_parse(void) 273 { 274 struct device_option *devopt; 275 void *tmp; 276 int ret = 0; 277 278 RTE_TAILQ_FOREACH_SAFE(devopt, &devopt_list, next, tmp) { 279 if (ret == 0) { 280 ret = rte_devargs_add(devopt->type, devopt->arg); 281 if (ret) 282 RTE_LOG(ERR, EAL, "Unable to parse device '%s'\n", 283 devopt->arg); 284 } 285 TAILQ_REMOVE(&devopt_list, devopt, next); 286 free(devopt); 287 } 288 return ret; 289 } 290 291 const char * 292 eal_get_hugefile_prefix(void) 293 { 294 const struct internal_config *internal_conf = 295 eal_get_internal_configuration(); 296 297 if (internal_conf->hugefile_prefix != NULL) 298 return internal_conf->hugefile_prefix; 299 return HUGEFILE_PREFIX_DEFAULT; 300 } 301 302 void 303 eal_reset_internal_config(struct internal_config *internal_cfg) 304 { 305 int i; 306 307 internal_cfg->memory = 0; 308 internal_cfg->force_nrank = 0; 309 internal_cfg->force_nchannel = 0; 310 internal_cfg->hugefile_prefix = NULL; 311 internal_cfg->hugepage_dir = NULL; 312 internal_cfg->hugepage_file.unlink_before_mapping = false; 313 internal_cfg->hugepage_file.unlink_existing = true; 314 internal_cfg->force_sockets = 0; 315 /* zero out the NUMA config */ 316 for (i = 0; i < RTE_MAX_NUMA_NODES; i++) 317 internal_cfg->socket_mem[i] = 0; 318 internal_cfg->force_socket_limits = 0; 319 /* zero out the NUMA limits config */ 320 for (i = 0; i < RTE_MAX_NUMA_NODES; i++) 321 internal_cfg->socket_limit[i] = 0; 322 /* zero out hugedir descriptors */ 323 for (i = 0; i < MAX_HUGEPAGE_SIZES; i++) { 324 memset(&internal_cfg->hugepage_info[i], 0, 325 sizeof(internal_cfg->hugepage_info[0])); 326 internal_cfg->hugepage_info[i].lock_descriptor = -1; 327 } 328 internal_cfg->base_virtaddr = 0; 329 330 #ifdef LOG_DAEMON 331 internal_cfg->syslog_facility = LOG_DAEMON; 332 #endif 333 334 /* if set to NONE, interrupt mode is determined automatically */ 335 internal_cfg->vfio_intr_mode = RTE_INTR_MODE_NONE; 336 memset(internal_cfg->vfio_vf_token, 0, 337 sizeof(internal_cfg->vfio_vf_token)); 338 339 #ifdef RTE_LIBEAL_USE_HPET 340 internal_cfg->no_hpet = 0; 341 #else 342 internal_cfg->no_hpet = 1; 343 #endif 344 internal_cfg->vmware_tsc_map = 0; 345 internal_cfg->create_uio_dev = 0; 
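	/* Descriptive note (added): RTE_IOVA_DC ("don't care") leaves the
	 * IOVA mode undecided here; it is selected later during
	 * rte_eal_init() from bus/device requirements unless the user
	 * forces it with --iova-mode.
	 */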
346 internal_cfg->iova_mode = RTE_IOVA_DC; 347 internal_cfg->user_mbuf_pool_ops_name = NULL; 348 CPU_ZERO(&internal_cfg->ctrl_cpuset); 349 internal_cfg->init_complete = 0; 350 internal_cfg->max_simd_bitwidth.bitwidth = RTE_VECT_DEFAULT_SIMD_BITWIDTH; 351 internal_cfg->max_simd_bitwidth.forced = 0; 352 } 353 354 static int 355 eal_plugin_add(const char *path) 356 { 357 struct shared_driver *solib; 358 359 solib = malloc(sizeof(*solib)); 360 if (solib == NULL) { 361 RTE_LOG(ERR, EAL, "malloc(solib) failed\n"); 362 return -1; 363 } 364 memset(solib, 0, sizeof(*solib)); 365 strlcpy(solib->name, path, PATH_MAX); 366 TAILQ_INSERT_TAIL(&solib_list, solib, next); 367 368 return 0; 369 } 370 371 #ifdef RTE_EXEC_ENV_WINDOWS 372 int 373 eal_plugins_init(void) 374 { 375 return 0; 376 } 377 #else 378 379 static int 380 eal_plugindir_init(const char *path) 381 { 382 DIR *d = NULL; 383 struct dirent *dent = NULL; 384 char sopath[PATH_MAX]; 385 386 if (path == NULL || *path == '\0') 387 return 0; 388 389 d = opendir(path); 390 if (d == NULL) { 391 RTE_LOG(ERR, EAL, "failed to open directory %s: %s\n", 392 path, strerror(errno)); 393 return -1; 394 } 395 396 while ((dent = readdir(d)) != NULL) { 397 struct stat sb; 398 int nlen = strnlen(dent->d_name, sizeof(dent->d_name)); 399 400 /* check if name ends in .so or .so.ABI_VERSION */ 401 if (strcmp(&dent->d_name[nlen - 3], ".so") != 0 && 402 strcmp(&dent->d_name[nlen - 4 - strlen(ABI_VERSION)], 403 ".so."ABI_VERSION) != 0) 404 continue; 405 406 snprintf(sopath, sizeof(sopath), "%s/%s", path, dent->d_name); 407 408 /* if a regular file, add to list to load */ 409 if (!(stat(sopath, &sb) == 0 && S_ISREG(sb.st_mode))) 410 continue; 411 412 if (eal_plugin_add(sopath) == -1) 413 break; 414 } 415 416 closedir(d); 417 /* XXX this ignores failures from readdir() itself */ 418 return (dent == NULL) ? 
0 : -1; 419 } 420 421 static int 422 verify_perms(const char *dirpath) 423 { 424 struct stat st; 425 426 /* if not root, check down one level first */ 427 if (strcmp(dirpath, "/") != 0) { 428 static __thread char last_dir_checked[PATH_MAX]; 429 char copy[PATH_MAX]; 430 const char *dir; 431 432 strlcpy(copy, dirpath, PATH_MAX); 433 dir = dirname(copy); 434 if (strncmp(dir, last_dir_checked, PATH_MAX) != 0) { 435 if (verify_perms(dir) != 0) 436 return -1; 437 strlcpy(last_dir_checked, dir, PATH_MAX); 438 } 439 } 440 441 /* call stat to check for permissions and ensure not world writable */ 442 if (stat(dirpath, &st) != 0) { 443 RTE_LOG(ERR, EAL, "Error with stat on %s, %s\n", 444 dirpath, strerror(errno)); 445 return -1; 446 } 447 if (st.st_mode & S_IWOTH) { 448 RTE_LOG(ERR, EAL, 449 "Error, directory path %s is world-writable and insecure\n", 450 dirpath); 451 return -1; 452 } 453 454 return 0; 455 } 456 457 static void * 458 eal_dlopen(const char *pathname) 459 { 460 void *retval = NULL; 461 char *realp = realpath(pathname, NULL); 462 463 if (realp == NULL && errno == ENOENT) { 464 /* not a full or relative path, try a load from system dirs */ 465 retval = dlopen(pathname, RTLD_NOW); 466 if (retval == NULL) 467 RTE_LOG(ERR, EAL, "%s\n", dlerror()); 468 return retval; 469 } 470 if (realp == NULL) { 471 RTE_LOG(ERR, EAL, "Error with realpath for %s, %s\n", 472 pathname, strerror(errno)); 473 goto out; 474 } 475 if (strnlen(realp, PATH_MAX) == PATH_MAX) { 476 RTE_LOG(ERR, EAL, "Error, driver path greater than PATH_MAX\n"); 477 goto out; 478 } 479 480 /* do permissions checks */ 481 if (verify_perms(realp) != 0) 482 goto out; 483 484 retval = dlopen(realp, RTLD_NOW); 485 if (retval == NULL) 486 RTE_LOG(ERR, EAL, "%s\n", dlerror()); 487 out: 488 free(realp); 489 return retval; 490 } 491 492 static int 493 is_shared_build(void) 494 { 495 #define EAL_SO "librte_eal.so" 496 char soname[32]; 497 size_t len, minlen = strlen(EAL_SO); 498 499 len = strlcpy(soname, EAL_SO"."ABI_VERSION, sizeof(soname)); 500 if (len > sizeof(soname)) { 501 RTE_LOG(ERR, EAL, "Shared lib name too long in shared build check\n"); 502 len = sizeof(soname) - 1; 503 } 504 505 while (len >= minlen) { 506 void *handle; 507 508 /* check if we have this .so loaded, if so - shared build */ 509 RTE_LOG(DEBUG, EAL, "Checking presence of .so '%s'\n", soname); 510 handle = dlopen(soname, RTLD_LAZY | RTLD_NOLOAD); 511 if (handle != NULL) { 512 RTE_LOG(INFO, EAL, "Detected shared linkage of DPDK\n"); 513 dlclose(handle); 514 return 1; 515 } 516 517 /* remove any version numbers off the end to retry */ 518 while (len-- > 0) 519 if (soname[len] == '.') { 520 soname[len] = '\0'; 521 break; 522 } 523 } 524 525 RTE_LOG(INFO, EAL, "Detected static linkage of DPDK\n"); 526 return 0; 527 } 528 529 int 530 eal_plugins_init(void) 531 { 532 struct shared_driver *solib = NULL; 533 struct stat sb; 534 535 /* If we are not statically linked, add default driver loading 536 * path if it exists as a directory. 537 * (Using dlopen with NOLOAD flag on EAL, will return NULL if the EAL 538 * shared library is not already loaded i.e. it's statically linked.) 
539 */ 540 if (is_shared_build() && 541 *default_solib_dir != '\0' && 542 stat(default_solib_dir, &sb) == 0 && 543 S_ISDIR(sb.st_mode)) 544 eal_plugin_add(default_solib_dir); 545 546 TAILQ_FOREACH(solib, &solib_list, next) { 547 548 if (stat(solib->name, &sb) == 0 && S_ISDIR(sb.st_mode)) { 549 if (eal_plugindir_init(solib->name) == -1) { 550 RTE_LOG(ERR, EAL, 551 "Cannot init plugin directory %s\n", 552 solib->name); 553 return -1; 554 } 555 } else { 556 RTE_LOG(DEBUG, EAL, "open shared lib %s\n", 557 solib->name); 558 solib->lib_handle = eal_dlopen(solib->name); 559 if (solib->lib_handle == NULL) 560 return -1; 561 } 562 563 } 564 return 0; 565 } 566 #endif 567 568 /* 569 * Parse the coremask given as argument (hexadecimal string) and fill 570 * the global configuration (core role and core count) with the parsed 571 * value. 572 */ 573 static int xdigit2val(unsigned char c) 574 { 575 int val; 576 577 if (isdigit(c)) 578 val = c - '0'; 579 else if (isupper(c)) 580 val = c - 'A' + 10; 581 else 582 val = c - 'a' + 10; 583 return val; 584 } 585 586 static int 587 eal_parse_service_coremask(const char *coremask) 588 { 589 struct rte_config *cfg = rte_eal_get_configuration(); 590 int i, j, idx = 0; 591 unsigned int count = 0; 592 char c; 593 int val; 594 uint32_t taken_lcore_count = 0; 595 596 if (coremask == NULL) 597 return -1; 598 /* Remove all blank characters ahead and after . 599 * Remove 0x/0X if exists. 600 */ 601 while (isblank(*coremask)) 602 coremask++; 603 if (coremask[0] == '0' && ((coremask[1] == 'x') 604 || (coremask[1] == 'X'))) 605 coremask += 2; 606 i = strlen(coremask); 607 while ((i > 0) && isblank(coremask[i - 1])) 608 i--; 609 610 if (i == 0) 611 return -1; 612 613 for (i = i - 1; i >= 0 && idx < RTE_MAX_LCORE; i--) { 614 c = coremask[i]; 615 if (isxdigit(c) == 0) { 616 /* invalid characters */ 617 return -1; 618 } 619 val = xdigit2val(c); 620 for (j = 0; j < BITS_PER_HEX && idx < RTE_MAX_LCORE; 621 j++, idx++) { 622 if ((1 << j) & val) { 623 /* handle main lcore already parsed */ 624 uint32_t lcore = idx; 625 if (main_lcore_parsed && 626 cfg->main_lcore == lcore) { 627 RTE_LOG(ERR, EAL, 628 "lcore %u is main lcore, cannot use as service core\n", 629 idx); 630 return -1; 631 } 632 633 if (eal_cpu_detected(idx) == 0) { 634 RTE_LOG(ERR, EAL, 635 "lcore %u unavailable\n", idx); 636 return -1; 637 } 638 639 if (cfg->lcore_role[idx] == ROLE_RTE) 640 taken_lcore_count++; 641 642 lcore_config[idx].core_role = ROLE_SERVICE; 643 count++; 644 } 645 } 646 } 647 648 for (; i >= 0; i--) 649 if (coremask[i] != '0') 650 return -1; 651 652 for (; idx < RTE_MAX_LCORE; idx++) 653 lcore_config[idx].core_index = -1; 654 655 if (count == 0) 656 return -1; 657 658 if (core_parsed && taken_lcore_count != count) { 659 RTE_LOG(WARNING, EAL, 660 "Not all service cores are in the coremask. 
" 661 "Please ensure -c or -l includes service cores\n"); 662 } 663 664 cfg->service_lcore_count = count; 665 return 0; 666 } 667 668 static int 669 eal_service_cores_parsed(void) 670 { 671 int idx; 672 for (idx = 0; idx < RTE_MAX_LCORE; idx++) { 673 if (lcore_config[idx].core_role == ROLE_SERVICE) 674 return 1; 675 } 676 return 0; 677 } 678 679 static int 680 update_lcore_config(int *cores) 681 { 682 struct rte_config *cfg = rte_eal_get_configuration(); 683 unsigned int count = 0; 684 unsigned int i; 685 int ret = 0; 686 687 for (i = 0; i < RTE_MAX_LCORE; i++) { 688 if (cores[i] != -1) { 689 if (eal_cpu_detected(i) == 0) { 690 RTE_LOG(ERR, EAL, "lcore %u unavailable\n", i); 691 ret = -1; 692 continue; 693 } 694 cfg->lcore_role[i] = ROLE_RTE; 695 count++; 696 } else { 697 cfg->lcore_role[i] = ROLE_OFF; 698 } 699 lcore_config[i].core_index = cores[i]; 700 } 701 if (!ret) 702 cfg->lcore_count = count; 703 return ret; 704 } 705 706 static int 707 check_core_list(int *lcores, unsigned int count) 708 { 709 char lcorestr[RTE_MAX_LCORE * 10]; 710 bool overflow = false; 711 int len = 0, ret; 712 unsigned int i; 713 714 for (i = 0; i < count; i++) { 715 if (lcores[i] < RTE_MAX_LCORE) 716 continue; 717 718 RTE_LOG(ERR, EAL, "lcore %d >= RTE_MAX_LCORE (%d)\n", 719 lcores[i], RTE_MAX_LCORE); 720 overflow = true; 721 } 722 if (!overflow) 723 return 0; 724 725 /* 726 * We've encountered a core that's greater than RTE_MAX_LCORE, 727 * suggest using --lcores option to map lcores onto physical cores 728 * greater than RTE_MAX_LCORE. 729 */ 730 for (i = 0; i < count; i++) { 731 ret = snprintf(&lcorestr[len], sizeof(lcorestr) - len, 732 "%d@%d,", i, lcores[i]); 733 if (ret > 0) 734 len = len + ret; 735 } 736 if (len > 0) 737 lcorestr[len - 1] = 0; 738 RTE_LOG(ERR, EAL, "To use high physical core ids, " 739 "please use --lcores to map them to lcore ids below RTE_MAX_LCORE, " 740 "e.g. --lcores %s\n", lcorestr); 741 return -1; 742 } 743 744 static int 745 eal_parse_coremask(const char *coremask, int *cores) 746 { 747 const char *coremask_orig = coremask; 748 int lcores[RTE_MAX_LCORE]; 749 unsigned int count = 0; 750 int i, j, idx; 751 int val; 752 char c; 753 754 for (idx = 0; idx < RTE_MAX_LCORE; idx++) 755 cores[idx] = -1; 756 idx = 0; 757 758 /* Remove all blank characters ahead and after . 759 * Remove 0x/0X if exists. 760 */ 761 while (isblank(*coremask)) 762 coremask++; 763 if (coremask[0] == '0' && ((coremask[1] == 'x') 764 || (coremask[1] == 'X'))) 765 coremask += 2; 766 i = strlen(coremask); 767 while ((i > 0) && isblank(coremask[i - 1])) 768 i--; 769 if (i == 0) { 770 RTE_LOG(ERR, EAL, "No lcores in coremask: [%s]\n", 771 coremask_orig); 772 return -1; 773 } 774 775 for (i = i - 1; i >= 0; i--) { 776 c = coremask[i]; 777 if (isxdigit(c) == 0) { 778 /* invalid characters */ 779 RTE_LOG(ERR, EAL, "invalid characters in coremask: [%s]\n", 780 coremask_orig); 781 return -1; 782 } 783 val = xdigit2val(c); 784 for (j = 0; j < BITS_PER_HEX; j++, idx++) 785 { 786 if ((1 << j) & val) { 787 if (count >= RTE_MAX_LCORE) { 788 RTE_LOG(ERR, EAL, "Too many lcores provided. 
Cannot exceed RTE_MAX_LCORE (%d)\n", 789 RTE_MAX_LCORE); 790 return -1; 791 } 792 lcores[count++] = idx; 793 } 794 } 795 } 796 if (count == 0) { 797 RTE_LOG(ERR, EAL, "No lcores in coremask: [%s]\n", 798 coremask_orig); 799 return -1; 800 } 801 802 if (check_core_list(lcores, count)) 803 return -1; 804 805 /* 806 * Now that we've got a list of cores no longer than RTE_MAX_LCORE, 807 * and no lcore in that list is greater than RTE_MAX_LCORE, populate 808 * the cores array. 809 */ 810 do { 811 count--; 812 cores[lcores[count]] = count; 813 } while (count != 0); 814 815 return 0; 816 } 817 818 static int 819 eal_parse_service_corelist(const char *corelist) 820 { 821 struct rte_config *cfg = rte_eal_get_configuration(); 822 int i; 823 unsigned count = 0; 824 char *end = NULL; 825 uint32_t min, max, idx; 826 uint32_t taken_lcore_count = 0; 827 828 if (corelist == NULL) 829 return -1; 830 831 /* Remove all blank characters ahead and after */ 832 while (isblank(*corelist)) 833 corelist++; 834 i = strlen(corelist); 835 while ((i > 0) && isblank(corelist[i - 1])) 836 i--; 837 838 /* Get list of cores */ 839 min = RTE_MAX_LCORE; 840 do { 841 while (isblank(*corelist)) 842 corelist++; 843 if (*corelist == '\0') 844 return -1; 845 errno = 0; 846 idx = strtoul(corelist, &end, 10); 847 if (errno || end == NULL) 848 return -1; 849 if (idx >= RTE_MAX_LCORE) 850 return -1; 851 while (isblank(*end)) 852 end++; 853 if (*end == '-') { 854 min = idx; 855 } else if ((*end == ',') || (*end == '\0')) { 856 max = idx; 857 if (min == RTE_MAX_LCORE) 858 min = idx; 859 for (idx = min; idx <= max; idx++) { 860 if (cfg->lcore_role[idx] != ROLE_SERVICE) { 861 /* handle main lcore already parsed */ 862 uint32_t lcore = idx; 863 if (cfg->main_lcore == lcore && 864 main_lcore_parsed) { 865 RTE_LOG(ERR, EAL, 866 "Error: lcore %u is main lcore, cannot use as service core\n", 867 idx); 868 return -1; 869 } 870 if (cfg->lcore_role[idx] == ROLE_RTE) 871 taken_lcore_count++; 872 873 lcore_config[idx].core_role = 874 ROLE_SERVICE; 875 count++; 876 } 877 } 878 min = RTE_MAX_LCORE; 879 } else 880 return -1; 881 corelist = end + 1; 882 } while (*end != '\0'); 883 884 if (count == 0) 885 return -1; 886 887 if (core_parsed && taken_lcore_count != count) { 888 RTE_LOG(WARNING, EAL, 889 "Not all service cores were in the coremask. " 890 "Please ensure -c or -l includes service cores\n"); 891 } 892 893 return 0; 894 } 895 896 static int 897 eal_parse_corelist(const char *corelist, int *cores) 898 { 899 unsigned int count = 0, i; 900 int lcores[RTE_MAX_LCORE]; 901 char *end = NULL; 902 int min, max; 903 int idx; 904 905 for (idx = 0; idx < RTE_MAX_LCORE; idx++) 906 cores[idx] = -1; 907 908 /* Remove all blank characters ahead */ 909 while (isblank(*corelist)) 910 corelist++; 911 912 /* Get list of cores */ 913 min = -1; 914 do { 915 while (isblank(*corelist)) 916 corelist++; 917 if (*corelist == '\0') 918 return -1; 919 errno = 0; 920 idx = strtol(corelist, &end, 10); 921 if (errno || end == NULL) 922 return -1; 923 if (idx < 0) 924 return -1; 925 while (isblank(*end)) 926 end++; 927 if (*end == '-') { 928 min = idx; 929 } else if ((*end == ',') || (*end == '\0')) { 930 max = idx; 931 if (min == -1) 932 min = idx; 933 for (idx = min; idx <= max; idx++) { 934 bool dup = false; 935 936 /* Check if this idx is already present */ 937 for (i = 0; i < count; i++) { 938 if (lcores[i] == idx) 939 dup = true; 940 } 941 if (dup) 942 continue; 943 if (count >= RTE_MAX_LCORE) { 944 RTE_LOG(ERR, EAL, "Too many lcores provided. 
Cannot exceed RTE_MAX_LCORE (%d)\n", 945 RTE_MAX_LCORE); 946 return -1; 947 } 948 lcores[count++] = idx; 949 } 950 min = -1; 951 } else 952 return -1; 953 corelist = end + 1; 954 } while (*end != '\0'); 955 956 if (count == 0) 957 return -1; 958 959 if (check_core_list(lcores, count)) 960 return -1; 961 962 /* 963 * Now that we've got a list of cores no longer than RTE_MAX_LCORE, 964 * and no lcore in that list is greater than RTE_MAX_LCORE, populate 965 * the cores array. 966 */ 967 do { 968 count--; 969 cores[lcores[count]] = count; 970 } while (count != 0); 971 972 return 0; 973 } 974 975 /* Changes the lcore id of the main thread */ 976 static int 977 eal_parse_main_lcore(const char *arg) 978 { 979 char *parsing_end; 980 struct rte_config *cfg = rte_eal_get_configuration(); 981 982 errno = 0; 983 cfg->main_lcore = (uint32_t) strtol(arg, &parsing_end, 0); 984 if (errno || parsing_end[0] != 0) 985 return -1; 986 if (cfg->main_lcore >= RTE_MAX_LCORE) 987 return -1; 988 main_lcore_parsed = 1; 989 990 /* ensure main core is not used as service core */ 991 if (lcore_config[cfg->main_lcore].core_role == ROLE_SERVICE) { 992 RTE_LOG(ERR, EAL, 993 "Error: Main lcore is used as a service core\n"); 994 return -1; 995 } 996 997 return 0; 998 } 999 1000 /* 1001 * Parse elem, the elem could be single number/range or '(' ')' group 1002 * 1) A single number elem, it's just a simple digit. e.g. 9 1003 * 2) A single range elem, two digits with a '-' between. e.g. 2-6 1004 * 3) A group elem, combines multiple 1) or 2) with '( )'. e.g (0,2-4,6) 1005 * Within group elem, '-' used for a range separator; 1006 * ',' used for a single number. 1007 */ 1008 static int 1009 eal_parse_set(const char *input, rte_cpuset_t *set) 1010 { 1011 unsigned idx; 1012 const char *str = input; 1013 char *end = NULL; 1014 unsigned min, max; 1015 1016 CPU_ZERO(set); 1017 1018 while (isblank(*str)) 1019 str++; 1020 1021 /* only digit or left bracket is qualify for start point */ 1022 if ((!isdigit(*str) && *str != '(') || *str == '\0') 1023 return -1; 1024 1025 /* process single number or single range of number */ 1026 if (*str != '(') { 1027 errno = 0; 1028 idx = strtoul(str, &end, 10); 1029 if (errno || end == NULL || idx >= CPU_SETSIZE) 1030 return -1; 1031 else { 1032 while (isblank(*end)) 1033 end++; 1034 1035 min = idx; 1036 max = idx; 1037 if (*end == '-') { 1038 /* process single <number>-<number> */ 1039 end++; 1040 while (isblank(*end)) 1041 end++; 1042 if (!isdigit(*end)) 1043 return -1; 1044 1045 errno = 0; 1046 idx = strtoul(end, &end, 10); 1047 if (errno || end == NULL || idx >= CPU_SETSIZE) 1048 return -1; 1049 max = idx; 1050 while (isblank(*end)) 1051 end++; 1052 if (*end != ',' && *end != '\0') 1053 return -1; 1054 } 1055 1056 if (*end != ',' && *end != '\0' && 1057 *end != '@') 1058 return -1; 1059 1060 for (idx = RTE_MIN(min, max); 1061 idx <= RTE_MAX(min, max); idx++) 1062 CPU_SET(idx, set); 1063 1064 return end - input; 1065 } 1066 } 1067 1068 /* process set within bracket */ 1069 str++; 1070 while (isblank(*str)) 1071 str++; 1072 if (*str == '\0') 1073 return -1; 1074 1075 min = RTE_MAX_LCORE; 1076 do { 1077 1078 /* go ahead to the first digit */ 1079 while (isblank(*str)) 1080 str++; 1081 if (!isdigit(*str)) 1082 return -1; 1083 1084 /* get the digit value */ 1085 errno = 0; 1086 idx = strtoul(str, &end, 10); 1087 if (errno || end == NULL || idx >= CPU_SETSIZE) 1088 return -1; 1089 1090 /* go ahead to separator '-',',' and ')' */ 1091 while (isblank(*end)) 1092 end++; 1093 if (*end == '-') { 1094 if (min == 
RTE_MAX_LCORE) 1095 min = idx; 1096 else /* avoid continuous '-' */ 1097 return -1; 1098 } else if ((*end == ',') || (*end == ')')) { 1099 max = idx; 1100 if (min == RTE_MAX_LCORE) 1101 min = idx; 1102 for (idx = RTE_MIN(min, max); 1103 idx <= RTE_MAX(min, max); idx++) 1104 CPU_SET(idx, set); 1105 1106 min = RTE_MAX_LCORE; 1107 } else 1108 return -1; 1109 1110 str = end + 1; 1111 } while (*end != '\0' && *end != ')'); 1112 1113 /* 1114 * to avoid failure that tail blank makes end character check fail 1115 * in eal_parse_lcores( ) 1116 */ 1117 while (isblank(*str)) 1118 str++; 1119 1120 return str - input; 1121 } 1122 1123 static int 1124 check_cpuset(rte_cpuset_t *set) 1125 { 1126 unsigned int idx; 1127 1128 for (idx = 0; idx < CPU_SETSIZE; idx++) { 1129 if (!CPU_ISSET(idx, set)) 1130 continue; 1131 1132 if (eal_cpu_detected(idx) == 0) { 1133 RTE_LOG(ERR, EAL, "core %u " 1134 "unavailable\n", idx); 1135 return -1; 1136 } 1137 } 1138 return 0; 1139 } 1140 1141 /* 1142 * The format pattern: --lcores='<lcores[@cpus]>[<,lcores[@cpus]>...]' 1143 * lcores, cpus could be a single digit/range or a group. 1144 * '(' and ')' are necessary if it's a group. 1145 * If not supply '@cpus', the value of cpus uses the same as lcores. 1146 * e.g. '1,2@(5-7),(3-5)@(0,2),(0,6),7-8' means start 9 EAL thread as below 1147 * lcore 0 runs on cpuset 0x41 (cpu 0,6) 1148 * lcore 1 runs on cpuset 0x2 (cpu 1) 1149 * lcore 2 runs on cpuset 0xe0 (cpu 5,6,7) 1150 * lcore 3,4,5 runs on cpuset 0x5 (cpu 0,2) 1151 * lcore 6 runs on cpuset 0x41 (cpu 0,6) 1152 * lcore 7 runs on cpuset 0x80 (cpu 7) 1153 * lcore 8 runs on cpuset 0x100 (cpu 8) 1154 */ 1155 static int 1156 eal_parse_lcores(const char *lcores) 1157 { 1158 struct rte_config *cfg = rte_eal_get_configuration(); 1159 rte_cpuset_t lcore_set; 1160 unsigned int set_count; 1161 unsigned idx = 0; 1162 unsigned count = 0; 1163 const char *lcore_start = NULL; 1164 const char *end = NULL; 1165 int offset; 1166 rte_cpuset_t cpuset; 1167 int lflags; 1168 int ret = -1; 1169 1170 if (lcores == NULL) 1171 return -1; 1172 1173 /* Remove all blank characters ahead and after */ 1174 while (isblank(*lcores)) 1175 lcores++; 1176 1177 CPU_ZERO(&cpuset); 1178 1179 /* Reset lcore config */ 1180 for (idx = 0; idx < RTE_MAX_LCORE; idx++) { 1181 cfg->lcore_role[idx] = ROLE_OFF; 1182 lcore_config[idx].core_index = -1; 1183 CPU_ZERO(&lcore_config[idx].cpuset); 1184 } 1185 1186 /* Get list of cores */ 1187 do { 1188 while (isblank(*lcores)) 1189 lcores++; 1190 if (*lcores == '\0') 1191 goto err; 1192 1193 lflags = 0; 1194 1195 /* record lcore_set start point */ 1196 lcore_start = lcores; 1197 1198 /* go across a complete bracket */ 1199 if (*lcore_start == '(') { 1200 lcores += strcspn(lcores, ")"); 1201 if (*lcores++ == '\0') 1202 goto err; 1203 } 1204 1205 /* scan the separator '@', ','(next) or '\0'(finish) */ 1206 lcores += strcspn(lcores, "@,"); 1207 1208 if (*lcores == '@') { 1209 /* explicit assign cpuset and update the end cursor */ 1210 offset = eal_parse_set(lcores + 1, &cpuset); 1211 if (offset < 0) 1212 goto err; 1213 end = lcores + 1 + offset; 1214 } else { /* ',' or '\0' */ 1215 /* haven't given cpuset, current loop done */ 1216 end = lcores; 1217 1218 /* go back to check <number>-<number> */ 1219 offset = strcspn(lcore_start, "(-"); 1220 if (offset < (end - lcore_start) && 1221 *(lcore_start + offset) != '(') 1222 lflags = 1; 1223 } 1224 1225 if (*end != ',' && *end != '\0') 1226 goto err; 1227 1228 /* parse lcore_set from start point */ 1229 if (eal_parse_set(lcore_start, 
&lcore_set) < 0)
			goto err;

		/* without '@', by default using lcore_set as cpuset */
		if (*lcores != '@')
			rte_memcpy(&cpuset, &lcore_set, sizeof(cpuset));

		set_count = CPU_COUNT(&lcore_set);
		/* start to update lcore_set */
		for (idx = 0; idx < RTE_MAX_LCORE; idx++) {
			if (!CPU_ISSET(idx, &lcore_set))
				continue;
			set_count--;

			if (cfg->lcore_role[idx] != ROLE_RTE) {
				lcore_config[idx].core_index = count;
				cfg->lcore_role[idx] = ROLE_RTE;
				count++;
			}

			if (lflags) {
				CPU_ZERO(&cpuset);
				CPU_SET(idx, &cpuset);
			}

			if (check_cpuset(&cpuset) < 0)
				goto err;
			rte_memcpy(&lcore_config[idx].cpuset, &cpuset,
				   sizeof(rte_cpuset_t));
		}

		/* some cores from the lcore_set can't be handled by EAL */
		if (set_count != 0)
			goto err;

		lcores = end + 1;
	} while (*end != '\0');

	if (count == 0)
		goto err;

	cfg->lcore_count = count;
	ret = 0;

err:

	return ret;
}

#ifndef RTE_EXEC_ENV_WINDOWS
static int
eal_parse_syslog(const char *facility, struct internal_config *conf)
{
	int i;
	static const struct {
		const char *name;
		int value;
	} map[] = {
		{ "auth", LOG_AUTH },
		{ "cron", LOG_CRON },
		{ "daemon", LOG_DAEMON },
		{ "ftp", LOG_FTP },
		{ "kern", LOG_KERN },
		{ "lpr", LOG_LPR },
		{ "mail", LOG_MAIL },
		{ "news", LOG_NEWS },
		{ "syslog", LOG_SYSLOG },
		{ "user", LOG_USER },
		{ "uucp", LOG_UUCP },
		{ "local0", LOG_LOCAL0 },
		{ "local1", LOG_LOCAL1 },
		{ "local2", LOG_LOCAL2 },
		{ "local3", LOG_LOCAL3 },
		{ "local4", LOG_LOCAL4 },
		{ "local5", LOG_LOCAL5 },
		{ "local6", LOG_LOCAL6 },
		{ "local7", LOG_LOCAL7 },
		{ NULL, 0 }
	};

	for (i = 0; map[i].name; i++) {
		if (!strcmp(facility, map[i].name)) {
			conf->syslog_facility = map[i].value;
			return 0;
		}
	}
	return -1;
}
#endif

static void
eal_log_usage(void)
{
	unsigned int level;

	printf("Log type is a pattern matching items of this list"
			" (plugins may be missing):\n");
	rte_log_list_types(stdout, "\t");
	printf("\n");
	printf("Syntax using globbing pattern: ");
	printf("--"OPT_LOG_LEVEL" pattern:level\n");
	printf("Syntax using regular expression: ");
	printf("--"OPT_LOG_LEVEL" regexp,level\n");
	printf("Syntax for the global level: ");
	printf("--"OPT_LOG_LEVEL" level\n");
	printf("Logs are emitted if allowed by both global and specific levels.\n");
	printf("\n");
	printf("Log level can be a number or the first letters of its name:\n");
	for (level = 1; level <= RTE_LOG_MAX; level++)
		printf("\t%d %s\n", level, eal_log_level2str(level));
}

static int
eal_parse_log_priority(const char *level)
{
	size_t len = strlen(level);
	unsigned long tmp;
	char *end;
	unsigned int i;

	if (len == 0)
		return -1;

	/* look for named values, skip 0 which is not a valid level */
	for (i = 1; i <= RTE_LOG_MAX; i++) {
		if (strncmp(eal_log_level2str(i), level, len) == 0)
			return i;
	}

	/* not a string, maybe it is numeric */
	errno = 0;
	tmp = strtoul(level, &end, 0);

	/* check for errors */
	if (errno != 0 || end == NULL || *end != '\0' ||
	    tmp >= UINT32_MAX)
		return -1;

	return tmp;
}

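/*
 * Illustrative examples (added note) of --log-level arguments accepted by
 * the parser below, matching the syntaxes printed by eal_log_usage():
 *   --log-level=8                  set the global level to 8 (debug)
 *   --log-level=lib.eal:info       globbing pattern "lib.eal" set to info
 *   --log-level='pmd\.net.*,7'     regular expression, matching types set to 7
 *   --log-level=help               list log types and levels, then exit
 */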
static int 1371 eal_parse_log_level(const char *arg) 1372 { 1373 const char *pattern = NULL; 1374 const char *regex = NULL; 1375 char *str, *level; 1376 int priority; 1377 1378 if (strcmp(arg, "help") == 0) { 1379 eal_log_usage(); 1380 exit(EXIT_SUCCESS); 1381 } 1382 1383 str = strdup(arg); 1384 if (str == NULL) 1385 return -1; 1386 1387 if ((level = strchr(str, ','))) { 1388 regex = str; 1389 *level++ = '\0'; 1390 } else if ((level = strchr(str, ':'))) { 1391 pattern = str; 1392 *level++ = '\0'; 1393 } else { 1394 level = str; 1395 } 1396 1397 priority = eal_parse_log_priority(level); 1398 if (priority <= 0) { 1399 fprintf(stderr, "Invalid log level: %s\n", level); 1400 goto fail; 1401 } 1402 if (priority > (int)RTE_LOG_MAX) { 1403 fprintf(stderr, "Log level %d higher than maximum (%d)\n", 1404 priority, RTE_LOG_MAX); 1405 priority = RTE_LOG_MAX; 1406 } 1407 1408 if (regex) { 1409 if (rte_log_set_level_regexp(regex, priority) < 0) { 1410 fprintf(stderr, "cannot set log level %s,%d\n", 1411 regex, priority); 1412 goto fail; 1413 } 1414 if (eal_log_save_regexp(regex, priority) < 0) 1415 goto fail; 1416 } else if (pattern) { 1417 if (rte_log_set_level_pattern(pattern, priority) < 0) { 1418 fprintf(stderr, "cannot set log level %s:%d\n", 1419 pattern, priority); 1420 goto fail; 1421 } 1422 if (eal_log_save_pattern(pattern, priority) < 0) 1423 goto fail; 1424 } else { 1425 rte_log_set_global_level(priority); 1426 } 1427 1428 free(str); 1429 return 0; 1430 1431 fail: 1432 free(str); 1433 return -1; 1434 } 1435 1436 static enum rte_proc_type_t 1437 eal_parse_proc_type(const char *arg) 1438 { 1439 if (strncasecmp(arg, "primary", sizeof("primary")) == 0) 1440 return RTE_PROC_PRIMARY; 1441 if (strncasecmp(arg, "secondary", sizeof("secondary")) == 0) 1442 return RTE_PROC_SECONDARY; 1443 if (strncasecmp(arg, "auto", sizeof("auto")) == 0) 1444 return RTE_PROC_AUTO; 1445 1446 return RTE_PROC_INVALID; 1447 } 1448 1449 static int 1450 eal_parse_iova_mode(const char *name) 1451 { 1452 int mode; 1453 struct internal_config *internal_conf = 1454 eal_get_internal_configuration(); 1455 1456 if (name == NULL) 1457 return -1; 1458 1459 if (!strcmp("pa", name)) 1460 mode = RTE_IOVA_PA; 1461 else if (!strcmp("va", name)) 1462 mode = RTE_IOVA_VA; 1463 else 1464 return -1; 1465 1466 internal_conf->iova_mode = mode; 1467 return 0; 1468 } 1469 1470 static int 1471 eal_parse_simd_bitwidth(const char *arg) 1472 { 1473 char *end; 1474 unsigned long bitwidth; 1475 int ret; 1476 struct internal_config *internal_conf = 1477 eal_get_internal_configuration(); 1478 1479 if (arg == NULL || arg[0] == '\0') 1480 return -1; 1481 1482 errno = 0; 1483 bitwidth = strtoul(arg, &end, 0); 1484 1485 /* check for errors */ 1486 if (errno != 0 || end == NULL || *end != '\0' || bitwidth > RTE_VECT_SIMD_MAX) 1487 return -1; 1488 1489 if (bitwidth == 0) 1490 bitwidth = (unsigned long) RTE_VECT_SIMD_MAX; 1491 ret = rte_vect_set_max_simd_bitwidth(bitwidth); 1492 if (ret < 0) 1493 return -1; 1494 internal_conf->max_simd_bitwidth.forced = 1; 1495 return 0; 1496 } 1497 1498 static int 1499 eal_parse_base_virtaddr(const char *arg) 1500 { 1501 char *end; 1502 uint64_t addr; 1503 struct internal_config *internal_conf = 1504 eal_get_internal_configuration(); 1505 1506 errno = 0; 1507 addr = strtoull(arg, &end, 16); 1508 1509 /* check for errors */ 1510 if ((errno != 0) || (arg[0] == '\0') || end == NULL || (*end != '\0')) 1511 return -1; 1512 1513 /* make sure we don't exceed 32-bit boundary on 32-bit target */ 1514 #ifndef RTE_ARCH_64 1515 if (addr 
>= UINTPTR_MAX) 1516 return -1; 1517 #endif 1518 1519 /* align the addr on 16M boundary, 16MB is the minimum huge page 1520 * size on IBM Power architecture. If the addr is aligned to 16MB, 1521 * it can align to 2MB for x86. So this alignment can also be used 1522 * on x86 and other architectures. 1523 */ 1524 internal_conf->base_virtaddr = 1525 RTE_PTR_ALIGN_CEIL((uintptr_t)addr, (size_t)RTE_PGSIZE_16M); 1526 1527 return 0; 1528 } 1529 1530 /* caller is responsible for freeing the returned string */ 1531 static char * 1532 available_cores(void) 1533 { 1534 char *str = NULL; 1535 int previous; 1536 int sequence; 1537 char *tmp; 1538 int idx; 1539 1540 /* find the first available cpu */ 1541 for (idx = 0; idx < RTE_MAX_LCORE; idx++) { 1542 if (eal_cpu_detected(idx) == 0) 1543 continue; 1544 break; 1545 } 1546 if (idx >= RTE_MAX_LCORE) 1547 return NULL; 1548 1549 /* first sequence */ 1550 if (asprintf(&str, "%d", idx) < 0) 1551 return NULL; 1552 previous = idx; 1553 sequence = 0; 1554 1555 for (idx++ ; idx < RTE_MAX_LCORE; idx++) { 1556 if (eal_cpu_detected(idx) == 0) 1557 continue; 1558 1559 if (idx == previous + 1) { 1560 previous = idx; 1561 sequence = 1; 1562 continue; 1563 } 1564 1565 /* finish current sequence */ 1566 if (sequence) { 1567 if (asprintf(&tmp, "%s-%d", str, previous) < 0) { 1568 free(str); 1569 return NULL; 1570 } 1571 free(str); 1572 str = tmp; 1573 } 1574 1575 /* new sequence */ 1576 if (asprintf(&tmp, "%s,%d", str, idx) < 0) { 1577 free(str); 1578 return NULL; 1579 } 1580 free(str); 1581 str = tmp; 1582 previous = idx; 1583 sequence = 0; 1584 } 1585 1586 /* finish last sequence */ 1587 if (sequence) { 1588 if (asprintf(&tmp, "%s-%d", str, previous) < 0) { 1589 free(str); 1590 return NULL; 1591 } 1592 free(str); 1593 str = tmp; 1594 } 1595 1596 return str; 1597 } 1598 1599 #define HUGE_UNLINK_NEVER "never" 1600 1601 static int 1602 eal_parse_huge_unlink(const char *arg, struct hugepage_file_discipline *out) 1603 { 1604 if (arg == NULL || strcmp(arg, "always") == 0) { 1605 out->unlink_before_mapping = true; 1606 return 0; 1607 } 1608 if (strcmp(arg, "existing") == 0) { 1609 /* same as not specifying the option */ 1610 return 0; 1611 } 1612 if (strcmp(arg, HUGE_UNLINK_NEVER) == 0) { 1613 RTE_LOG(WARNING, EAL, "Using --"OPT_HUGE_UNLINK"=" 1614 HUGE_UNLINK_NEVER" may create data leaks.\n"); 1615 out->unlink_existing = false; 1616 return 0; 1617 } 1618 return -1; 1619 } 1620 1621 int 1622 eal_parse_common_option(int opt, const char *optarg, 1623 struct internal_config *conf) 1624 { 1625 static int b_used; 1626 static int a_used; 1627 1628 switch (opt) { 1629 case 'b': 1630 if (a_used) 1631 goto ba_conflict; 1632 if (eal_option_device_add(RTE_DEVTYPE_BLOCKED, optarg) < 0) 1633 return -1; 1634 b_used = 1; 1635 break; 1636 1637 case 'a': 1638 if (b_used) 1639 goto ba_conflict; 1640 if (eal_option_device_add(RTE_DEVTYPE_ALLOWED, optarg) < 0) 1641 return -1; 1642 a_used = 1; 1643 break; 1644 /* coremask */ 1645 case 'c': { 1646 int lcore_indexes[RTE_MAX_LCORE]; 1647 1648 if (eal_service_cores_parsed()) 1649 RTE_LOG(WARNING, EAL, 1650 "Service cores parsed before dataplane cores. 
Please ensure -c is before -s or -S\n"); 1651 if (eal_parse_coremask(optarg, lcore_indexes) < 0) { 1652 RTE_LOG(ERR, EAL, "invalid coremask syntax\n"); 1653 return -1; 1654 } 1655 if (update_lcore_config(lcore_indexes) < 0) { 1656 char *available = available_cores(); 1657 1658 RTE_LOG(ERR, EAL, 1659 "invalid coremask, please check specified cores are part of %s\n", 1660 available); 1661 free(available); 1662 return -1; 1663 } 1664 1665 if (core_parsed) { 1666 RTE_LOG(ERR, EAL, "Option -c is ignored, because (%s) is set!\n", 1667 (core_parsed == LCORE_OPT_LST) ? "-l" : 1668 (core_parsed == LCORE_OPT_MAP) ? "--lcore" : 1669 "-c"); 1670 return -1; 1671 } 1672 1673 core_parsed = LCORE_OPT_MSK; 1674 break; 1675 } 1676 /* corelist */ 1677 case 'l': { 1678 int lcore_indexes[RTE_MAX_LCORE]; 1679 1680 if (eal_service_cores_parsed()) 1681 RTE_LOG(WARNING, EAL, 1682 "Service cores parsed before dataplane cores. Please ensure -l is before -s or -S\n"); 1683 1684 if (eal_parse_corelist(optarg, lcore_indexes) < 0) { 1685 RTE_LOG(ERR, EAL, "invalid core list syntax\n"); 1686 return -1; 1687 } 1688 if (update_lcore_config(lcore_indexes) < 0) { 1689 char *available = available_cores(); 1690 1691 RTE_LOG(ERR, EAL, 1692 "invalid core list, please check specified cores are part of %s\n", 1693 available); 1694 free(available); 1695 return -1; 1696 } 1697 1698 if (core_parsed) { 1699 RTE_LOG(ERR, EAL, "Option -l is ignored, because (%s) is set!\n", 1700 (core_parsed == LCORE_OPT_MSK) ? "-c" : 1701 (core_parsed == LCORE_OPT_MAP) ? "--lcore" : 1702 "-l"); 1703 return -1; 1704 } 1705 1706 core_parsed = LCORE_OPT_LST; 1707 break; 1708 } 1709 /* service coremask */ 1710 case 's': 1711 if (eal_parse_service_coremask(optarg) < 0) { 1712 RTE_LOG(ERR, EAL, "invalid service coremask\n"); 1713 return -1; 1714 } 1715 break; 1716 /* service corelist */ 1717 case 'S': 1718 if (eal_parse_service_corelist(optarg) < 0) { 1719 RTE_LOG(ERR, EAL, "invalid service core list\n"); 1720 return -1; 1721 } 1722 break; 1723 /* size of memory */ 1724 case 'm': 1725 conf->memory = atoi(optarg); 1726 conf->memory *= 1024ULL; 1727 conf->memory *= 1024ULL; 1728 mem_parsed = 1; 1729 break; 1730 /* force number of channels */ 1731 case 'n': 1732 conf->force_nchannel = atoi(optarg); 1733 if (conf->force_nchannel == 0) { 1734 RTE_LOG(ERR, EAL, "invalid channel number\n"); 1735 return -1; 1736 } 1737 break; 1738 /* force number of ranks */ 1739 case 'r': 1740 conf->force_nrank = atoi(optarg); 1741 if (conf->force_nrank == 0 || 1742 conf->force_nrank > 16) { 1743 RTE_LOG(ERR, EAL, "invalid rank number\n"); 1744 return -1; 1745 } 1746 break; 1747 /* force loading of external driver */ 1748 case 'd': 1749 if (eal_plugin_add(optarg) == -1) 1750 return -1; 1751 break; 1752 case 'v': 1753 /* since message is explicitly requested by user, we 1754 * write message at highest log level so it can always 1755 * be seen 1756 * even if info or warning messages are disabled */ 1757 RTE_LOG(CRIT, EAL, "RTE Version: '%s'\n", rte_version()); 1758 break; 1759 1760 /* long options */ 1761 case OPT_HUGE_UNLINK_NUM: 1762 if (eal_parse_huge_unlink(optarg, &conf->hugepage_file) < 0) { 1763 RTE_LOG(ERR, EAL, "invalid --"OPT_HUGE_UNLINK" option\n"); 1764 return -1; 1765 } 1766 break; 1767 1768 case OPT_NO_HUGE_NUM: 1769 conf->no_hugetlbfs = 1; 1770 /* no-huge is legacy mem */ 1771 conf->legacy_mem = 1; 1772 break; 1773 1774 case OPT_NO_PCI_NUM: 1775 conf->no_pci = 1; 1776 break; 1777 1778 case OPT_NO_HPET_NUM: 1779 conf->no_hpet = 1; 1780 break; 1781 1782 case 
OPT_VMWARE_TSC_MAP_NUM: 1783 conf->vmware_tsc_map = 1; 1784 break; 1785 1786 case OPT_NO_SHCONF_NUM: 1787 conf->no_shconf = 1; 1788 break; 1789 1790 case OPT_IN_MEMORY_NUM: 1791 conf->in_memory = 1; 1792 /* in-memory is a superset of noshconf and huge-unlink */ 1793 conf->no_shconf = 1; 1794 conf->hugepage_file.unlink_before_mapping = true; 1795 break; 1796 1797 case OPT_PROC_TYPE_NUM: 1798 conf->process_type = eal_parse_proc_type(optarg); 1799 break; 1800 1801 case OPT_MAIN_LCORE_NUM: 1802 if (eal_parse_main_lcore(optarg) < 0) { 1803 RTE_LOG(ERR, EAL, "invalid parameter for --" 1804 OPT_MAIN_LCORE "\n"); 1805 return -1; 1806 } 1807 break; 1808 1809 case OPT_VDEV_NUM: 1810 if (eal_option_device_add(RTE_DEVTYPE_VIRTUAL, 1811 optarg) < 0) { 1812 return -1; 1813 } 1814 break; 1815 1816 #ifndef RTE_EXEC_ENV_WINDOWS 1817 case OPT_SYSLOG_NUM: 1818 if (eal_parse_syslog(optarg, conf) < 0) { 1819 RTE_LOG(ERR, EAL, "invalid parameters for --" 1820 OPT_SYSLOG "\n"); 1821 return -1; 1822 } 1823 break; 1824 #endif 1825 1826 case OPT_LOG_LEVEL_NUM: { 1827 if (eal_parse_log_level(optarg) < 0) { 1828 RTE_LOG(ERR, EAL, 1829 "invalid parameters for --" 1830 OPT_LOG_LEVEL "\n"); 1831 return -1; 1832 } 1833 break; 1834 } 1835 1836 #ifndef RTE_EXEC_ENV_WINDOWS 1837 case OPT_TRACE_NUM: { 1838 if (eal_trace_args_save(optarg) < 0) { 1839 RTE_LOG(ERR, EAL, "invalid parameters for --" 1840 OPT_TRACE "\n"); 1841 return -1; 1842 } 1843 break; 1844 } 1845 1846 case OPT_TRACE_DIR_NUM: { 1847 if (eal_trace_dir_args_save(optarg) < 0) { 1848 RTE_LOG(ERR, EAL, "invalid parameters for --" 1849 OPT_TRACE_DIR "\n"); 1850 return -1; 1851 } 1852 break; 1853 } 1854 1855 case OPT_TRACE_BUF_SIZE_NUM: { 1856 if (eal_trace_bufsz_args_save(optarg) < 0) { 1857 RTE_LOG(ERR, EAL, "invalid parameters for --" 1858 OPT_TRACE_BUF_SIZE "\n"); 1859 return -1; 1860 } 1861 break; 1862 } 1863 1864 case OPT_TRACE_MODE_NUM: { 1865 if (eal_trace_mode_args_save(optarg) < 0) { 1866 RTE_LOG(ERR, EAL, "invalid parameters for --" 1867 OPT_TRACE_MODE "\n"); 1868 return -1; 1869 } 1870 break; 1871 } 1872 #endif /* !RTE_EXEC_ENV_WINDOWS */ 1873 1874 case OPT_LCORES_NUM: 1875 if (eal_parse_lcores(optarg) < 0) { 1876 RTE_LOG(ERR, EAL, "invalid parameter for --" 1877 OPT_LCORES "\n"); 1878 return -1; 1879 } 1880 1881 if (core_parsed) { 1882 RTE_LOG(ERR, EAL, "Option --lcore is ignored, because (%s) is set!\n", 1883 (core_parsed == LCORE_OPT_LST) ? "-l" : 1884 (core_parsed == LCORE_OPT_MSK) ? 
"-c" : 1885 "--lcore"); 1886 return -1; 1887 } 1888 1889 core_parsed = LCORE_OPT_MAP; 1890 break; 1891 case OPT_LEGACY_MEM_NUM: 1892 conf->legacy_mem = 1; 1893 break; 1894 case OPT_SINGLE_FILE_SEGMENTS_NUM: 1895 conf->single_file_segments = 1; 1896 break; 1897 case OPT_IOVA_MODE_NUM: 1898 if (eal_parse_iova_mode(optarg) < 0) { 1899 RTE_LOG(ERR, EAL, "invalid parameters for --" 1900 OPT_IOVA_MODE "\n"); 1901 return -1; 1902 } 1903 break; 1904 case OPT_BASE_VIRTADDR_NUM: 1905 if (eal_parse_base_virtaddr(optarg) < 0) { 1906 RTE_LOG(ERR, EAL, "invalid parameter for --" 1907 OPT_BASE_VIRTADDR "\n"); 1908 return -1; 1909 } 1910 break; 1911 case OPT_TELEMETRY_NUM: 1912 break; 1913 case OPT_NO_TELEMETRY_NUM: 1914 conf->no_telemetry = 1; 1915 break; 1916 case OPT_FORCE_MAX_SIMD_BITWIDTH_NUM: 1917 if (eal_parse_simd_bitwidth(optarg) < 0) { 1918 RTE_LOG(ERR, EAL, "invalid parameter for --" 1919 OPT_FORCE_MAX_SIMD_BITWIDTH "\n"); 1920 return -1; 1921 } 1922 break; 1923 1924 /* don't know what to do, leave this to caller */ 1925 default: 1926 return 1; 1927 1928 } 1929 1930 return 0; 1931 1932 ba_conflict: 1933 RTE_LOG(ERR, EAL, 1934 "Options allow (-a) and block (-b) can't be used at the same time\n"); 1935 return -1; 1936 } 1937 1938 static void 1939 eal_auto_detect_cores(struct rte_config *cfg) 1940 { 1941 unsigned int lcore_id; 1942 unsigned int removed = 0; 1943 rte_cpuset_t affinity_set; 1944 1945 if (pthread_getaffinity_np(pthread_self(), sizeof(rte_cpuset_t), 1946 &affinity_set)) 1947 CPU_ZERO(&affinity_set); 1948 1949 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { 1950 if (cfg->lcore_role[lcore_id] == ROLE_RTE && 1951 !CPU_ISSET(lcore_id, &affinity_set)) { 1952 cfg->lcore_role[lcore_id] = ROLE_OFF; 1953 removed++; 1954 } 1955 } 1956 1957 cfg->lcore_count -= removed; 1958 } 1959 1960 static void 1961 compute_ctrl_threads_cpuset(struct internal_config *internal_cfg) 1962 { 1963 rte_cpuset_t *cpuset = &internal_cfg->ctrl_cpuset; 1964 rte_cpuset_t default_set; 1965 unsigned int lcore_id; 1966 1967 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { 1968 if (rte_lcore_has_role(lcore_id, ROLE_OFF)) 1969 continue; 1970 RTE_CPU_OR(cpuset, cpuset, &lcore_config[lcore_id].cpuset); 1971 } 1972 RTE_CPU_NOT(cpuset, cpuset); 1973 1974 if (pthread_getaffinity_np(pthread_self(), sizeof(rte_cpuset_t), 1975 &default_set)) 1976 CPU_ZERO(&default_set); 1977 1978 RTE_CPU_AND(cpuset, cpuset, &default_set); 1979 1980 /* if no remaining cpu, use main lcore cpu affinity */ 1981 if (!CPU_COUNT(cpuset)) { 1982 memcpy(cpuset, &lcore_config[rte_get_main_lcore()].cpuset, 1983 sizeof(*cpuset)); 1984 } 1985 } 1986 1987 int 1988 eal_cleanup_config(struct internal_config *internal_cfg) 1989 { 1990 free(internal_cfg->hugefile_prefix); 1991 free(internal_cfg->hugepage_dir); 1992 free(internal_cfg->user_mbuf_pool_ops_name); 1993 1994 return 0; 1995 } 1996 1997 int 1998 eal_adjust_config(struct internal_config *internal_cfg) 1999 { 2000 int i; 2001 struct rte_config *cfg = rte_eal_get_configuration(); 2002 struct internal_config *internal_conf = 2003 eal_get_internal_configuration(); 2004 2005 if (!core_parsed) 2006 eal_auto_detect_cores(cfg); 2007 2008 if (internal_conf->process_type == RTE_PROC_AUTO) 2009 internal_conf->process_type = eal_proc_type_detect(); 2010 2011 /* default main lcore is the first one */ 2012 if (!main_lcore_parsed) { 2013 cfg->main_lcore = rte_get_next_lcore(-1, 0, 0); 2014 if (cfg->main_lcore >= RTE_MAX_LCORE) 2015 return -1; 2016 lcore_config[cfg->main_lcore].core_role = ROLE_RTE; 2017 
} 2018 2019 compute_ctrl_threads_cpuset(internal_cfg); 2020 2021 /* if no memory amounts were requested, this will result in 0 and 2022 * will be overridden later, right after eal_hugepage_info_init() */ 2023 for (i = 0; i < RTE_MAX_NUMA_NODES; i++) 2024 internal_cfg->memory += internal_cfg->socket_mem[i]; 2025 2026 return 0; 2027 } 2028 2029 int 2030 eal_check_common_options(struct internal_config *internal_cfg) 2031 { 2032 struct rte_config *cfg = rte_eal_get_configuration(); 2033 const struct internal_config *internal_conf = 2034 eal_get_internal_configuration(); 2035 2036 if (cfg->lcore_role[cfg->main_lcore] != ROLE_RTE) { 2037 RTE_LOG(ERR, EAL, "Main lcore is not enabled for DPDK\n"); 2038 return -1; 2039 } 2040 2041 if (internal_cfg->process_type == RTE_PROC_INVALID) { 2042 RTE_LOG(ERR, EAL, "Invalid process type specified\n"); 2043 return -1; 2044 } 2045 if (internal_cfg->hugefile_prefix != NULL && 2046 strlen(internal_cfg->hugefile_prefix) < 1) { 2047 RTE_LOG(ERR, EAL, "Invalid length of --" OPT_FILE_PREFIX " option\n"); 2048 return -1; 2049 } 2050 if (internal_cfg->hugepage_dir != NULL && 2051 strlen(internal_cfg->hugepage_dir) < 1) { 2052 RTE_LOG(ERR, EAL, "Invalid length of --" OPT_HUGE_DIR" option\n"); 2053 return -1; 2054 } 2055 if (internal_cfg->user_mbuf_pool_ops_name != NULL && 2056 strlen(internal_cfg->user_mbuf_pool_ops_name) < 1) { 2057 RTE_LOG(ERR, EAL, "Invalid length of --" OPT_MBUF_POOL_OPS_NAME" option\n"); 2058 return -1; 2059 } 2060 if (strchr(eal_get_hugefile_prefix(), '%') != NULL) { 2061 RTE_LOG(ERR, EAL, "Invalid char, '%%', in --"OPT_FILE_PREFIX" " 2062 "option\n"); 2063 return -1; 2064 } 2065 if (mem_parsed && internal_cfg->force_sockets == 1) { 2066 RTE_LOG(ERR, EAL, "Options -m and --"OPT_SOCKET_MEM" cannot " 2067 "be specified at the same time\n"); 2068 return -1; 2069 } 2070 if (internal_cfg->no_hugetlbfs && internal_cfg->force_sockets == 1) { 2071 RTE_LOG(ERR, EAL, "Option --"OPT_SOCKET_MEM" cannot " 2072 "be specified together with --"OPT_NO_HUGE"\n"); 2073 return -1; 2074 } 2075 if (internal_cfg->no_hugetlbfs && 2076 internal_cfg->hugepage_file.unlink_before_mapping && 2077 !internal_cfg->in_memory) { 2078 RTE_LOG(ERR, EAL, "Option --"OPT_HUGE_UNLINK" cannot " 2079 "be specified together with --"OPT_NO_HUGE"\n"); 2080 return -1; 2081 } 2082 if (internal_conf->force_socket_limits && internal_conf->legacy_mem) { 2083 RTE_LOG(ERR, EAL, "Option --"OPT_SOCKET_LIMIT 2084 " is only supported in non-legacy memory mode\n"); 2085 } 2086 if (internal_cfg->single_file_segments && 2087 internal_cfg->hugepage_file.unlink_before_mapping && 2088 !internal_cfg->in_memory) { 2089 RTE_LOG(ERR, EAL, "Option --"OPT_SINGLE_FILE_SEGMENTS" is " 2090 "not compatible with --"OPT_HUGE_UNLINK"\n"); 2091 return -1; 2092 } 2093 if (!internal_cfg->hugepage_file.unlink_existing && 2094 internal_cfg->in_memory) { 2095 RTE_LOG(ERR, EAL, "Option --"OPT_IN_MEMORY" is not compatible " 2096 "with --"OPT_HUGE_UNLINK"="HUGE_UNLINK_NEVER"\n"); 2097 return -1; 2098 } 2099 if (internal_cfg->legacy_mem && 2100 internal_cfg->in_memory) { 2101 RTE_LOG(ERR, EAL, "Option --"OPT_LEGACY_MEM" is not compatible " 2102 "with --"OPT_IN_MEMORY"\n"); 2103 return -1; 2104 } 2105 if (internal_cfg->legacy_mem && internal_cfg->match_allocations) { 2106 RTE_LOG(ERR, EAL, "Option --"OPT_LEGACY_MEM" is not compatible " 2107 "with --"OPT_MATCH_ALLOCATIONS"\n"); 2108 return -1; 2109 } 2110 if (internal_cfg->no_hugetlbfs && internal_cfg->match_allocations) { 2111 RTE_LOG(ERR, EAL, "Option --"OPT_NO_HUGE" is not 
compatible " 2112 "with --"OPT_MATCH_ALLOCATIONS"\n"); 2113 return -1; 2114 } 2115 if (internal_cfg->legacy_mem && internal_cfg->memory == 0) { 2116 RTE_LOG(NOTICE, EAL, "Static memory layout is selected, " 2117 "amount of reserved memory can be adjusted with " 2118 "-m or --"OPT_SOCKET_MEM"\n"); 2119 } 2120 2121 return 0; 2122 } 2123 2124 uint16_t 2125 rte_vect_get_max_simd_bitwidth(void) 2126 { 2127 const struct internal_config *internal_conf = 2128 eal_get_internal_configuration(); 2129 return internal_conf->max_simd_bitwidth.bitwidth; 2130 } 2131 2132 int 2133 rte_vect_set_max_simd_bitwidth(uint16_t bitwidth) 2134 { 2135 struct internal_config *internal_conf = 2136 eal_get_internal_configuration(); 2137 if (internal_conf->max_simd_bitwidth.forced) { 2138 RTE_LOG(NOTICE, EAL, "Cannot set max SIMD bitwidth - user runtime override enabled"); 2139 return -EPERM; 2140 } 2141 2142 if (bitwidth < RTE_VECT_SIMD_DISABLED || !rte_is_power_of_2(bitwidth)) { 2143 RTE_LOG(ERR, EAL, "Invalid bitwidth value!\n"); 2144 return -EINVAL; 2145 } 2146 internal_conf->max_simd_bitwidth.bitwidth = bitwidth; 2147 return 0; 2148 } 2149 2150 void 2151 eal_common_usage(void) 2152 { 2153 printf("[options]\n\n" 2154 "EAL common options:\n" 2155 " -c COREMASK Hexadecimal bitmask of cores to run on\n" 2156 " -l CORELIST List of cores to run on\n" 2157 " The argument format is <c1>[-c2][,c3[-c4],...]\n" 2158 " where c1, c2, etc are core indexes between 0 and %d\n" 2159 " --"OPT_LCORES" COREMAP Map lcore set to physical cpu set\n" 2160 " The argument format is\n" 2161 " '<lcores[@cpus]>[<,lcores[@cpus]>...]'\n" 2162 " lcores and cpus list are grouped by '(' and ')'\n" 2163 " Within the group, '-' is used for range separator,\n" 2164 " ',' is used for single number separator.\n" 2165 " '( )' can be omitted for single element group,\n" 2166 " '@' can be omitted if cpus and lcores have the same value\n" 2167 " -s SERVICE COREMASK Hexadecimal bitmask of cores to be used as service cores\n" 2168 " --"OPT_MAIN_LCORE" ID Core ID that is used as main\n" 2169 " --"OPT_MBUF_POOL_OPS_NAME" Pool ops name for mbuf to use\n" 2170 " -n CHANNELS Number of memory channels\n" 2171 " -m MB Memory to allocate (see also --"OPT_SOCKET_MEM")\n" 2172 " -r RANKS Force number of memory ranks (don't detect)\n" 2173 " -b, --block Add a device to the blocked list.\n" 2174 " Prevent EAL from using this device. The argument\n" 2175 " format for PCI devices is <domain:bus:devid.func>.\n" 2176 " -a, --allow Add a device to the allow list.\n" 2177 " Only use the specified devices. The argument format\n" 2178 " for PCI devices is <[domain:]bus:devid.func>.\n" 2179 " This option can be present several times.\n" 2180 " [NOTE: " OPT_DEV_ALLOW " cannot be used with "OPT_DEV_BLOCK" option]\n" 2181 " --"OPT_VDEV" Add a virtual device.\n" 2182 " The argument format is <driver><id>[,key=val,...]\n" 2183 " (ex: --vdev=net_pcap0,iface=eth2).\n" 2184 " --"OPT_IOVA_MODE" Set IOVA mode. 
'pa' for IOVA_PA\n"
	"                      'va' for IOVA_VA\n"
	"  -d LIB.so|DIR       Add a driver or driver directory\n"
	"                      (can be used multiple times)\n"
	"  --"OPT_VMWARE_TSC_MAP"    Use VMware TSC map instead of native RDTSC\n"
	"  --"OPT_PROC_TYPE"         Type of this process (primary|secondary|auto)\n"
#ifndef RTE_EXEC_ENV_WINDOWS
	"  --"OPT_SYSLOG"            Set syslog facility\n"
#endif
	"  --"OPT_LOG_LEVEL"=<level> Set global log level\n"
	"  --"OPT_LOG_LEVEL"=<type-match>:<level>\n"
	"                      Set specific log level\n"
	"  --"OPT_LOG_LEVEL"=help    Show log types and levels\n"
#ifndef RTE_EXEC_ENV_WINDOWS
	"  --"OPT_TRACE"=<regex-match>\n"
	"                      Enable trace based on regular expression trace name.\n"
	"                      By default, the trace is disabled.\n"
	"                      User must specify this option to enable trace.\n"
	"  --"OPT_TRACE_DIR"=<directory path>\n"
	"                      Specify trace directory for trace output.\n"
	"                      By default, trace output will be created in\n"
	"                      the $HOME directory and the parameter must be\n"
	"                      specified once only.\n"
	"  --"OPT_TRACE_BUF_SIZE"=<int>\n"
	"                      Specify maximum size of allocated memory\n"
	"                      for trace output for each thread. Valid\n"
	"                      unit can be either 'B|K|M' for 'Bytes',\n"
	"                      'KBytes' and 'MBytes' respectively.\n"
	"                      Default is 1MB and the parameter must be\n"
	"                      specified once only.\n"
	"  --"OPT_TRACE_MODE"=<o[verwrite] | d[iscard]>\n"
	"                      Specify how the trace output file is updated\n"
	"                      once it reaches its maximum size: either wrap\n"
	"                      around (overwrite) or discard new data.\n"
	"                      Default mode is 'overwrite' and the parameter\n"
	"                      must be specified once only.\n"
#endif /* !RTE_EXEC_ENV_WINDOWS */
	"  -v                  Display version information on startup\n"
	"  -h, --help          This help\n"
	"  --"OPT_IN_MEMORY"   Operate entirely in memory. This will\n"
	"                      disable secondary process support\n"
	"  --"OPT_BASE_VIRTADDR"     Base virtual address\n"
	"  --"OPT_TELEMETRY"   Enable telemetry support (on by default)\n"
	"  --"OPT_NO_TELEMETRY"   Disable telemetry support\n"
	"  --"OPT_FORCE_MAX_SIMD_BITWIDTH" Force the max SIMD bitwidth\n"
	"\nEAL options for DEBUG use only:\n"
	"  --"OPT_HUGE_UNLINK"[=existing|always|never]\n"
	"                      When to unlink files in hugetlbfs\n"
	"                      ('existing' by default, no value means 'always')\n"
	"  --"OPT_NO_HUGE"     Use malloc instead of hugetlbfs\n"
	"  --"OPT_NO_PCI"      Disable PCI\n"
	"  --"OPT_NO_HPET"     Disable HPET\n"
	"  --"OPT_NO_SHCONF"   No shared config (mmap'd files)\n"
	"\n", RTE_MAX_LCORE);
}
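
/*
 * Illustrative example (added note, not part of the upstream file): the
 * common options handled above typically appear on an application command
 * line such as
 *   dpdk-testpmd -l 0-3 -n 4 -a 0000:01:00.0 --log-level=lib.eal:info -- -i
 * where arguments after "--" are left for the application to parse.
 */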