/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */

#include <unistd.h>
#include <string.h>
#include <stdio.h>

#include <rte_errno.h>
#include <rte_mempool.h>
#include <rte_class.h>
#include <rte_malloc.h>
#include <rte_eal_paging.h>

#include "mlx5_common.h"
#include "mlx5_common_os.h"
#include "mlx5_common_mp.h"
#include "mlx5_common_log.h"
#include "mlx5_common_defs.h"
#include "mlx5_common_private.h"

uint8_t haswell_broadwell_cpu;

/* Driver type key for new device global syntax. */
#define MLX5_DRIVER_KEY "driver"

/* Device parameter to get file descriptor for import device. */
#define MLX5_DEVICE_FD "cmd_fd"

/* Device parameter to get PD number for import Protection Domain. */
#define MLX5_PD_HANDLE "pd_handle"

/* Enable extending memsegs when creating a MR. */
#define MLX5_MR_EXT_MEMSEG_EN "mr_ext_memseg_en"

/* Device parameter to configure implicit registration of mempool memory. */
#define MLX5_MR_MEMPOOL_REG_EN "mr_mempool_reg_en"

/* The default memory allocator used in PMD. */
#define MLX5_SYS_MEM_EN "sys_mem_en"

/*
 * Device parameter to force doorbell register mapping
 * to non-cached region eliminating the extra write memory barrier.
 * Deprecated, ignored (name changed to sq_db_nc).
 */
#define MLX5_TX_DB_NC "tx_db_nc"

/*
 * Device parameter to force doorbell register mapping
 * to non-cached region eliminating the extra write memory barrier.
 */
#define MLX5_SQ_DB_NC "sq_db_nc"

/* On x86_64 Intel processors, check whether relaxed
 * ordering should be used.
 */
#ifdef RTE_ARCH_X86_64
/**
 * This function returns processor identification and feature information
 * into the registers.
 *
 * @param eax, ebx, ecx, edx
 *   Pointers to the registers that will hold CPU information.
 * @param level
 *   The main category of information returned.
 */
static inline void mlx5_cpu_id(unsigned int level,
			       unsigned int *eax, unsigned int *ebx,
			       unsigned int *ecx, unsigned int *edx)
{
	__asm__("cpuid\n\t"
		: "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
		: "0" (level));
}
#endif

RTE_LOG_REGISTER_DEFAULT(mlx5_common_logtype, NOTICE)

/* Head of list of drivers. */
static TAILQ_HEAD(mlx5_drivers, mlx5_class_driver) drivers_list =
				TAILQ_HEAD_INITIALIZER(drivers_list);

/* Head of devices. */
static TAILQ_HEAD(mlx5_devices, mlx5_common_device) devices_list =
				TAILQ_HEAD_INITIALIZER(devices_list);
static pthread_mutex_t devices_list_lock;

static const struct {
	const char *name;
	unsigned int drv_class;
} mlx5_classes[] = {
	{ .name = "vdpa", .drv_class = MLX5_CLASS_VDPA },
	{ .name = "eth", .drv_class = MLX5_CLASS_ETH },
	/* Keep class "net" for backward compatibility. */
	{ .name = "net", .drv_class = MLX5_CLASS_ETH },
	{ .name = "regex", .drv_class = MLX5_CLASS_REGEX },
	{ .name = "compress", .drv_class = MLX5_CLASS_COMPRESS },
	{ .name = "crypto", .drv_class = MLX5_CLASS_CRYPTO },
};
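/*
 * Illustrative devargs example (the PCI address is hypothetical), combining
 * the class selector with the common parameters defined above. In the legacy
 * syntax multiple classes are separated by ':', and every key must carry an
 * explicit value (keys without values are rejected by mlx5_kvargs_prepare()):
 *
 *   -a 0000:08:00.0,class=eth:regex,mr_mempool_reg_en=0,sys_mem_en=1
 */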
static int
class_name_to_value(const char *class_name)
{
	unsigned int i;

	for (i = 0; i < RTE_DIM(mlx5_classes); i++) {
		if (strcmp(class_name, mlx5_classes[i].name) == 0)
			return mlx5_classes[i].drv_class;
	}
	return -EINVAL;
}

static struct mlx5_class_driver *
driver_get(uint32_t class)
{
	struct mlx5_class_driver *driver;

	TAILQ_FOREACH(driver, &drivers_list, next) {
		if ((uint32_t)driver->drv_class == class)
			return driver;
	}
	return NULL;
}

int
mlx5_kvargs_process(struct mlx5_kvargs_ctrl *mkvlist, const char *const keys[],
		    arg_handler_t handler, void *opaque_arg)
{
	const struct rte_kvargs_pair *pair;
	uint32_t i, j;

	MLX5_ASSERT(mkvlist && mkvlist->kvlist);
	/* Process parameters. */
	for (i = 0; i < mkvlist->kvlist->count; i++) {
		pair = &mkvlist->kvlist->pairs[i];
		for (j = 0; keys[j] != NULL; ++j) {
			if (strcmp(pair->key, keys[j]) != 0)
				continue;
			if ((*handler)(pair->key, pair->value, opaque_arg) < 0)
				return -1;
			mkvlist->is_used[i] = true;
			break;
		}
	}
	return 0;
}

/**
 * Prepare a mlx5 kvargs control.
 *
 * @param[out] mkvlist
 *   Pointer to mlx5 kvargs control.
 * @param[in] devargs
 *   The input string containing the key/value associations.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_kvargs_prepare(struct mlx5_kvargs_ctrl *mkvlist,
		    const struct rte_devargs *devargs)
{
	struct rte_kvargs *kvlist;
	uint32_t i;

	if (devargs == NULL)
		return 0;
	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	/*
	 * rte_kvargs_parse() accepts keys without values; the mlx5 PMDs do
	 * not allow this syntax.
	 */
	for (i = 0; i < kvlist->count; i++) {
		const struct rte_kvargs_pair *pair = &kvlist->pairs[i];
		if (pair->value == NULL || *(pair->value) == '\0') {
			DRV_LOG(ERR, "Key %s is missing value.", pair->key);
			rte_kvargs_free(kvlist);
			rte_errno = EINVAL;
			return -rte_errno;
		}
	}
	/* Make sure the whole "is_used" array starts as false. */
	memset(mkvlist, 0, sizeof(*mkvlist));
	mkvlist->kvlist = kvlist;
	DRV_LOG(DEBUG, "Parsed %u devargs successfully.",
		mkvlist->kvlist->count);
	return 0;
}

/**
 * Release a mlx5 kvargs control.
 *
 * @param[out] mkvlist
 *   Pointer to mlx5 kvargs control.
 */
static void
mlx5_kvargs_release(struct mlx5_kvargs_ctrl *mkvlist)
{
	if (mkvlist == NULL)
		return;
	rte_kvargs_free(mkvlist->kvlist);
	memset(mkvlist, 0, sizeof(*mkvlist));
}
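/*
 * Sketch of the kvargs flow in this file, as combined by
 * mlx5_common_dev_probe() below (not an API on its own):
 *
 *   struct mlx5_kvargs_ctrl mkvlist;
 *
 *   mlx5_kvargs_prepare(&mkvlist, eal_dev->devargs); // parse "key=value" pairs
 *   ... each class driver calls mlx5_kvargs_process() and marks used keys ...
 *   mlx5_kvargs_validate(&mkvlist);                  // any unused key is unknown
 *   mlx5_kvargs_release(&mkvlist);                   // free the parsed list
 */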
/**
 * Validate the device arguments list.
 * It reports the first unknown parameter.
 *
 * @param[in] mkvlist
 *   Pointer to mlx5 kvargs control.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_kvargs_validate(struct mlx5_kvargs_ctrl *mkvlist)
{
	uint32_t i;

	/* Secondary processes should not handle devargs. */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;
	if (mkvlist == NULL)
		return 0;
	for (i = 0; i < mkvlist->kvlist->count; i++) {
		if (mkvlist->is_used[i] == 0) {
			DRV_LOG(ERR, "Key \"%s\" "
				"is unknown for the provided classes.",
				mkvlist->kvlist->pairs[i].key);
			rte_errno = EINVAL;
			return -rte_errno;
		}
	}
	return 0;
}

/**
 * Verify and store value for devargs.
 *
 * @param[in] key
 *   Key argument to verify.
 * @param[in] val
 *   Value associated with key.
 * @param opaque
 *   User data.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_common_args_check_handler(const char *key, const char *val, void *opaque)
{
	struct mlx5_common_dev_config *config = opaque;
	signed long tmp;

	if (strcmp(MLX5_DRIVER_KEY, key) == 0 ||
	    strcmp(RTE_DEVARGS_KEY_CLASS, key) == 0)
		return 0;
	errno = 0;
	tmp = strtol(val, NULL, 0);
	if (errno) {
		rte_errno = errno;
		DRV_LOG(WARNING, "%s: \"%s\" is an invalid integer.", key, val);
		return -rte_errno;
	}
	if (strcmp(key, MLX5_TX_DB_NC) == 0)
		DRV_LOG(WARNING,
			"%s: deprecated parameter, converted to " MLX5_SQ_DB_NC,
			key);
	if (strcmp(key, MLX5_SQ_DB_NC) == 0 ||
	    strcmp(key, MLX5_TX_DB_NC) == 0) {
		if (tmp != MLX5_SQ_DB_CACHED &&
		    tmp != MLX5_SQ_DB_NCACHED &&
		    tmp != MLX5_SQ_DB_HEURISTIC) {
			DRV_LOG(ERR,
				"Invalid Send Queue doorbell mapping parameter.");
			rte_errno = EINVAL;
			return -rte_errno;
		}
		config->dbnc = tmp;
	} else if (strcmp(key, MLX5_MR_EXT_MEMSEG_EN) == 0) {
		config->mr_ext_memseg_en = !!tmp;
	} else if (strcmp(key, MLX5_MR_MEMPOOL_REG_EN) == 0) {
		config->mr_mempool_reg_en = !!tmp;
	} else if (strcmp(key, MLX5_SYS_MEM_EN) == 0) {
		config->sys_mem_en = !!tmp;
	} else if (strcmp(key, MLX5_DEVICE_FD) == 0) {
		config->device_fd = tmp;
	} else if (strcmp(key, MLX5_PD_HANDLE) == 0) {
		config->pd_handle = tmp;
	}
	return 0;
}
/**
 * Parse common device parameters.
 *
 * @param mkvlist
 *   Pointer to mlx5 kvargs control (parsed devargs), can be NULL.
 * @param config
 *   Pointer to device configuration structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_common_config_get(struct mlx5_kvargs_ctrl *mkvlist,
		       struct mlx5_common_dev_config *config)
{
	const char **params = (const char *[]){
		RTE_DEVARGS_KEY_CLASS,
		MLX5_DRIVER_KEY,
		MLX5_TX_DB_NC,
		MLX5_SQ_DB_NC,
		MLX5_MR_EXT_MEMSEG_EN,
		MLX5_SYS_MEM_EN,
		MLX5_MR_MEMPOOL_REG_EN,
		MLX5_DEVICE_FD,
		MLX5_PD_HANDLE,
		NULL,
	};
	int ret = 0;

	/* Set defaults. */
	config->mr_ext_memseg_en = 1;
	config->mr_mempool_reg_en = 1;
	config->sys_mem_en = 0;
	config->dbnc = MLX5_ARG_UNSET;
	config->device_fd = MLX5_ARG_UNSET;
	config->pd_handle = MLX5_ARG_UNSET;
	if (mkvlist == NULL)
		return 0;
	/* Process common parameters. */
	ret = mlx5_kvargs_process(mkvlist, params,
				  mlx5_common_args_check_handler, config);
	if (ret) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	/* Validate user arguments for remote PD and CTX if they are given. */
	ret = mlx5_os_remote_pd_and_ctx_validate(config);
	if (ret)
		return ret;
	DRV_LOG(DEBUG, "mr_ext_memseg_en is %u.", config->mr_ext_memseg_en);
	DRV_LOG(DEBUG, "mr_mempool_reg_en is %u.", config->mr_mempool_reg_en);
	DRV_LOG(DEBUG, "sys_mem_en is %u.", config->sys_mem_en);
	DRV_LOG(DEBUG, "Send Queue doorbell mapping parameter is %d.",
		config->dbnc);
	return ret;
}

static int
devargs_class_handler(__rte_unused const char *key,
		      const char *class_names, void *opaque)
{
	int *ret = opaque;
	int class_val;
	char *scratch;
	char *found;
	char *refstr = NULL;

	*ret = 0;
	scratch = strdup(class_names);
	if (scratch == NULL) {
		*ret = -ENOMEM;
		return *ret;
	}
	found = strtok_r(scratch, ":", &refstr);
	if (found == NULL)
		/* Empty string. */
		goto err;
	do {
		/* Extract each individual class name. Multiple
		 * classes can be supplied as class=net:regex:foo:bar.
		 */
		class_val = class_name_to_value(found);
		/* Check if it is a valid class. */
		if (class_val < 0) {
			*ret = -EINVAL;
			goto err;
		}
		*ret |= class_val;
		found = strtok_r(NULL, ":", &refstr);
	} while (found != NULL);
err:
	free(scratch);
	if (*ret < 0)
		DRV_LOG(ERR, "Invalid mlx5 class options: %s.\n", class_names);
	return *ret;
}

static int
parse_class_options(const struct rte_devargs *devargs,
		    struct mlx5_kvargs_ctrl *mkvlist)
{
	int ret = 0;

	if (devargs == NULL)
		return 0;
	if (devargs->cls != NULL && devargs->cls->name != NULL)
		/* Global syntax, only one class type. */
		return class_name_to_value(devargs->cls->name);
	/* Legacy devargs support multiple classes. */
	rte_kvargs_process(mkvlist->kvlist, RTE_DEVARGS_KEY_CLASS,
			   devargs_class_handler, &ret);
	return ret;
}
static const unsigned int mlx5_class_invalid_combinations[] = {
	MLX5_CLASS_ETH | MLX5_CLASS_VDPA,
	/* New class combinations should be added here. */
};

static int
is_valid_class_combination(uint32_t user_classes)
{
	unsigned int i;

	/* Verify if the user specified an unsupported combination. */
	for (i = 0; i < RTE_DIM(mlx5_class_invalid_combinations); i++) {
		if ((mlx5_class_invalid_combinations[i] & user_classes) ==
		    mlx5_class_invalid_combinations[i])
			return -EINVAL;
	}
	/* No invalid class combination found. */
	return 0;
}

static bool
mlx5_bus_match(const struct mlx5_class_driver *drv,
	       const struct rte_device *dev)
{
	if (mlx5_dev_is_pci(dev))
		return mlx5_dev_pci_match(drv, dev);
	return true;
}

static struct mlx5_common_device *
to_mlx5_device(const struct rte_device *rte_dev)
{
	struct mlx5_common_device *cdev;

	TAILQ_FOREACH(cdev, &devices_list, next) {
		if (rte_dev == cdev->dev)
			return cdev;
	}
	return NULL;
}

int
mlx5_dev_to_pci_str(const struct rte_device *dev, char *addr, size_t size)
{
	struct rte_pci_addr pci_addr = { 0 };
	int ret;

	if (mlx5_dev_is_pci(dev)) {
		/* Input might be <BDF>, format PCI address to <DBDF>. */
		ret = rte_pci_addr_parse(dev->name, &pci_addr);
		if (ret != 0)
			return -ENODEV;
		rte_pci_device_name(&pci_addr, addr, size);
		return 0;
	}
#ifdef RTE_EXEC_ENV_LINUX
	return mlx5_auxiliary_get_pci_str(RTE_DEV_TO_AUXILIARY_CONST(dev),
					  addr, size);
#else
	rte_errno = ENODEV;
	return -rte_errno;
#endif
}

/**
 * Register the mempool for the protection domain.
 *
 * @param cdev
 *   Pointer to the mlx5 common device.
 * @param mp
 *   Mempool being registered.
 *
 * @return
 *   0 on success, (-1) on failure and rte_errno is set.
 */
static int
mlx5_dev_mempool_register(struct mlx5_common_device *cdev,
			  struct rte_mempool *mp, bool is_extmem)
{
	return mlx5_mr_mempool_register(cdev, mp, is_extmem);
}

/**
 * Unregister the mempool from the protection domain.
 *
 * @param cdev
 *   Pointer to the mlx5 common device.
 * @param mp
 *   Mempool being unregistered.
 */
void
mlx5_dev_mempool_unregister(struct mlx5_common_device *cdev,
			    struct rte_mempool *mp)
{
	if (mlx5_mr_mempool_unregister(cdev, mp) < 0)
		DRV_LOG(WARNING, "Failed to unregister mempool %s for PD %p: %s",
			mp->name, cdev->pd, rte_strerror(rte_errno));
}
/**
 * rte_mempool_walk() callback to register mempools for the protection domain.
 *
 * @param mp
 *   The mempool being walked.
 * @param arg
 *   Pointer to the device shared context.
 */
static void
mlx5_dev_mempool_register_cb(struct rte_mempool *mp, void *arg)
{
	struct mlx5_common_device *cdev = arg;
	int ret;

	ret = mlx5_dev_mempool_register(cdev, mp, false);
	if (ret < 0 && rte_errno != EEXIST)
		DRV_LOG(ERR,
			"Failed to register existing mempool %s for PD %p: %s",
			mp->name, cdev->pd, rte_strerror(rte_errno));
}

/**
 * rte_mempool_walk() callback to unregister mempools
 * from the protection domain.
 *
 * @param mp
 *   The mempool being walked.
 * @param arg
 *   Pointer to the device shared context.
 */
static void
mlx5_dev_mempool_unregister_cb(struct rte_mempool *mp, void *arg)
{
	mlx5_dev_mempool_unregister((struct mlx5_common_device *)arg, mp);
}

/**
 * Mempool life cycle callback for mlx5 common devices.
 *
 * @param event
 *   Mempool life cycle event.
 * @param mp
 *   Associated mempool.
 * @param arg
 *   Pointer to a device shared context.
 */
static void
mlx5_dev_mempool_event_cb(enum rte_mempool_event event, struct rte_mempool *mp,
			  void *arg)
{
	struct mlx5_common_device *cdev = arg;

	switch (event) {
	case RTE_MEMPOOL_EVENT_READY:
		if (mlx5_dev_mempool_register(cdev, mp, false) < 0)
			DRV_LOG(ERR,
				"Failed to register new mempool %s for PD %p: %s",
				mp->name, cdev->pd, rte_strerror(rte_errno));
		break;
	case RTE_MEMPOOL_EVENT_DESTROY:
		mlx5_dev_mempool_unregister(cdev, mp);
		break;
	}
}

int
mlx5_dev_mempool_subscribe(struct mlx5_common_device *cdev)
{
	int ret = 0;

	if (!cdev->config.mr_mempool_reg_en)
		return 0;
	rte_rwlock_write_lock(&cdev->mr_scache.mprwlock);
	if (cdev->mr_scache.mp_cb_registered)
		goto exit;
	/* Callback for this device may already be registered. */
	ret = rte_mempool_event_callback_register(mlx5_dev_mempool_event_cb,
						  cdev);
	if (ret != 0 && rte_errno != EEXIST)
		goto exit;
	/* Register mempools only once for this device. */
	if (ret == 0)
		rte_mempool_walk(mlx5_dev_mempool_register_cb, cdev);
	ret = 0;
	cdev->mr_scache.mp_cb_registered = 1;
exit:
	rte_rwlock_write_unlock(&cdev->mr_scache.mprwlock);
	return ret;
}

static void
mlx5_dev_mempool_unsubscribe(struct mlx5_common_device *cdev)
{
	int ret;

	if (!cdev->mr_scache.mp_cb_registered ||
	    !cdev->config.mr_mempool_reg_en)
		return;
	/* Stop watching for mempool events and unregister all mempools. */
	ret = rte_mempool_event_callback_unregister(mlx5_dev_mempool_event_cb,
						    cdev);
	if (ret == 0)
		rte_mempool_walk(mlx5_dev_mempool_unregister_cb, cdev);
}

/**
 * Callback for memory event.
 *
 * @param event_type
 *   Memory event type.
 * @param addr
 *   Address of memory.
 * @param len
 *   Size of memory.
 */
static void
mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
		     size_t len, void *arg __rte_unused)
{
	struct mlx5_common_device *cdev;

	/* Must be called from the primary process. */
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	switch (event_type) {
	case RTE_MEM_EVENT_FREE:
		pthread_mutex_lock(&devices_list_lock);
		/* Iterate all the existing mlx5 devices. */
		TAILQ_FOREACH(cdev, &devices_list, next)
			mlx5_free_mr_by_addr(&cdev->mr_scache,
					     mlx5_os_get_ctx_device_name
							(cdev->ctx),
					     addr, len);
		pthread_mutex_unlock(&devices_list_lock);
		break;
	case RTE_MEM_EVENT_ALLOC:
	default:
		break;
	}
}
/**
 * Release all global HW resources of the device context.
 *
 * @param cdev
 *   Pointer to mlx5 device structure.
 */
static void
mlx5_dev_hw_global_release(struct mlx5_common_device *cdev)
{
	if (cdev->pd != NULL) {
		claim_zero(mlx5_os_pd_release(cdev));
		cdev->pd = NULL;
	}
	if (cdev->ctx != NULL) {
		claim_zero(mlx5_glue->close_device(cdev->ctx));
		cdev->ctx = NULL;
	}
}

/**
 * Initialize all global HW resources of the device context.
 *
 * @param cdev
 *   Pointer to mlx5 device structure.
 * @param classes
 *   Chosen classes coming from the user device arguments.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_dev_hw_global_prepare(struct mlx5_common_device *cdev, uint32_t classes)
{
	int ret;

	/* Create context device */
	ret = mlx5_os_open_device(cdev, classes);
	if (ret < 0)
		return ret;
	/*
	 * When the CTX is created by Verbs, querying the HCA attributes is
	 * unsupported. When the CTX is imported, we cannot know whether it
	 * was created by DevX or Verbs, so we use the query HCA attributes
	 * function to check it.
	 */
	if (cdev->config.devx || cdev->config.device_fd != MLX5_ARG_UNSET) {
		/* Query HCA attributes. */
		ret = mlx5_devx_cmd_query_hca_attr(cdev->ctx,
						   &cdev->config.hca_attr);
		if (ret) {
			DRV_LOG(ERR, "Unable to read HCA caps in DevX mode.");
			rte_errno = ENOTSUP;
			goto error;
		}
		cdev->config.devx = 1;
	}
	DRV_LOG(DEBUG, "DevX is %ssupported.", cdev->config.devx ? "" : "NOT ");
	/* Prepare Protection Domain object and extract its pdn. */
	ret = mlx5_os_pd_prepare(cdev);
	if (ret)
		goto error;
	return 0;
error:
	mlx5_dev_hw_global_release(cdev);
	return ret;
}

static void
mlx5_common_dev_release(struct mlx5_common_device *cdev)
{
	pthread_mutex_lock(&devices_list_lock);
	TAILQ_REMOVE(&devices_list, cdev, next);
	pthread_mutex_unlock(&devices_list_lock);
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		if (TAILQ_EMPTY(&devices_list))
			rte_mem_event_callback_unregister("MLX5_MEM_EVENT_CB",
							  NULL);
		mlx5_dev_mempool_unsubscribe(cdev);
		mlx5_mr_release_cache(&cdev->mr_scache);
		mlx5_dev_hw_global_release(cdev);
	}
	rte_free(cdev);
}

static struct mlx5_common_device *
mlx5_common_dev_create(struct rte_device *eal_dev, uint32_t classes,
		       struct mlx5_kvargs_ctrl *mkvlist)
{
	struct mlx5_common_device *cdev;
	int ret;

	cdev = rte_zmalloc("mlx5_common_device", sizeof(*cdev), 0);
	if (!cdev) {
		DRV_LOG(ERR, "Device allocation failure.");
		rte_errno = ENOMEM;
		return NULL;
	}
	cdev->dev = eal_dev;
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		goto exit;
	/* Parse device parameters. */
	ret = mlx5_common_config_get(mkvlist, &cdev->config);
	if (ret < 0) {
		DRV_LOG(ERR, "Failed to process device arguments: %s",
			strerror(rte_errno));
		rte_free(cdev);
		return NULL;
	}
	mlx5_malloc_mem_select(cdev->config.sys_mem_en);
	/* Initialize all global HW resources of the device context. */
	ret = mlx5_dev_hw_global_prepare(cdev, classes);
	if (ret) {
		DRV_LOG(ERR, "Failed to initialize device context.");
		rte_free(cdev);
		return NULL;
	}
	/* Initialize global MR cache resources and update its functions. */
	ret = mlx5_mr_create_cache(&cdev->mr_scache, eal_dev->numa_node);
	if (ret) {
		DRV_LOG(ERR, "Failed to initialize global MR share cache.");
		mlx5_dev_hw_global_release(cdev);
		rte_free(cdev);
		return NULL;
	}
	/* Register callback function for global shared MR cache management. */
	if (TAILQ_EMPTY(&devices_list))
		rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
						mlx5_mr_mem_event_cb, NULL);
exit:
	pthread_mutex_lock(&devices_list_lock);
	TAILQ_INSERT_HEAD(&devices_list, cdev, next);
	pthread_mutex_unlock(&devices_list_lock);
	return cdev;
}
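/*
 * Illustrative probing-again sequence (hypothetical device): the same device
 * may be probed once per class, e.g. first with "class=eth" and later with
 * "class=regex". On the second probe the common parameters
 * (mr_ext_memseg_en, mr_mempool_reg_en, cmd_fd, pd_handle, sys_mem_en,
 * sq_db_nc) must match the values used on the first probe, otherwise the
 * validation below fails with EINVAL.
 */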
/**
 * Validate common devargs when probing again.
 *
 * When a common device is probed again, it cannot change its configuration.
 * If the user requests an incompatible configuration in the devargs, it is
 * an error. This function checks the match between:
 * - The common device configuration requested by the probe-again devargs.
 * - The existing common device configuration.
 *
 * @param cdev
 *   Pointer to mlx5 device structure.
 * @param mkvlist
 *   Pointer to mlx5 kvargs control, can be NULL if there is no devargs.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_common_probe_again_args_validate(struct mlx5_common_device *cdev,
				      struct mlx5_kvargs_ctrl *mkvlist)
{
	struct mlx5_common_dev_config *config;
	int ret;

	/* Secondary processes should not handle devargs. */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;
	/* Probing again does not have to provide devargs. */
	if (mkvlist == NULL)
		return 0;
	config = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
			     sizeof(struct mlx5_common_dev_config),
			     RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
	if (config == NULL) {
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/*
	 * Create a temporary common configuration structure according to the
	 * new devargs attached in the probing again.
	 */
	ret = mlx5_common_config_get(mkvlist, config);
	if (ret) {
		DRV_LOG(ERR, "Failed to process device configuration: %s",
			strerror(rte_errno));
		mlx5_free(config);
		return ret;
	}
	/*
	 * Check the match between the temporary structure and the existing
	 * common device structure.
	 */
	if (cdev->config.mr_ext_memseg_en != config->mr_ext_memseg_en) {
		DRV_LOG(ERR, "\"" MLX5_MR_EXT_MEMSEG_EN "\" "
			"configuration mismatch for device %s.",
			cdev->dev->name);
		goto error;
	}
	if (cdev->config.mr_mempool_reg_en != config->mr_mempool_reg_en) {
		DRV_LOG(ERR, "\"" MLX5_MR_MEMPOOL_REG_EN "\" "
			"configuration mismatch for device %s.",
			cdev->dev->name);
		goto error;
	}
	if (cdev->config.device_fd != config->device_fd) {
		DRV_LOG(ERR, "\"" MLX5_DEVICE_FD "\" "
			"configuration mismatch for device %s.",
			cdev->dev->name);
		goto error;
	}
	if (cdev->config.pd_handle != config->pd_handle) {
		DRV_LOG(ERR, "\"" MLX5_PD_HANDLE "\" "
			"configuration mismatch for device %s.",
			cdev->dev->name);
		goto error;
	}
	if (cdev->config.sys_mem_en != config->sys_mem_en) {
		DRV_LOG(ERR, "\"" MLX5_SYS_MEM_EN "\" "
			"configuration mismatch for device %s.",
			cdev->dev->name);
		goto error;
	}
	if (cdev->config.dbnc != config->dbnc) {
		DRV_LOG(ERR, "\"" MLX5_SQ_DB_NC "\" "
			"configuration mismatch for device %s.",
			cdev->dev->name);
		goto error;
	}
	mlx5_free(config);
	return 0;
error:
	mlx5_free(config);
	rte_errno = EINVAL;
	return -rte_errno;
}

static int
drivers_remove(struct mlx5_common_device *cdev, uint32_t enabled_classes)
{
	struct mlx5_class_driver *driver;
	int local_ret = -ENODEV;
	unsigned int i = 0;
	int ret = 0;

	while (enabled_classes) {
		driver = driver_get(RTE_BIT64(i));
		if (driver != NULL) {
			local_ret = driver->remove(cdev);
			if (local_ret == 0)
				cdev->classes_loaded &= ~RTE_BIT64(i);
			else if (ret == 0)
				ret = local_ret;
		}
		enabled_classes &= ~RTE_BIT64(i);
		i++;
	}
	if (local_ret != 0 && ret == 0)
		ret = local_ret;
	return ret;
}
static int
drivers_probe(struct mlx5_common_device *cdev, uint32_t user_classes,
	      struct mlx5_kvargs_ctrl *mkvlist)
{
	struct mlx5_class_driver *driver;
	uint32_t enabled_classes = 0;
	bool already_loaded;
	int ret = -EINVAL;

	TAILQ_FOREACH(driver, &drivers_list, next) {
		if ((driver->drv_class & user_classes) == 0)
			continue;
		if (!mlx5_bus_match(driver, cdev->dev))
			continue;
		already_loaded = cdev->classes_loaded & driver->drv_class;
		if (already_loaded && driver->probe_again == 0) {
			DRV_LOG(ERR, "Device %s is already probed",
				cdev->dev->name);
			ret = -EEXIST;
			goto probe_err;
		}
		ret = driver->probe(cdev, mkvlist);
		if (ret < 0) {
			DRV_LOG(ERR, "Failed to load driver %s",
				driver->name);
			goto probe_err;
		}
		enabled_classes |= driver->drv_class;
	}
	if (!ret) {
		cdev->classes_loaded |= enabled_classes;
		return 0;
	}
probe_err:
	/*
	 * Remove only drivers which were not probed before this probe
	 * instance, but have already been probed before this failure.
	 */
	enabled_classes &= ~cdev->classes_loaded;
	drivers_remove(cdev, enabled_classes);
	return ret;
}

int
mlx5_common_dev_probe(struct rte_device *eal_dev)
{
	struct mlx5_common_device *cdev;
	struct mlx5_kvargs_ctrl mkvlist;
	struct mlx5_kvargs_ctrl *mkvlist_p = NULL;
	uint32_t classes = 0;
	bool new_device = false;
	int ret;

	DRV_LOG(INFO, "probe device \"%s\".", eal_dev->name);
	if (eal_dev->devargs != NULL)
		mkvlist_p = &mkvlist;
	ret = mlx5_kvargs_prepare(mkvlist_p, eal_dev->devargs);
	if (ret < 0) {
		DRV_LOG(ERR, "Unsupported device arguments: %s",
			eal_dev->devargs->args);
		return ret;
	}
	ret = parse_class_options(eal_dev->devargs, mkvlist_p);
	if (ret < 0) {
		DRV_LOG(ERR, "Unsupported mlx5 class type: %s",
			eal_dev->devargs->args);
		goto class_err;
	}
	classes = ret;
	if (classes == 0)
		/* Default to net class. */
		classes = MLX5_CLASS_ETH;
	/*
	 * The MLX5 common driver supports probing again in two scenarios:
	 * - Adding a new driver under an existing common device (regardless
	 *   of the driver's own support for probing again).
	 * - Transferring the probing-again support of the drivers themselves.
	 *
	 * In both scenarios the existing device is reused. Here it looks for
	 * a device that matches the rte device; if it exists, the requested
	 * classes are probed with this device.
	 */
	cdev = to_mlx5_device(eal_dev);
	if (!cdev) {
		/* It isn't probing again, create a new device. */
		cdev = mlx5_common_dev_create(eal_dev, classes, mkvlist_p);
		if (!cdev) {
			ret = -ENOMEM;
			goto class_err;
		}
		new_device = true;
	} else {
		/* It is probing again, validate that common devargs match. */
		ret = mlx5_common_probe_again_args_validate(cdev, mkvlist_p);
		if (ret) {
			DRV_LOG(ERR,
				"Probe again parameters are not compatible: %s",
				strerror(rte_errno));
			goto class_err;
		}
	}
	/*
	 * Validate the combination here.
	 * For a new device, the classes_loaded field is 0, so only the
	 * classes given as user device arguments are checked.
	 */
	ret = is_valid_class_combination(classes | cdev->classes_loaded);
	if (ret != 0) {
		DRV_LOG(ERR, "Unsupported mlx5 classes combination.");
		goto class_err;
	}
	ret = drivers_probe(cdev, classes, mkvlist_p);
	if (ret)
		goto class_err;
	/*
	 * Validate that all devargs have been used; an unused key is an
	 * unknown key.
	 * When probe-again validation fails, the added drivers are not
	 * removed here but when the device is released.
	 */
	ret = mlx5_kvargs_validate(mkvlist_p);
	if (ret)
		goto class_err;
	mlx5_kvargs_release(mkvlist_p);
	return 0;
class_err:
	if (new_device) {
		/*
		 * For a new device, classes_loaded is always 0 before the
		 * drivers_probe function.
		 */
		if (cdev->classes_loaded)
			drivers_remove(cdev, cdev->classes_loaded);
		mlx5_common_dev_release(cdev);
	}
	mlx5_kvargs_release(mkvlist_p);
	return ret;
}

int
mlx5_common_dev_remove(struct rte_device *eal_dev)
{
	struct mlx5_common_device *cdev;
	int ret;

	cdev = to_mlx5_device(eal_dev);
	if (!cdev)
		return -ENODEV;
	/* Matching device found, cleanup and unload drivers. */
	ret = drivers_remove(cdev, cdev->classes_loaded);
	if (ret == 0)
		mlx5_common_dev_release(cdev);
	return ret;
}
/**
 * Callback to DMA map external memory to a device.
 *
 * @param rte_dev
 *   Pointer to the generic device.
 * @param addr
 *   Starting virtual address of memory to be mapped.
 * @param iova
 *   Starting IOVA address of memory to be mapped.
 * @param len
 *   Length of memory segment being mapped.
 *
 * @return
 *   0 on success, negative value on error.
 */
int
mlx5_common_dev_dma_map(struct rte_device *rte_dev, void *addr,
			uint64_t iova __rte_unused, size_t len)
{
	struct mlx5_common_device *dev;
	struct mlx5_mr_btree *bt;
	struct mlx5_mr *mr;

	dev = to_mlx5_device(rte_dev);
	if (!dev) {
		DRV_LOG(WARNING,
			"Unable to find matching mlx5 device to device %s",
			rte_dev->name);
		rte_errno = ENODEV;
		return -1;
	}
	mr = mlx5_create_mr_ext(dev->pd, (uintptr_t)addr, len,
				SOCKET_ID_ANY, dev->mr_scache.reg_mr_cb);
	if (!mr) {
		DRV_LOG(WARNING, "Device %s unable to DMA map", rte_dev->name);
		rte_errno = EINVAL;
		return -1;
	}
try_insert:
	rte_rwlock_write_lock(&dev->mr_scache.rwlock);
	bt = &dev->mr_scache.cache;
	if (bt->len == bt->size) {
		uint32_t size;
		int ret;

		size = bt->size + 1;
		MLX5_ASSERT(size > bt->size);
		/*
		 * Avoid deadlock (numbers show the sequence of events):
		 *    mlx5_mr_create_primary():
		 *        1) take EAL memory lock
		 *        3) take MR lock
		 *    this function:
		 *        2) take MR lock
		 *        4) take EAL memory lock while allocating the new cache
		 * Releasing the MR lock before step 4
		 * allows another thread to execute step 3.
		 */
		rte_rwlock_write_unlock(&dev->mr_scache.rwlock);
		ret = mlx5_mr_expand_cache(&dev->mr_scache, size,
					   rte_dev->numa_node);
		if (ret < 0) {
			mlx5_mr_free(mr, dev->mr_scache.dereg_mr_cb);
			rte_errno = ret;
			return -1;
		}
		goto try_insert;
	}
	LIST_INSERT_HEAD(&dev->mr_scache.mr_list, mr, mr);
	/* Insert to the global cache table. */
	mlx5_mr_insert_cache(&dev->mr_scache, mr);
	rte_rwlock_write_unlock(&dev->mr_scache.rwlock);
	return 0;
}
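/*
 * Illustrative application-side usage (a sketch, not part of this driver):
 * external memory is typically registered with the EAL first and then mapped
 * to the device, which ends up invoking the callback above:
 *
 *   rte_extmem_register(addr, len, NULL, 0, pg_sz);
 *   rte_dev_dma_map(eal_dev, addr, iova, len);
 *
 * and symmetrically rte_dev_dma_unmap()/rte_extmem_unregister() for the
 * callback below.
 */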
/**
 * Callback to DMA unmap external memory from a device.
 *
 * @param rte_dev
 *   Pointer to the generic device.
 * @param addr
 *   Starting virtual address of memory to be unmapped.
 * @param iova
 *   Starting IOVA address of memory to be unmapped.
 * @param len
 *   Length of memory segment being unmapped.
 *
 * @return
 *   0 on success, negative value on error.
 */
int
mlx5_common_dev_dma_unmap(struct rte_device *rte_dev, void *addr,
			  uint64_t iova __rte_unused, size_t len __rte_unused)
{
	struct mlx5_common_device *dev;
	struct mr_cache_entry entry;
	struct mlx5_mr *mr;

	dev = to_mlx5_device(rte_dev);
	if (!dev) {
		DRV_LOG(WARNING,
			"Unable to find matching mlx5 device to device %s.",
			rte_dev->name);
		rte_errno = ENODEV;
		return -1;
	}
	rte_rwlock_read_lock(&dev->mr_scache.rwlock);
	mr = mlx5_mr_lookup_list(&dev->mr_scache, &entry, (uintptr_t)addr);
	if (!mr) {
		rte_rwlock_read_unlock(&dev->mr_scache.rwlock);
		DRV_LOG(WARNING,
			"Address 0x%" PRIxPTR " wasn't registered to device %s",
			(uintptr_t)addr, rte_dev->name);
		rte_errno = EINVAL;
		return -1;
	}
	LIST_REMOVE(mr, mr);
	DRV_LOG(DEBUG, "MR(%p) is removed from list.", (void *)mr);
	mlx5_mr_free(mr, dev->mr_scache.dereg_mr_cb);
	mlx5_mr_rebuild_cache(&dev->mr_scache);
	/*
	 * No explicit wmb is needed after updating dev_gen due to
	 * store-release ordering in unlock that provides the
	 * implicit barrier at the software visible level.
	 */
	++dev->mr_scache.dev_gen;
	DRV_LOG(DEBUG, "Broadcasting local cache flush, gen=%d.",
		dev->mr_scache.dev_gen);
	rte_rwlock_read_unlock(&dev->mr_scache.rwlock);
	return 0;
}

void
mlx5_class_driver_register(struct mlx5_class_driver *driver)
{
	mlx5_common_driver_on_register_pci(driver);
	TAILQ_INSERT_TAIL(&drivers_list, driver, next);
}

static void mlx5_common_driver_init(void)
{
	mlx5_common_pci_init();
#ifdef RTE_EXEC_ENV_LINUX
	mlx5_common_auxiliary_init();
#endif
}

static bool mlx5_common_initialized;

/**
 * One-time initialization routine for the run-time dependency on the glue
 * library for multiple PMDs. Each mlx5 PMD that depends on the mlx5_common
 * module must invoke it in its constructor.
 */
void
mlx5_common_init(void)
{
	if (mlx5_common_initialized)
		return;

	pthread_mutex_init(&devices_list_lock, NULL);
	mlx5_glue_constructor();
	mlx5_common_driver_init();
	mlx5_common_initialized = true;
}
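/*
 * Illustrative constructor of a dependent PMD (a sketch with a hypothetical
 * driver structure named "mlx5_foo_driver"); each class driver is expected
 * to call mlx5_common_init() before registering itself:
 *
 *   RTE_INIT(rte_mlx5_foo_init)
 *   {
 *       mlx5_common_init();
 *       if (mlx5_glue != NULL)
 *           mlx5_class_driver_register(&mlx5_foo_driver);
 *   }
 */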
/**
 * This function is responsible for initializing the variable
 * haswell_broadwell_cpu by checking if the CPU is Intel
 * and reading the data returned from mlx5_cpu_id().
 * Since Haswell and Broadwell CPUs do not show improved performance
 * when using relaxed ordering, we want to check the CPU type before
 * deciding whether to enable RO or not.
 * If the CPU is Haswell or Broadwell the variable is set to 1,
 * otherwise it is 0.
 */
RTE_INIT_PRIO(mlx5_is_haswell_broadwell_cpu, LOG)
{
#ifdef RTE_ARCH_X86_64
	unsigned int broadwell_models[4] = {0x3d, 0x47, 0x4F, 0x56};
	unsigned int haswell_models[4] = {0x3c, 0x3f, 0x45, 0x46};
	unsigned int i, model, family, brand_id, vendor;
	unsigned int signature_intel_ebx = 0x756e6547;
	unsigned int extended_model;
	unsigned int eax = 0;
	unsigned int ebx = 0;
	unsigned int ecx = 0;
	unsigned int edx = 0;
	int max_level;

	mlx5_cpu_id(0, &eax, &ebx, &ecx, &edx);
	vendor = ebx;
	max_level = eax;
	if (max_level < 1) {
		haswell_broadwell_cpu = 0;
		return;
	}
	mlx5_cpu_id(1, &eax, &ebx, &ecx, &edx);
	model = (eax >> 4) & 0x0f;
	family = (eax >> 8) & 0x0f;
	brand_id = ebx & 0xff;
	extended_model = (eax >> 12) & 0xf0;
	/* Check if the processor is Haswell or Broadwell */
	if (vendor == signature_intel_ebx) {
		if (family == 0x06)
			model += extended_model;
		if (brand_id == 0 && family == 0x6) {
			for (i = 0; i < RTE_DIM(broadwell_models); i++)
				if (model == broadwell_models[i]) {
					haswell_broadwell_cpu = 1;
					return;
				}
			for (i = 0; i < RTE_DIM(haswell_models); i++)
				if (model == haswell_models[i]) {
					haswell_broadwell_cpu = 1;
					return;
				}
		}
	}
#endif
	haswell_broadwell_cpu = 0;
}
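/*
 * Worked example for the model computation above (CPUID leaf 1, EAX):
 * the displayed model is (extended_model_id << 4) | model_id. For a
 * Broadwell part reporting model_id 0xd and extended_model_id 0x3,
 * "extended_model" above already holds 0x30, so model = 0x30 + 0x0d = 0x3d,
 * which matches broadwell_models[0].
 */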
/**
 * Allocate the User Access Region with DevX on the specified device.
 * This routine handles the following UAR allocation issues:
 *
 *  - Try to allocate the UAR with the most appropriate memory mapping
 *    type from the ones supported by the host.
 *
 *  - Try to allocate the UAR with a non-NULL base address. OFED 5.0.x and
 *    upstream rdma-core before v29 returned NULL as the UAR base address
 *    if the UAR was not the first object in the UAR page.
 *    It caused a PMD failure, so we should try to get another UAR until
 *    we get the first one with a non-NULL base address returned.
 *
 * @param [in] cdev
 *   Pointer to mlx5 device structure to perform allocation on its context.
 *
 * @return
 *   UAR object pointer on success, NULL otherwise and rte_errno is set.
 */
static void *
mlx5_devx_alloc_uar(struct mlx5_common_device *cdev)
{
	void *uar;
	uint32_t retry, uar_mapping;
	void *base_addr;

	for (retry = 0; retry < MLX5_ALLOC_UAR_RETRY; ++retry) {
#ifdef MLX5DV_UAR_ALLOC_TYPE_NC
		/* Control the mapping type according to the settings. */
		uar_mapping = (cdev->config.dbnc == MLX5_SQ_DB_NCACHED) ?
			      MLX5DV_UAR_ALLOC_TYPE_NC : MLX5DV_UAR_ALLOC_TYPE_BF;
#else
		/*
		 * It seems we have no way to control the memory mapping type
		 * for the UAR; the default "Write-Combining" type is assumed.
		 */
		uar_mapping = 0;
#endif
		uar = mlx5_glue->devx_alloc_uar(cdev->ctx, uar_mapping);
#ifdef MLX5DV_UAR_ALLOC_TYPE_NC
		if (!uar && uar_mapping == MLX5DV_UAR_ALLOC_TYPE_BF) {
			/*
			 * In some environments, such as virtual machines,
			 * the Write-Combining mapping might not be supported
			 * and UAR allocation fails. Try the "Non-Cached"
			 * mapping for that case.
			 */
			DRV_LOG(DEBUG, "Failed to allocate DevX UAR (BF)");
			uar_mapping = MLX5DV_UAR_ALLOC_TYPE_NC;
			uar = mlx5_glue->devx_alloc_uar(cdev->ctx, uar_mapping);
		} else if (!uar && uar_mapping == MLX5DV_UAR_ALLOC_TYPE_NC) {
			/*
			 * If Verbs/kernel does not support "Non-Cached",
			 * try the "Write-Combining".
			 */
			DRV_LOG(DEBUG, "Failed to allocate DevX UAR (NC)");
			uar_mapping = MLX5DV_UAR_ALLOC_TYPE_BF;
			uar = mlx5_glue->devx_alloc_uar(cdev->ctx, uar_mapping);
		}
#endif
		if (!uar) {
			DRV_LOG(ERR, "Failed to allocate DevX UAR (BF/NC)");
			rte_errno = ENOMEM;
			goto exit;
		}
		base_addr = mlx5_os_get_devx_uar_base_addr(uar);
		if (base_addr)
			break;
		/*
		 * The UARs are allocated by rdma_core within the
		 * IB device context, on context closure all UARs
		 * will be freed, should be no memory/object leakage.
		 */
		DRV_LOG(DEBUG, "Retrying to allocate DevX UAR");
		uar = NULL;
	}
	/* Check whether we finally succeeded with a valid UAR allocation. */
	if (!uar) {
		DRV_LOG(ERR, "Failed to allocate DevX UAR (NULL base)");
		rte_errno = ENOMEM;
	}
	/*
	 * Returning void * instead of struct mlx5dv_devx_uar *
	 * is for compatibility with older rdma-core library headers.
	 */
exit:
	return uar;
}

void
mlx5_devx_uar_release(struct mlx5_uar *uar)
{
	if (uar->obj != NULL)
		mlx5_glue->devx_free_uar(uar->obj);
	memset(uar, 0, sizeof(*uar));
}

int
mlx5_devx_uar_prepare(struct mlx5_common_device *cdev, struct mlx5_uar *uar)
{
	off_t uar_mmap_offset;
	const size_t page_size = rte_mem_page_size();
	void *base_addr;
	void *uar_obj;

	if (page_size == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get mem page size");
		rte_errno = ENOMEM;
		return -1;
	}
	uar_obj = mlx5_devx_alloc_uar(cdev);
	if (uar_obj == NULL || mlx5_os_get_devx_uar_reg_addr(uar_obj) == NULL) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to allocate UAR.");
		return -1;
	}
	uar->obj = uar_obj;
	uar_mmap_offset = mlx5_os_get_devx_uar_mmap_offset(uar_obj);
	base_addr = mlx5_os_get_devx_uar_base_addr(uar_obj);
	uar->dbnc = mlx5_db_map_type_get(uar_mmap_offset, page_size);
	uar->bf_db.db = mlx5_os_get_devx_uar_reg_addr(uar_obj);
	uar->cq_db.db = RTE_PTR_ADD(base_addr, MLX5_CQ_DOORBELL);
#ifndef RTE_ARCH_64
	rte_spinlock_init(&uar->bf_sl);
	rte_spinlock_init(&uar->cq_sl);
	uar->bf_db.sl_p = &uar->bf_sl;
	uar->cq_db.sl_p = &uar->cq_sl;
#endif /* RTE_ARCH_64 */
	return 0;
}

RTE_PMD_EXPORT_NAME(mlx5_common_driver, __COUNTER__);