/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2012 6WIND S.A.
 * Copyright 2012 Mellanox Technologies, Ltd
 */

/**
 * @file
 * mlx4 driver initialization.
 */

#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_common.h>
#include <rte_config.h>
#include <rte_dev.h>
#include <rte_errno.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_interrupts.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>

#include "mlx4.h"
#include "mlx4_glue.h"
#include "mlx4_flow.h"
#include "mlx4_mr.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"

static const char *MZ_MLX4_PMD_SHARED_DATA = "mlx4_pmd_shared_data";

/* Shared memory between primary and secondary processes. */
struct mlx4_shared_data *mlx4_shared_data;

/* Spinlock for mlx4_shared_data allocation. */
static rte_spinlock_t mlx4_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* Process local data for secondary processes. */
static struct mlx4_local_data mlx4_local_data;

/** Driver-specific log messages type. */
int mlx4_logtype;

/** Configuration structure for device arguments. */
struct mlx4_conf {
	struct {
		uint32_t present; /**< Bit-field for existing ports. */
		uint32_t enabled; /**< Bit-field for user-enabled ports. */
	} ports;
	int mr_ext_memseg_en;
	/** Whether memseg should be extended for MR creation. */
};

/* Available parameters list. */
const char *pmd_mlx4_init_params[] = {
	MLX4_PMD_PORT_KVARG,
	MLX4_MR_EXT_MEMSEG_EN_KVARG,
	NULL,
};

static void mlx4_dev_stop(struct rte_eth_dev *dev);

/**
 * Initialize shared data between the primary and secondary processes.
 *
 * A memzone is reserved by the primary process and secondary processes
 * attach to it.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_init_shared_data(void)
{
	const struct rte_memzone *mz;
	int ret = 0;

	rte_spinlock_lock(&mlx4_shared_data_lock);
	if (mlx4_shared_data == NULL) {
		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
			/* Allocate shared memory. */
			mz = rte_memzone_reserve(MZ_MLX4_PMD_SHARED_DATA,
						 sizeof(*mlx4_shared_data),
						 SOCKET_ID_ANY, 0);
			if (mz == NULL) {
				ERROR("Cannot allocate mlx4 shared data\n");
				ret = -rte_errno;
				goto error;
			}
			mlx4_shared_data = mz->addr;
			memset(mlx4_shared_data, 0, sizeof(*mlx4_shared_data));
			rte_spinlock_init(&mlx4_shared_data->lock);
		} else {
			/* Lookup allocated shared memory. */
			mz = rte_memzone_lookup(MZ_MLX4_PMD_SHARED_DATA);
			if (mz == NULL) {
				ERROR("Cannot attach mlx4 shared data\n");
				ret = -rte_errno;
				goto error;
			}
			mlx4_shared_data = mz->addr;
			memset(&mlx4_local_data, 0, sizeof(mlx4_local_data));
		}
	}
error:
	rte_spinlock_unlock(&mlx4_shared_data_lock);
	return ret;
}

#ifdef HAVE_IBV_MLX4_BUF_ALLOCATORS
/**
 * Verbs callback to allocate memory. This function should allocate the
 * requested size from memory residing inside a huge page.
 * Note that all allocations must respect the alignment imposed by
 * libmlx4 (i.e. currently sysconf(_SC_PAGESIZE)).
 *
 * @param[in] size
 *   The size in bytes of the memory to allocate.
 * @param[in] data
 *   A pointer to the callback data.
 *
 * @return
 *   Allocated buffer, NULL otherwise and rte_errno is set.
 */
static void *
mlx4_alloc_verbs_buf(size_t size, void *data)
{
	struct mlx4_priv *priv = data;
	void *ret;
	size_t alignment = sysconf(_SC_PAGESIZE);
	unsigned int socket = SOCKET_ID_ANY;

	if (priv->verbs_alloc_ctx.type == MLX4_VERBS_ALLOC_TYPE_TX_QUEUE) {
		const struct txq *txq = priv->verbs_alloc_ctx.obj;

		socket = txq->socket;
	} else if (priv->verbs_alloc_ctx.type ==
		   MLX4_VERBS_ALLOC_TYPE_RX_QUEUE) {
		const struct rxq *rxq = priv->verbs_alloc_ctx.obj;

		socket = rxq->socket;
	}
	assert(data != NULL);
	ret = rte_malloc_socket(__func__, size, alignment, socket);
	if (!ret && size)
		rte_errno = ENOMEM;
	return ret;
}

/**
 * Verbs callback to free memory.
 *
 * @param[in] ptr
 *   A pointer to the memory to free.
 * @param[in] data
 *   A pointer to the callback data.
 */
static void
mlx4_free_verbs_buf(void *ptr, void *data __rte_unused)
{
	assert(data != NULL);
	rte_free(ptr);
}
#endif

/**
 * Initialize process private data structure.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_proc_priv_init(struct rte_eth_dev *dev)
{
	struct mlx4_proc_priv *ppriv;
	size_t ppriv_size;

	/*
	 * UAR register table follows the process private structure. BlueFlame
	 * registers for Tx queues are stored in the table.
	 */
	ppriv_size = sizeof(struct mlx4_proc_priv) +
		     dev->data->nb_tx_queues * sizeof(void *);
	ppriv = rte_malloc_socket("mlx4_proc_priv", ppriv_size,
				  RTE_CACHE_LINE_SIZE, dev->device->numa_node);
	if (!ppriv) {
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	ppriv->uar_table_sz = ppriv_size;
	dev->process_private = ppriv;
	return 0;
}

/**
 * Un-initialize process private data structure.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx4_proc_priv_uninit(struct rte_eth_dev *dev)
{
	if (!dev->process_private)
		return;
	rte_free(dev->process_private);
	dev->process_private = NULL;
}
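
/*
 * Illustrative layout note, assuming the struct mlx4_proc_priv
 * definition from mlx4.h ends with a flexible array member for UAR
 * registers; the allocation performed by mlx4_proc_priv_init() above
 * then looks like:
 *
 *   +--------------------------+
 *   | struct mlx4_proc_priv    | <- uar_table_sz, bookkeeping
 *   +--------------------------+
 *   | void *[0]                | <- BlueFlame register, Tx queue 0
 *   | ...                      |
 *   | void *[nb_tx_queues - 1] | <- BlueFlame register, last Tx queue
 *   +--------------------------+
 *
 * which is why ppriv_size adds nb_tx_queues pointers to the base
 * structure size.
 */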
/**
 * DPDK callback for Ethernet device configuration.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_dev_configure(struct rte_eth_dev *dev)
{
	struct mlx4_priv *priv = dev->data->dev_private;
	struct rte_flow_error error;
	int ret;

	/* Prepare internal flow rules. */
	ret = mlx4_flow_sync(priv, &error);
	if (ret) {
		ERROR("cannot set up internal flow rules (code %d, \"%s\"),"
		      " flow error type %d, cause %p, message: %s",
		      -ret, strerror(-ret), error.type, error.cause,
		      error.message ? error.message : "(unspecified)");
		goto exit;
	}
	ret = mlx4_intr_install(priv);
	if (ret) {
		ERROR("%p: interrupt handler installation failed",
		      (void *)dev);
		goto exit;
	}
	ret = mlx4_proc_priv_init(dev);
	if (ret) {
		ERROR("%p: process private data allocation failed",
		      (void *)dev);
		goto exit;
	}
exit:
	return ret;
}

/**
 * DPDK callback to start the device.
 *
 * Simulate device start by initializing common RSS resources and attaching
 * all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_dev_start(struct rte_eth_dev *dev)
{
	struct mlx4_priv *priv = dev->data->dev_private;
	struct rte_flow_error error;
	int ret;

	if (priv->started)
		return 0;
	DEBUG("%p: attaching configured flows to all RX queues", (void *)dev);
	priv->started = 1;
	ret = mlx4_rss_init(priv);
	if (ret) {
		ERROR("%p: cannot initialize RSS resources: %s",
		      (void *)dev, strerror(-ret));
		goto err;
	}
#ifndef NDEBUG
	mlx4_mr_dump_dev(dev);
#endif
	ret = mlx4_rxq_intr_enable(priv);
	if (ret) {
		ERROR("%p: interrupt handler installation failed",
		      (void *)dev);
		goto err;
	}
	ret = mlx4_flow_sync(priv, &error);
	if (ret) {
		ERROR("%p: cannot attach flow rules (code %d, \"%s\"),"
		      " flow error type %d, cause %p, message: %s",
		      (void *)dev,
		      -ret, strerror(-ret), error.type, error.cause,
		      error.message ? error.message : "(unspecified)");
		goto err;
	}
	rte_wmb();
	dev->tx_pkt_burst = mlx4_tx_burst;
	dev->rx_pkt_burst = mlx4_rx_burst;
	/* Enable datapath on secondary process. */
	mlx4_mp_req_start_rxtx(dev);
	return 0;
err:
	mlx4_dev_stop(dev);
	return ret;
}

/**
 * DPDK callback to stop the device.
 *
 * Simulate device stop by detaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx4_dev_stop(struct rte_eth_dev *dev)
{
	struct mlx4_priv *priv = dev->data->dev_private;

	if (!priv->started)
		return;
	DEBUG("%p: detaching flows from all RX queues", (void *)dev);
	priv->started = 0;
	dev->tx_pkt_burst = mlx4_tx_burst_removed;
	dev->rx_pkt_burst = mlx4_rx_burst_removed;
	rte_wmb();
	/* Disable datapath on secondary process. */
	mlx4_mp_req_stop_rxtx(dev);
	mlx4_flow_sync(priv, NULL);
	mlx4_rxq_intr_disable(priv);
	mlx4_rss_deinit(priv);
}
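
/*
 * Ordering note (summary of the two callbacks above): mlx4_dev_start()
 * fully sets up RSS, interrupts and flow rules, issues rte_wmb(), and
 * only then publishes the real burst callbacks; mlx4_dev_stop() does
 * the reverse, publishing the "removed" dummy callbacks and issuing
 * rte_wmb() before tearing resources down. Lcores polling the device
 * therefore never observe a real burst function whose resources are
 * missing.
 */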
/**
 * DPDK callback to close the device.
 *
 * Destroy all queues and objects, free memory.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx4_dev_close(struct rte_eth_dev *dev)
{
	struct mlx4_priv *priv = dev->data->dev_private;
	unsigned int i;

	DEBUG("%p: closing device \"%s\"",
	      (void *)dev,
	      ((priv->ctx != NULL) ? priv->ctx->device->name : ""));
	dev->rx_pkt_burst = mlx4_rx_burst_removed;
	dev->tx_pkt_burst = mlx4_tx_burst_removed;
	rte_wmb();
	/* Disable datapath on secondary process. */
	mlx4_mp_req_stop_rxtx(dev);
	mlx4_flow_clean(priv);
	mlx4_rss_deinit(priv);
	for (i = 0; i != dev->data->nb_rx_queues; ++i)
		mlx4_rx_queue_release(dev->data->rx_queues[i]);
	for (i = 0; i != dev->data->nb_tx_queues; ++i)
		mlx4_tx_queue_release(dev->data->tx_queues[i]);
	mlx4_proc_priv_uninit(dev);
	mlx4_mr_release(dev);
	if (priv->pd != NULL) {
		assert(priv->ctx != NULL);
		claim_zero(mlx4_glue->dealloc_pd(priv->pd));
		claim_zero(mlx4_glue->close_device(priv->ctx));
	} else
		assert(priv->ctx == NULL);
	mlx4_intr_uninstall(priv);
	memset(priv, 0, sizeof(*priv));
}

static const struct eth_dev_ops mlx4_dev_ops = {
	.dev_configure = mlx4_dev_configure,
	.dev_start = mlx4_dev_start,
	.dev_stop = mlx4_dev_stop,
	.dev_set_link_down = mlx4_dev_set_link_down,
	.dev_set_link_up = mlx4_dev_set_link_up,
	.dev_close = mlx4_dev_close,
	.link_update = mlx4_link_update,
	.promiscuous_enable = mlx4_promiscuous_enable,
	.promiscuous_disable = mlx4_promiscuous_disable,
	.allmulticast_enable = mlx4_allmulticast_enable,
	.allmulticast_disable = mlx4_allmulticast_disable,
	.mac_addr_remove = mlx4_mac_addr_remove,
	.mac_addr_add = mlx4_mac_addr_add,
	.mac_addr_set = mlx4_mac_addr_set,
	.set_mc_addr_list = mlx4_set_mc_addr_list,
	.stats_get = mlx4_stats_get,
	.stats_reset = mlx4_stats_reset,
	.fw_version_get = mlx4_fw_version_get,
	.dev_infos_get = mlx4_dev_infos_get,
	.dev_supported_ptypes_get = mlx4_dev_supported_ptypes_get,
	.vlan_filter_set = mlx4_vlan_filter_set,
	.rx_queue_setup = mlx4_rx_queue_setup,
	.tx_queue_setup = mlx4_tx_queue_setup,
	.rx_queue_release = mlx4_rx_queue_release,
	.tx_queue_release = mlx4_tx_queue_release,
	.flow_ctrl_get = mlx4_flow_ctrl_get,
	.flow_ctrl_set = mlx4_flow_ctrl_set,
	.mtu_set = mlx4_mtu_set,
	.filter_ctrl = mlx4_filter_ctrl,
	.rx_queue_intr_enable = mlx4_rx_intr_enable,
	.rx_queue_intr_disable = mlx4_rx_intr_disable,
	.is_removed = mlx4_is_removed,
};

/* Available operations from secondary process. */
static const struct eth_dev_ops mlx4_dev_sec_ops = {
	.stats_get = mlx4_stats_get,
	.stats_reset = mlx4_stats_reset,
	.fw_version_get = mlx4_fw_version_get,
	.dev_infos_get = mlx4_dev_infos_get,
};

/**
 * Get PCI information from struct ibv_device.
 *
 * @param device
 *   Pointer to Verbs device structure.
 * @param[out] pci_addr
 *   PCI bus address output buffer.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_ibv_device_to_pci_addr(const struct ibv_device *device,
			    struct rte_pci_addr *pci_addr)
{
	FILE *file;
	char line[32];
	MKSTR(path, "%s/device/uevent", device->ibdev_path);

	file = fopen(path, "rb");
	if (file == NULL) {
		rte_errno = errno;
		return -rte_errno;
	}
	while (fgets(line, sizeof(line), file) == line) {
		size_t len = strlen(line);
		int ret;

		/* Truncate long lines. */
		if (len == (sizeof(line) - 1))
			while (line[(len - 1)] != '\n') {
				ret = fgetc(file);
				if (ret == EOF)
					break;
				line[(len - 1)] = ret;
			}
		/* Extract information. */
		if (sscanf(line,
			   "PCI_SLOT_NAME="
			   "%" SCNx32 ":%" SCNx8 ":%" SCNx8 ".%" SCNx8 "\n",
			   &pci_addr->domain,
			   &pci_addr->bus,
			   &pci_addr->devid,
			   &pci_addr->function) == 4)
			break;
	}
	fclose(file);
	return 0;
}
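
/*
 * Example (illustrative): the uevent file parsed above, e.g.
 * /sys/class/infiniband/mlx4_0/device/uevent, contains lines such as
 *
 *   PCI_SLOT_NAME=0000:83:00.0
 *
 * which sscanf() splits into the domain, bus, devid and function
 * members of struct rte_pci_addr.
 */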
/**
 * Verify and store value for device argument.
 *
 * @param[in] key
 *   Key argument to verify.
 * @param[in] val
 *   Value associated with key.
 * @param[in, out] conf
 *   Shared configuration data.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_arg_parse(const char *key, const char *val, struct mlx4_conf *conf)
{
	unsigned long tmp;

	errno = 0;
	tmp = strtoul(val, NULL, 0);
	if (errno) {
		rte_errno = errno;
		WARN("%s: \"%s\" is not a valid integer", key, val);
		return -rte_errno;
	}
	if (strcmp(MLX4_PMD_PORT_KVARG, key) == 0) {
		uint32_t ports = rte_log2_u32(conf->ports.present + 1);

		if (tmp >= ports) {
			rte_errno = EINVAL;
			ERROR("port index %lu outside range [0,%" PRIu32 ")",
			      tmp, ports);
			return -rte_errno;
		}
		if (!(conf->ports.present & (1 << tmp))) {
			rte_errno = EINVAL;
			ERROR("invalid port index %lu", tmp);
			return -rte_errno;
		}
		conf->ports.enabled |= 1 << tmp;
	} else if (strcmp(MLX4_MR_EXT_MEMSEG_EN_KVARG, key) == 0) {
		conf->mr_ext_memseg_en = !!tmp;
	} else {
		rte_errno = EINVAL;
		WARN("%s: unknown parameter", key);
		return -rte_errno;
	}
	return 0;
}

/**
 * Parse device parameters.
 *
 * @param devargs
 *   Device arguments structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_args(struct rte_devargs *devargs, struct mlx4_conf *conf)
{
	struct rte_kvargs *kvlist;
	unsigned int arg_count;
	int ret = 0;
	int i;

	if (devargs == NULL)
		return 0;
	kvlist = rte_kvargs_parse(devargs->args, pmd_mlx4_init_params);
	if (kvlist == NULL) {
		rte_errno = EINVAL;
		ERROR("failed to parse kvargs");
		return -rte_errno;
	}
	/* Process parameters. */
	for (i = 0; pmd_mlx4_init_params[i]; ++i) {
		arg_count = rte_kvargs_count(kvlist, pmd_mlx4_init_params[i]);
		while (arg_count-- > 0) {
			ret = rte_kvargs_process(kvlist,
						 pmd_mlx4_init_params[i],
						 (int (*)(const char *,
							  const char *,
							  void *))
						 mlx4_arg_parse,
						 conf);
			if (ret != 0)
				goto free_kvlist;
		}
	}
free_kvlist:
	rte_kvargs_free(kvlist);
	return ret;
}
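
/*
 * Usage example (illustrative, assuming MLX4_PMD_PORT_KVARG and
 * MLX4_MR_EXT_MEMSEG_EN_KVARG expand to "port" and "mr_ext_memseg_en"
 * as defined in mlx4.h): probing a dual-port board with only its
 * second physical port enabled could use EAL device arguments such as
 *
 *   -w 0000:83:00.0,port=1,mr_ext_memseg_en=0
 *
 * Each key may appear several times; every occurrence goes through
 * mlx4_arg_parse() above.
 */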
/**
 * Interpret RSS capabilities reported by device.
 *
 * This function returns the set of usable Verbs RSS hash fields, kernel
 * quirks taken into account.
 *
 * @param ctx
 *   Verbs context.
 * @param pd
 *   Verbs protection domain.
 * @param device_attr_ex
 *   Extended device attributes to interpret.
 *
 * @return
 *   Usable RSS hash fields mask in Verbs format.
 */
static uint64_t
mlx4_hw_rss_sup(struct ibv_context *ctx, struct ibv_pd *pd,
		struct ibv_device_attr_ex *device_attr_ex)
{
	uint64_t hw_rss_sup = device_attr_ex->rss_caps.rx_hash_fields_mask;
	struct ibv_cq *cq = NULL;
	struct ibv_wq *wq = NULL;
	struct ibv_rwq_ind_table *ind = NULL;
	struct ibv_qp *qp = NULL;

	if (!hw_rss_sup) {
		WARN("no RSS capabilities reported; disabling support for UDP"
		     " RSS and inner VXLAN RSS");
		return IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4 |
		       IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6 |
		       IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP;
	}
	if (!(hw_rss_sup & IBV_RX_HASH_INNER))
		return hw_rss_sup;
	/*
	 * Although reported as supported, missing code in some Linux
	 * versions (v4.15, v4.16) prevents the creation of hash QPs with
	 * inner capability.
	 *
	 * There is no choice but to attempt to instantiate a temporary RSS
	 * context in order to confirm its support.
	 */
	cq = mlx4_glue->create_cq(ctx, 1, NULL, NULL, 0);
	wq = cq ? mlx4_glue->create_wq
		(ctx,
		 &(struct ibv_wq_init_attr){
			.wq_type = IBV_WQT_RQ,
			.max_wr = 1,
			.max_sge = 1,
			.pd = pd,
			.cq = cq,
		 }) : NULL;
	ind = wq ? mlx4_glue->create_rwq_ind_table
		(ctx,
		 &(struct ibv_rwq_ind_table_init_attr){
			.log_ind_tbl_size = 0,
			.ind_tbl = &wq,
			.comp_mask = 0,
		 }) : NULL;
	qp = ind ? mlx4_glue->create_qp_ex
		(ctx,
		 &(struct ibv_qp_init_attr_ex){
			.comp_mask =
				(IBV_QP_INIT_ATTR_PD |
				 IBV_QP_INIT_ATTR_RX_HASH |
				 IBV_QP_INIT_ATTR_IND_TABLE),
			.qp_type = IBV_QPT_RAW_PACKET,
			.pd = pd,
			.rwq_ind_tbl = ind,
			.rx_hash_conf = {
				.rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
				.rx_hash_key_len = MLX4_RSS_HASH_KEY_SIZE,
				.rx_hash_key = mlx4_rss_hash_key_default,
				.rx_hash_fields_mask = hw_rss_sup,
			},
		 }) : NULL;
	if (!qp) {
		WARN("disabling unusable inner RSS capability due to kernel"
		     " quirk");
		hw_rss_sup &= ~IBV_RX_HASH_INNER;
	} else {
		claim_zero(mlx4_glue->destroy_qp(qp));
	}
	if (ind)
		claim_zero(mlx4_glue->destroy_rwq_ind_table(ind));
	if (wq)
		claim_zero(mlx4_glue->destroy_wq(wq));
	if (cq)
		claim_zero(mlx4_glue->destroy_cq(cq));
	return hw_rss_sup;
}
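
/*
 * Note on the probe above: the temporary CQ -> WQ -> indirection
 * table -> hash QP chain is the smallest object set that lets
 * create_qp_ex() exercise the inner RSS path; no traffic ever flows
 * through it, and the objects are destroyed in reverse creation order
 * right after the check.
 */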
static struct rte_pci_driver mlx4_driver;

/**
 * PMD global initialization.
 *
 * Independent of individual devices, this function initializes global
 * per-PMD data structures distinguishing primary and secondary processes.
 * Hence, it is called once per process.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_init_once(void)
{
	struct mlx4_shared_data *sd;
	struct mlx4_local_data *ld = &mlx4_local_data;
	int ret = 0;

	if (mlx4_init_shared_data())
		return -rte_errno;
	sd = mlx4_shared_data;
	assert(sd);
	rte_spinlock_lock(&sd->lock);
	switch (rte_eal_process_type()) {
	case RTE_PROC_PRIMARY:
		if (sd->init_done)
			break;
		LIST_INIT(&sd->mem_event_cb_list);
		rte_rwlock_init(&sd->mem_event_rwlock);
		rte_mem_event_callback_register("MLX4_MEM_EVENT_CB",
						mlx4_mr_mem_event_cb, NULL);
		ret = mlx4_mp_init_primary();
		if (ret)
			goto out;
		sd->init_done = 1;
		break;
	case RTE_PROC_SECONDARY:
		if (ld->init_done)
			break;
		ret = mlx4_mp_init_secondary();
		if (ret)
			goto out;
		++sd->secondary_cnt;
		ld->init_done = 1;
		break;
	default:
		break;
	}
out:
	rte_spinlock_unlock(&sd->lock);
	return ret;
}

/**
 * DPDK callback to register a PCI device.
 *
 * This function creates an Ethernet device for each port of a given
 * PCI device.
 *
 * @param[in] pci_drv
 *   PCI driver structure (mlx4_driver).
 * @param[in] pci_dev
 *   PCI device information.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	struct ibv_device **list;
	struct ibv_device *ibv_dev;
	int err = 0;
	struct ibv_context *attr_ctx = NULL;
	struct ibv_device_attr device_attr;
	struct ibv_device_attr_ex device_attr_ex;
	struct mlx4_conf conf = {
		.ports.present = 0,
		.mr_ext_memseg_en = 1,
	};
	unsigned int vf;
	int i;
	char ifname[IF_NAMESIZE];

	(void)pci_drv;
	err = mlx4_init_once();
	if (err) {
		ERROR("unable to init PMD global data: %s",
		      strerror(rte_errno));
		return -rte_errno;
	}
	assert(pci_drv == &mlx4_driver);
	list = mlx4_glue->get_device_list(&i);
	if (list == NULL) {
		rte_errno = errno;
		assert(rte_errno);
		if (rte_errno == ENOSYS)
			ERROR("cannot list devices, is ib_uverbs loaded?");
		return -rte_errno;
	}
	assert(i >= 0);
	/*
	 * For each listed device, check related sysfs entry against
	 * the provided PCI ID.
	 */
	while (i != 0) {
		struct rte_pci_addr pci_addr;

		--i;
		DEBUG("checking device \"%s\"", list[i]->name);
		if (mlx4_ibv_device_to_pci_addr(list[i], &pci_addr))
			continue;
		if ((pci_dev->addr.domain != pci_addr.domain) ||
		    (pci_dev->addr.bus != pci_addr.bus) ||
		    (pci_dev->addr.devid != pci_addr.devid) ||
		    (pci_dev->addr.function != pci_addr.function))
			continue;
		vf = (pci_dev->id.device_id ==
		      PCI_DEVICE_ID_MELLANOX_CONNECTX3VF);
		INFO("PCI information matches, using device \"%s\" (VF: %s)",
		     list[i]->name, (vf ? "true" : "false"));
"true" : "false")); 805 attr_ctx = mlx4_glue->open_device(list[i]); 806 err = errno; 807 break; 808 } 809 if (attr_ctx == NULL) { 810 mlx4_glue->free_device_list(list); 811 switch (err) { 812 case 0: 813 rte_errno = ENODEV; 814 ERROR("cannot access device, is mlx4_ib loaded?"); 815 return -rte_errno; 816 case EINVAL: 817 rte_errno = EINVAL; 818 ERROR("cannot use device, are drivers up to date?"); 819 return -rte_errno; 820 } 821 assert(err > 0); 822 rte_errno = err; 823 return -rte_errno; 824 } 825 ibv_dev = list[i]; 826 DEBUG("device opened"); 827 if (mlx4_glue->query_device(attr_ctx, &device_attr)) { 828 err = ENODEV; 829 goto error; 830 } 831 INFO("%u port(s) detected", device_attr.phys_port_cnt); 832 conf.ports.present |= (UINT64_C(1) << device_attr.phys_port_cnt) - 1; 833 if (mlx4_args(pci_dev->device.devargs, &conf)) { 834 ERROR("failed to process device arguments"); 835 err = EINVAL; 836 goto error; 837 } 838 /* Use all ports when none are defined */ 839 if (!conf.ports.enabled) 840 conf.ports.enabled = conf.ports.present; 841 /* Retrieve extended device attributes. */ 842 if (mlx4_glue->query_device_ex(attr_ctx, NULL, &device_attr_ex)) { 843 err = ENODEV; 844 goto error; 845 } 846 assert(device_attr.max_sge >= MLX4_MAX_SGE); 847 for (i = 0; i < device_attr.phys_port_cnt; i++) { 848 uint32_t port = i + 1; /* ports are indexed from one */ 849 struct ibv_context *ctx = NULL; 850 struct ibv_port_attr port_attr; 851 struct ibv_pd *pd = NULL; 852 struct mlx4_priv *priv = NULL; 853 struct rte_eth_dev *eth_dev = NULL; 854 struct rte_ether_addr mac; 855 char name[RTE_ETH_NAME_MAX_LEN]; 856 857 /* If port is not enabled, skip. */ 858 if (!(conf.ports.enabled & (1 << i))) 859 continue; 860 DEBUG("using port %u", port); 861 ctx = mlx4_glue->open_device(ibv_dev); 862 if (ctx == NULL) { 863 err = ENODEV; 864 goto port_error; 865 } 866 snprintf(name, sizeof(name), "%s port %u", 867 mlx4_glue->get_device_name(ibv_dev), port); 868 if (rte_eal_process_type() == RTE_PROC_SECONDARY) { 869 eth_dev = rte_eth_dev_attach_secondary(name); 870 if (eth_dev == NULL) { 871 ERROR("can not attach rte ethdev"); 872 rte_errno = ENOMEM; 873 err = rte_errno; 874 goto error; 875 } 876 priv = eth_dev->data->dev_private; 877 if (!priv->verbs_alloc_ctx.enabled) { 878 ERROR("secondary process is not supported" 879 " due to lack of external allocator" 880 " from Verbs"); 881 rte_errno = ENOTSUP; 882 err = rte_errno; 883 goto error; 884 } 885 eth_dev->device = &pci_dev->device; 886 eth_dev->dev_ops = &mlx4_dev_sec_ops; 887 err = mlx4_proc_priv_init(eth_dev); 888 if (err) 889 goto error; 890 /* Receive command fd from primary process. */ 891 err = mlx4_mp_req_verbs_cmd_fd(eth_dev); 892 if (err < 0) { 893 err = rte_errno; 894 goto error; 895 } 896 /* Remap UAR for Tx queues. */ 897 err = mlx4_tx_uar_init_secondary(eth_dev, err); 898 if (err) { 899 err = rte_errno; 900 goto error; 901 } 902 /* 903 * Ethdev pointer is still required as input since 904 * the primary device is not accessible from the 905 * secondary process. 906 */ 907 eth_dev->tx_pkt_burst = mlx4_tx_burst; 908 eth_dev->rx_pkt_burst = mlx4_rx_burst; 909 claim_zero(mlx4_glue->close_device(ctx)); 910 rte_eth_copy_pci_info(eth_dev, pci_dev); 911 rte_eth_dev_probing_finish(eth_dev); 912 continue; 913 } 914 /* Check port status. 
		err = mlx4_glue->query_port(ctx, port, &port_attr);
		if (err) {
			err = ENODEV;
			ERROR("port query failed: %s", strerror(err));
			goto port_error;
		}
		if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
			err = ENOTSUP;
			ERROR("port %d is not configured in Ethernet mode",
			      port);
			goto port_error;
		}
		if (port_attr.state != IBV_PORT_ACTIVE)
			DEBUG("port %d is not active: \"%s\" (%d)",
			      port, mlx4_glue->port_state_str(port_attr.state),
			      port_attr.state);
		/* Make asynchronous FD non-blocking to handle interrupts. */
		err = mlx4_fd_set_non_blocking(ctx->async_fd);
		if (err) {
			ERROR("cannot make asynchronous FD non-blocking: %s",
			      strerror(err));
			goto port_error;
		}
		/* Allocate protection domain. */
		pd = mlx4_glue->alloc_pd(ctx);
		if (pd == NULL) {
			err = ENOMEM;
			ERROR("PD allocation failure");
			goto port_error;
		}
		/* from rte_ethdev.c */
		priv = rte_zmalloc("ethdev private structure",
				   sizeof(*priv),
				   RTE_CACHE_LINE_SIZE);
		if (priv == NULL) {
			err = ENOMEM;
			ERROR("priv allocation failure");
			goto port_error;
		}
		priv->ctx = ctx;
		priv->device_attr = device_attr;
		priv->port = port;
		priv->pd = pd;
		priv->mtu = RTE_ETHER_MTU;
		priv->vf = vf;
		priv->hw_csum = !!(device_attr.device_cap_flags &
				   IBV_DEVICE_RAW_IP_CSUM);
		DEBUG("checksum offloading is %ssupported",
		      (priv->hw_csum ? "" : "not "));
		/* Only ConnectX-3 Pro supports tunneling. */
		priv->hw_csum_l2tun =
			priv->hw_csum &&
			(device_attr.vendor_part_id ==
			 PCI_DEVICE_ID_MELLANOX_CONNECTX3PRO);
		DEBUG("L2 tunnel checksum offloads are %ssupported",
		      priv->hw_csum_l2tun ? "" : "not ");
		priv->hw_rss_sup = mlx4_hw_rss_sup(priv->ctx, priv->pd,
						   &device_attr_ex);
		DEBUG("supported RSS hash fields mask: %016" PRIx64,
		      priv->hw_rss_sup);
		priv->hw_rss_max_qps =
			device_attr_ex.rss_caps.max_rwq_indirection_table_size;
		DEBUG("MAX RSS queues %d", priv->hw_rss_max_qps);
		priv->hw_fcs_strip = !!(device_attr_ex.raw_packet_caps &
					IBV_RAW_PACKET_CAP_SCATTER_FCS);
		DEBUG("FCS stripping toggling is %ssupported",
		      priv->hw_fcs_strip ? "" : "not ");
		priv->tso =
			((device_attr_ex.tso_caps.max_tso > 0) &&
			 (device_attr_ex.tso_caps.supported_qpts &
			  (1 << IBV_QPT_RAW_PACKET)));
		if (priv->tso)
			priv->tso_max_payload_sz =
					device_attr_ex.tso_caps.max_tso;
		DEBUG("TSO is %ssupported",
		      priv->tso ? "" : "not ");
		priv->mr_ext_memseg_en = conf.mr_ext_memseg_en;
		/* Configure the first MAC address by default. */
		err = mlx4_get_mac(priv, &mac.addr_bytes);
		if (err) {
			ERROR("cannot get MAC address, is mlx4_en loaded?"
			      " (error: %s)", strerror(err));
			goto port_error;
		}
		INFO("port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
		     priv->port,
		     mac.addr_bytes[0], mac.addr_bytes[1],
		     mac.addr_bytes[2], mac.addr_bytes[3],
		     mac.addr_bytes[4], mac.addr_bytes[5]);
		/* Register MAC address. */
		priv->mac[0] = mac;

		if (mlx4_get_ifname(priv, &ifname) == 0) {
			DEBUG("port %u ifname is \"%s\"",
			      priv->port, ifname);
			priv->if_index = if_nametoindex(ifname);
		} else {
			DEBUG("port %u ifname is unknown", priv->port);
		}

		/* Get actual MTU if possible. */
		mlx4_mtu_get(priv, &priv->mtu);
		DEBUG("port %u MTU is %u", priv->port, priv->mtu);
		eth_dev = rte_eth_dev_allocate(name);
		if (eth_dev == NULL) {
			err = ENOMEM;
			ERROR("cannot allocate rte ethdev");
			goto port_error;
		}
		eth_dev->data->dev_private = priv;
		eth_dev->data->mac_addrs = priv->mac;
		eth_dev->device = &pci_dev->device;
		rte_eth_copy_pci_info(eth_dev, pci_dev);
		/* Initialize local interrupt handle for current port. */
		priv->intr_handle = (struct rte_intr_handle){
			.fd = -1,
			.type = RTE_INTR_HANDLE_EXT,
		};
		/*
		 * Override ethdev interrupt handle pointer with private
		 * handle instead of that of the parent PCI device used by
		 * default. This prevents it from being shared between all
		 * ports of the same PCI device since each of them is
		 * associated with its own Verbs context.
		 *
		 * Rx interrupts in particular require this as the PMD has
		 * no control over the registration of queue interrupts
		 * besides setting up eth_dev->intr_handle, the rest is
		 * handled by rte_intr_rx_ctl().
		 */
		eth_dev->intr_handle = &priv->intr_handle;
		priv->dev_data = eth_dev->data;
		eth_dev->dev_ops = &mlx4_dev_ops;
#ifdef HAVE_IBV_MLX4_BUF_ALLOCATORS
		/* Hint libmlx4 to use PMD allocator for data plane resources. */
		struct mlx4dv_ctx_allocators alctr = {
			.alloc = &mlx4_alloc_verbs_buf,
			.free = &mlx4_free_verbs_buf,
			.data = priv,
		};
		err = mlx4_glue->dv_set_context_attr
			(ctx, MLX4DV_SET_CTX_ATTR_BUF_ALLOCATORS,
			 (void *)((uintptr_t)&alctr));
		if (err)
			WARN("Verbs external allocator is not supported");
		else
			priv->verbs_alloc_ctx.enabled = 1;
#endif
		/* Bring Ethernet device up. */
		DEBUG("forcing Ethernet interface up");
		mlx4_dev_set_link_up(eth_dev);
		/* Update link status once if waiting for LSC. */
		if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
			mlx4_link_update(eth_dev, 0);
		/*
		 * Once the device is added to the list of memory event
		 * callback, its global MR cache table cannot be expanded
		 * on the fly because of deadlock. If it overflows, lookup
		 * should be done by searching MR list linearly, which is slow.
		 */
		err = mlx4_mr_btree_init(&priv->mr.cache,
					 MLX4_MR_BTREE_CACHE_N * 2,
					 eth_dev->device->numa_node);
		if (err) {
			/* rte_errno is already set. */
			goto port_error;
		}
		/* Add device to memory callback list. */
		rte_rwlock_write_lock(&mlx4_shared_data->mem_event_rwlock);
		LIST_INSERT_HEAD(&mlx4_shared_data->mem_event_cb_list,
				 priv, mem_event_cb);
		rte_rwlock_write_unlock(&mlx4_shared_data->mem_event_rwlock);
		rte_eth_dev_probing_finish(eth_dev);
		continue;
port_error:
		rte_free(priv);
		if (eth_dev != NULL)
			eth_dev->data->dev_private = NULL;
		if (pd)
			claim_zero(mlx4_glue->dealloc_pd(pd));
		if (ctx)
			claim_zero(mlx4_glue->close_device(ctx));
		if (eth_dev != NULL) {
			/* mac_addrs must not be freed, it is part of dev_private. */
			eth_dev->data->mac_addrs = NULL;
			rte_eth_dev_release_port(eth_dev);
		}
		break;
	}
	/*
	 * XXX if something went wrong in the loop above, there is a resource
	 * leak (ctx, pd, priv, dpdk ethdev) but we can do nothing about it as
	 * long as the dpdk does not provide a way to deallocate an ethdev and
	 * a way to enumerate the registered ethdevs to free the previous ones.
	 */
error:
	if (attr_ctx)
		claim_zero(mlx4_glue->close_device(attr_ctx));
	if (list)
		mlx4_glue->free_device_list(list);
	if (err)
		rte_errno = err;
	return -err;
}

static const struct rte_pci_id mlx4_pci_id_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX3)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX3PRO)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX3VF)
	},
	{
		.vendor_id = 0
	}
};

static struct rte_pci_driver mlx4_driver = {
	.driver = {
		.name = MLX4_DRIVER_NAME
	},
	.id_table = mlx4_pci_id_map,
	.probe = mlx4_pci_probe,
	.drv_flags = RTE_PCI_DRV_INTR_LSC | RTE_PCI_DRV_INTR_RMV,
};

#ifdef RTE_IBVERBS_LINK_DLOPEN

/**
 * Suffix RTE_EAL_PMD_PATH with "-glue".
 *
 * This function performs a sanity check on RTE_EAL_PMD_PATH before
 * suffixing its last component.
 *
 * @param[out] buf
 *   Output buffer, should be large enough otherwise NULL is returned.
 * @param size
 *   Size of @p buf.
 *
 * @return
 *   Pointer to @p buf or @p NULL in case suffix cannot be appended.
 */
static char *
mlx4_glue_path(char *buf, size_t size)
{
	static const char *const bad[] = { "/", ".", "..", NULL };
	const char *path = RTE_EAL_PMD_PATH;
	size_t len = strlen(path);
	size_t off;
	int i;

	while (len && path[len - 1] == '/')
		--len;
	for (off = len; off && path[off - 1] != '/'; --off)
		;
	for (i = 0; bad[i]; ++i)
		if (!strncmp(path + off, bad[i], (int)(len - off)))
			goto error;
	i = snprintf(buf, size, "%.*s-glue", (int)len, path);
	if (i == -1 || (size_t)i >= size)
		goto error;
	return buf;
error:
	ERROR("unable to append \"-glue\" to last component of"
	      " RTE_EAL_PMD_PATH (\"" RTE_EAL_PMD_PATH "\"),"
	      " please re-configure DPDK");
	return NULL;
}
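
/*
 * Example (illustrative): when the PMD is built with
 * RTE_IBVERBS_LINK_DLOPEN, the rdma-core glue library location can be
 * overridden at run time, e.g.
 *
 *   MLX4_GLUE_PATH=/opt/dpdk-glue ./testpmd ...
 *
 * mlx4_glue_init() below only trusts MLX4_GLUE_PATH when real and
 * effective user/group IDs match, so setuid binaries ignore it.
 */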
"" : "/"); 1237 if (ret == -1) 1238 break; 1239 if (sizeof(name) != (size_t)ret + 1) 1240 continue; 1241 DEBUG("looking for rdma-core glue as \"%s\"", name); 1242 handle = dlopen(name, RTLD_LAZY); 1243 break; 1244 } while (1); 1245 path[i] = end + 1; 1246 if (!*end) 1247 ++i; 1248 } 1249 if (!handle) { 1250 rte_errno = EINVAL; 1251 dlmsg = dlerror(); 1252 if (dlmsg) 1253 WARN("cannot load glue library: %s", dlmsg); 1254 goto glue_error; 1255 } 1256 sym = dlsym(handle, "mlx4_glue"); 1257 if (!sym || !*sym) { 1258 rte_errno = EINVAL; 1259 dlmsg = dlerror(); 1260 if (dlmsg) 1261 ERROR("cannot resolve glue symbol: %s", dlmsg); 1262 goto glue_error; 1263 } 1264 mlx4_glue = *sym; 1265 return 0; 1266 glue_error: 1267 if (handle) 1268 dlclose(handle); 1269 WARN("cannot initialize PMD due to missing run-time" 1270 " dependency on rdma-core libraries (libibverbs," 1271 " libmlx4)"); 1272 return -rte_errno; 1273 } 1274 1275 #endif 1276 1277 /** 1278 * Driver initialization routine. 1279 */ 1280 RTE_INIT(rte_mlx4_pmd_init) 1281 { 1282 /* Initialize driver log type. */ 1283 mlx4_logtype = rte_log_register("pmd.net.mlx4"); 1284 if (mlx4_logtype >= 0) 1285 rte_log_set_level(mlx4_logtype, RTE_LOG_NOTICE); 1286 1287 /* 1288 * MLX4_DEVICE_FATAL_CLEANUP tells ibv_destroy functions we 1289 * want to get success errno value in case of calling them 1290 * when the device was removed. 1291 */ 1292 setenv("MLX4_DEVICE_FATAL_CLEANUP", "1", 1); 1293 /* 1294 * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use 1295 * huge pages. Calling ibv_fork_init() during init allows 1296 * applications to use fork() safely for purposes other than 1297 * using this PMD, which is not supported in forked processes. 1298 */ 1299 setenv("RDMAV_HUGEPAGES_SAFE", "1", 1); 1300 #ifdef RTE_IBVERBS_LINK_DLOPEN 1301 if (mlx4_glue_init()) 1302 return; 1303 assert(mlx4_glue); 1304 #endif 1305 #ifndef NDEBUG 1306 /* Glue structure must not contain any NULL pointers. */ 1307 { 1308 unsigned int i; 1309 1310 for (i = 0; i != sizeof(*mlx4_glue) / sizeof(void *); ++i) 1311 assert(((const void *const *)mlx4_glue)[i]); 1312 } 1313 #endif 1314 if (strcmp(mlx4_glue->version, MLX4_GLUE_VERSION)) { 1315 ERROR("rdma-core glue \"%s\" mismatch: \"%s\" is required", 1316 mlx4_glue->version, MLX4_GLUE_VERSION); 1317 return; 1318 } 1319 mlx4_glue->fork_init(); 1320 rte_pci_register(&mlx4_driver); 1321 } 1322 1323 RTE_PMD_EXPORT_NAME(net_mlx4, __COUNTER__); 1324 RTE_PMD_REGISTER_PCI_TABLE(net_mlx4, mlx4_pci_id_map); 1325 RTE_PMD_REGISTER_KMOD_DEP(net_mlx4, 1326 "* ib_uverbs & mlx4_en & mlx4_core & mlx4_ib"); 1327