/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox.
 */

#include <stddef.h>
#include <unistd.h>
#include <string.h>
#include <assert.h>
#include <dlfcn.h>
#include <stdint.h>
#include <stdlib.h>
#include <errno.h>
#include <net/if.h>
#include <sys/mman.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_malloc.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_config.h>
#include <rte_eal_memconfig.h>
#include <rte_kvargs.h>

#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5_glue.h"

/* Device parameter to enable RX completion queue compression. */
#define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en"

/* Device parameter to configure inline send. */
#define MLX5_TXQ_INLINE "txq_inline"

/*
 * Device parameter to configure the number of TX queues threshold for
 * enabling inline send.
 */
#define MLX5_TXQS_MIN_INLINE "txqs_min_inline"

/* Device parameter to enable multi-packet send WQEs. */
#define MLX5_TXQ_MPW_EN "txq_mpw_en"

/* Device parameter to include 2 dsegs in the title WQEBB. */
#define MLX5_TXQ_MPW_HDR_DSEG_EN "txq_mpw_hdr_dseg_en"

/* Device parameter to limit the size of inlining packet. */
#define MLX5_TXQ_MAX_INLINE_LEN "txq_max_inline_len"

/* Device parameter to enable hardware Tx vector. */
#define MLX5_TX_VEC_EN "tx_vec_en"

/* Device parameter to enable hardware Rx vector. */
#define MLX5_RX_VEC_EN "rx_vec_en"

#ifndef HAVE_IBV_MLX5_MOD_MPW
#define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2)
#define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3)
#endif

#ifndef HAVE_IBV_MLX5_MOD_CQE_128B_COMP
#define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4)
#endif

/** Driver-specific log messages type. */
int mlx5_logtype;

/**
 * Retrieve integer value from environment variable.
 *
 * @param[in] name
 *   Environment variable name.
 *
 * @return
 *   Integer value, 0 if the variable is not set.
 */
int
mlx5_getenv_int(const char *name)
{
	const char *val = getenv(name);

	if (val == NULL)
		return 0;
	return atoi(val);
}

/**
 * Verbs callback to allocate memory. This function allocates the requested
 * size inside a huge page.
 * Note that every allocation must respect the alignment required by libmlx5
 * (i.e. currently sysconf(_SC_PAGESIZE)).
 *
 * @param[in] size
 *   The size in bytes of the memory to allocate.
 * @param[in] data
 *   A pointer to the callback data.
 *
 * @return
 *   Allocated buffer, NULL otherwise and rte_errno is set.
 */
static void *
mlx5_alloc_verbs_buf(size_t size, void *data)
{
	struct priv *priv = data;
	void *ret;
	size_t alignment = sysconf(_SC_PAGESIZE);
	unsigned int socket = SOCKET_ID_ANY;

	if (priv->verbs_alloc_ctx.type == MLX5_VERBS_ALLOC_TYPE_TX_QUEUE) {
		const struct mlx5_txq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;

		socket = ctrl->socket;
	} else if (priv->verbs_alloc_ctx.type ==
		   MLX5_VERBS_ALLOC_TYPE_RX_QUEUE) {
		const struct mlx5_rxq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;

		socket = ctrl->socket;
	}
	assert(data != NULL);
	ret = rte_malloc_socket(__func__, size, alignment, socket);
	if (!ret && size)
		rte_errno = ENOMEM;
	return ret;
}

/**
 * Verbs callback to free memory.
 *
 * @param[in] ptr
 *   A pointer to the memory to free.
 * @param[in] data
 *   A pointer to the callback data.
 */
static void
mlx5_free_verbs_buf(void *ptr, void *data __rte_unused)
{
	assert(data != NULL);
	rte_free(ptr);
}

/**
 * DPDK callback to close the device.
 *
 * Destroy all queues and objects, free memory.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_dev_close(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	unsigned int i;
	int ret;

	DRV_LOG(DEBUG, "port %u closing device \"%s\"",
		dev->data->port_id,
		((priv->ctx != NULL) ? priv->ctx->device->name : ""));
	/* In case mlx5_dev_stop() has not been called. */
	mlx5_dev_interrupt_handler_uninstall(dev);
	mlx5_traffic_disable(dev);
	/* Prevent crashes when queues are still in use. */
	dev->rx_pkt_burst = removed_rx_burst;
	dev->tx_pkt_burst = removed_tx_burst;
	if (priv->rxqs != NULL) {
		/* XXX race condition if mlx5_rx_burst() is still running. */
		usleep(1000);
		for (i = 0; (i != priv->rxqs_n); ++i)
			mlx5_rxq_release(dev, i);
		priv->rxqs_n = 0;
		priv->rxqs = NULL;
	}
	if (priv->txqs != NULL) {
		/* XXX race condition if mlx5_tx_burst() is still running. */
		usleep(1000);
		for (i = 0; (i != priv->txqs_n); ++i)
			mlx5_txq_release(dev, i);
		priv->txqs_n = 0;
		priv->txqs = NULL;
	}
	if (priv->pd != NULL) {
		assert(priv->ctx != NULL);
		claim_zero(mlx5_glue->dealloc_pd(priv->pd));
		claim_zero(mlx5_glue->close_device(priv->ctx));
	} else
		assert(priv->ctx == NULL);
	if (priv->rss_conf.rss_key != NULL)
		rte_free(priv->rss_conf.rss_key);
	if (priv->reta_idx != NULL)
		rte_free(priv->reta_idx);
	if (priv->primary_socket)
		mlx5_socket_uninit(dev);
	ret = mlx5_hrxq_ibv_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some hash Rx queue still remain",
			dev->data->port_id);
	ret = mlx5_ind_table_ibv_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some indirection table still remain",
			dev->data->port_id);
	ret = mlx5_rxq_ibv_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Verbs Rx queue still remain",
			dev->data->port_id);
	ret = mlx5_rxq_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Rx queues still remain",
			dev->data->port_id);
	ret = mlx5_txq_ibv_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Verbs Tx queue still remain",
			dev->data->port_id);
	ret = mlx5_txq_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Tx queues still remain",
			dev->data->port_id);
	ret = mlx5_flow_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some flows still remain",
			dev->data->port_id);
	ret = mlx5_mr_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some memory region still remain",
			dev->data->port_id);
	memset(priv, 0, sizeof(*priv));
}

const struct eth_dev_ops mlx5_dev_ops = {
	.dev_configure = mlx5_dev_configure,
	.dev_start = mlx5_dev_start,
	.dev_stop = mlx5_dev_stop,
	.dev_set_link_down = mlx5_set_link_down,
	.dev_set_link_up = mlx5_set_link_up,
	.dev_close = mlx5_dev_close,
	.promiscuous_enable = mlx5_promiscuous_enable,
	.promiscuous_disable = mlx5_promiscuous_disable,
	.allmulticast_enable = mlx5_allmulticast_enable,
	.allmulticast_disable = mlx5_allmulticast_disable,
	.link_update = mlx5_link_update,
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.xstats_get = mlx5_xstats_get,
	.xstats_reset = mlx5_xstats_reset,
	.xstats_get_names = mlx5_xstats_get_names,
	.dev_infos_get = mlx5_dev_infos_get,
	.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
	.vlan_filter_set = mlx5_vlan_filter_set,
	.rx_queue_setup = mlx5_rx_queue_setup,
	.tx_queue_setup = mlx5_tx_queue_setup,
	.rx_queue_release = mlx5_rx_queue_release,
	.tx_queue_release = mlx5_tx_queue_release,
	.flow_ctrl_get = mlx5_dev_get_flow_ctrl,
	.flow_ctrl_set = mlx5_dev_set_flow_ctrl,
	.mac_addr_remove = mlx5_mac_addr_remove,
	.mac_addr_add = mlx5_mac_addr_add,
	.mac_addr_set = mlx5_mac_addr_set,
	.mtu_set = mlx5_dev_set_mtu,
	.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
	.vlan_offload_set = mlx5_vlan_offload_set,
	.reta_update = mlx5_dev_rss_reta_update,
	.reta_query = mlx5_dev_rss_reta_query,
	.rss_hash_update = mlx5_rss_hash_update,
	.rss_hash_conf_get = mlx5_rss_hash_conf_get,
	.filter_ctrl = mlx5_dev_filter_ctrl,
	.rx_descriptor_status = mlx5_rx_descriptor_status,
	.tx_descriptor_status = mlx5_tx_descriptor_status,
	.rx_queue_intr_enable = mlx5_rx_intr_enable,
	.rx_queue_intr_disable = mlx5_rx_intr_disable,
	.is_removed = mlx5_is_removed,
};

static const struct eth_dev_ops mlx5_dev_sec_ops = {
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.xstats_get = mlx5_xstats_get,
	.xstats_reset = mlx5_xstats_reset,
	.xstats_get_names = mlx5_xstats_get_names,
	.dev_infos_get = mlx5_dev_infos_get,
	.rx_descriptor_status = mlx5_rx_descriptor_status,
	.tx_descriptor_status = mlx5_tx_descriptor_status,
};

/* Available operations in flow isolated mode. */
const struct eth_dev_ops mlx5_dev_ops_isolate = {
	.dev_configure = mlx5_dev_configure,
	.dev_start = mlx5_dev_start,
	.dev_stop = mlx5_dev_stop,
	.dev_set_link_down = mlx5_set_link_down,
	.dev_set_link_up = mlx5_set_link_up,
	.dev_close = mlx5_dev_close,
	.link_update = mlx5_link_update,
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.xstats_get = mlx5_xstats_get,
	.xstats_reset = mlx5_xstats_reset,
	.xstats_get_names = mlx5_xstats_get_names,
	.dev_infos_get = mlx5_dev_infos_get,
	.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
	.vlan_filter_set = mlx5_vlan_filter_set,
	.rx_queue_setup = mlx5_rx_queue_setup,
	.tx_queue_setup = mlx5_tx_queue_setup,
	.rx_queue_release = mlx5_rx_queue_release,
	.tx_queue_release = mlx5_tx_queue_release,
	.flow_ctrl_get = mlx5_dev_get_flow_ctrl,
	.flow_ctrl_set = mlx5_dev_set_flow_ctrl,
	.mac_addr_remove = mlx5_mac_addr_remove,
	.mac_addr_add = mlx5_mac_addr_add,
	.mac_addr_set = mlx5_mac_addr_set,
	.mtu_set = mlx5_dev_set_mtu,
	.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
	.vlan_offload_set = mlx5_vlan_offload_set,
	.filter_ctrl = mlx5_dev_filter_ctrl,
	.rx_descriptor_status = mlx5_rx_descriptor_status,
	.tx_descriptor_status = mlx5_tx_descriptor_status,
	.rx_queue_intr_enable = mlx5_rx_intr_enable,
	.rx_queue_intr_disable = mlx5_rx_intr_disable,
	.is_removed = mlx5_is_removed,
};

static struct {
	struct rte_pci_addr pci_addr; /* associated PCI address */
	uint32_t ports; /* physical ports bitfield. */
} mlx5_dev[32];

/**
 * Get device index in mlx5_dev[] from PCI bus address.
 *
 * @param[in] pci_addr
 *   PCI bus address to look for.
 *
 * @return
 *   mlx5_dev[] index on success, -1 on failure.
 */
static int
mlx5_dev_idx(struct rte_pci_addr *pci_addr)
{
	unsigned int i;
	int ret = -1;

	assert(pci_addr != NULL);
	for (i = 0; (i != RTE_DIM(mlx5_dev)); ++i) {
		if ((mlx5_dev[i].pci_addr.domain == pci_addr->domain) &&
		    (mlx5_dev[i].pci_addr.bus == pci_addr->bus) &&
		    (mlx5_dev[i].pci_addr.devid == pci_addr->devid) &&
		    (mlx5_dev[i].pci_addr.function == pci_addr->function))
			return i;
		if ((mlx5_dev[i].ports == 0) && (ret == -1))
			ret = i;
	}
	return ret;
}

/**
 * Verify and store value for device argument.
 *
 * @param[in] key
 *   Key argument to verify.
 * @param[in] val
 *   Value associated with key.
 * @param opaque
 *   User data.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_args_check(const char *key, const char *val, void *opaque)
{
	struct mlx5_dev_config *config = opaque;
	unsigned long tmp;

	errno = 0;
	tmp = strtoul(val, NULL, 0);
	if (errno) {
		rte_errno = errno;
		DRV_LOG(WARNING, "%s: \"%s\" is not a valid integer", key, val);
		return -rte_errno;
	}
	if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
		config->cqe_comp = !!tmp;
	} else if (strcmp(MLX5_TXQ_INLINE, key) == 0) {
		config->txq_inline = tmp;
	} else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) {
		config->txqs_inline = tmp;
	} else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
		config->mps = !!tmp ? config->mps : 0;
	} else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) {
		config->mpw_hdr_dseg = !!tmp;
	} else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) {
		config->inline_max_packet_sz = tmp;
	} else if (strcmp(MLX5_TX_VEC_EN, key) == 0) {
		config->tx_vec_en = !!tmp;
	} else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
		config->rx_vec_en = !!tmp;
	} else {
		DRV_LOG(WARNING, "%s: unknown parameter", key);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	return 0;
}

/**
 * Parse device parameters.
 *
 * @param config
 *   Pointer to device configuration structure.
 * @param devargs
 *   Device arguments structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
{
	const char **params = (const char *[]){
		MLX5_RXQ_CQE_COMP_EN,
		MLX5_TXQ_INLINE,
		MLX5_TXQS_MIN_INLINE,
		MLX5_TXQ_MPW_EN,
		MLX5_TXQ_MPW_HDR_DSEG_EN,
		MLX5_TXQ_MAX_INLINE_LEN,
		MLX5_TX_VEC_EN,
		MLX5_RX_VEC_EN,
		NULL,
	};
	struct rte_kvargs *kvlist;
	int ret = 0;
	int i;

	if (devargs == NULL)
		return 0;
	/* Following UGLY cast is done to pass checkpatch. */
	kvlist = rte_kvargs_parse(devargs->args, params);
	if (kvlist == NULL)
		return 0;
	/* Process parameters. */
	for (i = 0; (params[i] != NULL); ++i) {
		if (rte_kvargs_count(kvlist, params[i])) {
			ret = rte_kvargs_process(kvlist, params[i],
						 mlx5_args_check, config);
			if (ret) {
				rte_errno = EINVAL;
				rte_kvargs_free(kvlist);
				return -rte_errno;
			}
		}
	}
	rte_kvargs_free(kvlist);
	return 0;
}

static struct rte_pci_driver mlx5_driver;

/*
 * Reserved UAR address space for TXQ UAR (hw doorbell) mapping, process
 * local resource used by both primary and secondary to avoid duplicate
 * reservation.
 * The space has to be available on both primary and secondary process,
 * TXQ UAR maps to this area using fixed mmap w/o double check.
 */
static void *uar_base;

/**
 * Reserve UAR address space for primary process.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_uar_init_primary(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	void *addr = (void *)0;
	int i;
	const struct rte_mem_config *mcfg;

	if (uar_base) { /* UAR address space mapped. */
		priv->uar_base = uar_base;
		return 0;
	}
	/* find out lower bound of hugepage segments */
	mcfg = rte_eal_get_configuration()->mem_config;
	for (i = 0; i < RTE_MAX_MEMSEG && mcfg->memseg[i].addr; i++) {
		if (addr)
			addr = RTE_MIN(addr, mcfg->memseg[i].addr);
		else
			addr = mcfg->memseg[i].addr;
	}
	/* keep distance to hugepages to minimize potential conflicts. */
	addr = RTE_PTR_SUB(addr, MLX5_UAR_OFFSET + MLX5_UAR_SIZE);
	/* anonymous mmap, no real memory consumption. */
	addr = mmap(addr, MLX5_UAR_SIZE,
		    PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED) {
		DRV_LOG(ERR,
			"port %u failed to reserve UAR address space, please"
			" adjust MLX5_UAR_SIZE or try --base-virtaddr",
			dev->data->port_id);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Accept either same addr or a new addr returned from mmap if target
	 * range occupied.
	 */
	DRV_LOG(INFO, "port %u reserved UAR address space: %p",
		dev->data->port_id, addr);
	priv->uar_base = addr; /* for primary and secondary UAR re-mmap. */
	uar_base = addr; /* process local, don't reserve again. */
	return 0;
}

/**
 * Reserve UAR address space for secondary process, align with
 * primary process.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_uar_init_secondary(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	void *addr;

	assert(priv->uar_base);
	if (uar_base) { /* already reserved. */
		assert(uar_base == priv->uar_base);
		return 0;
	}
	/* anonymous mmap, no real memory consumption. */
	addr = mmap(priv->uar_base, MLX5_UAR_SIZE,
		    PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED) {
		DRV_LOG(ERR, "port %u UAR mmap failed: %p size: %llu",
			dev->data->port_id, priv->uar_base, MLX5_UAR_SIZE);
		rte_errno = ENXIO;
		return -rte_errno;
	}
	if (priv->uar_base != addr) {
		DRV_LOG(ERR,
			"port %u UAR address %p size %llu occupied, please"
			" adjust MLX5_UAR_OFFSET or try EAL parameter"
			" --base-virtaddr",
			dev->data->port_id, priv->uar_base, MLX5_UAR_SIZE);
		rte_errno = ENXIO;
		return -rte_errno;
	}
	uar_base = addr; /* process local, don't reserve again */
	DRV_LOG(INFO, "port %u reserved UAR address space: %p",
		dev->data->port_id, addr);
	return 0;
}

/**
 * DPDK callback to register a PCI device.
 *
 * This function creates an Ethernet device for each port of a given
 * PCI device.
 *
 * @param[in] pci_drv
 *   PCI driver structure (mlx5_driver).
 * @param[in] pci_dev
 *   PCI device information.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	       struct rte_pci_device *pci_dev)
{
	struct ibv_device **list = NULL;
	struct ibv_device *ibv_dev;
	int err = 0;
	struct ibv_context *attr_ctx = NULL;
	struct ibv_device_attr_ex device_attr;
	unsigned int mps;
	unsigned int cqe_comp;
	unsigned int tunnel_en = 0;
	int idx;
	int i;
	struct mlx5dv_context attrs_out = {0};
#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
	struct ibv_counter_set_description cs_desc;
#endif

	assert(pci_drv == &mlx5_driver);
	/* Get mlx5_dev[] index. */
	idx = mlx5_dev_idx(&pci_dev->addr);
	if (idx == -1) {
		DRV_LOG(ERR, "this driver cannot support any more adapters");
		err = ENOMEM;
		goto error;
	}
	DRV_LOG(DEBUG, "using driver device index %d", idx);
	/* Save PCI address. */
	mlx5_dev[idx].pci_addr = pci_dev->addr;
	list = mlx5_glue->get_device_list(&i);
	if (list == NULL) {
		assert(errno);
		err = errno;
		if (errno == ENOSYS)
			DRV_LOG(ERR,
				"cannot list devices, is ib_uverbs loaded?");
		goto error;
	}
	assert(i >= 0);
	/*
	 * For each listed device, check related sysfs entry against
	 * the provided PCI ID.
	 */
	while (i != 0) {
		struct rte_pci_addr pci_addr;

		--i;
		DRV_LOG(DEBUG, "checking device \"%s\"", list[i]->name);
		if (mlx5_ibv_device_to_pci_addr(list[i], &pci_addr))
			continue;
		if ((pci_dev->addr.domain != pci_addr.domain) ||
		    (pci_dev->addr.bus != pci_addr.bus) ||
		    (pci_dev->addr.devid != pci_addr.devid) ||
		    (pci_dev->addr.function != pci_addr.function))
			continue;
		DRV_LOG(INFO, "PCI information matches, using device \"%s\"",
			list[i]->name);
		attr_ctx = mlx5_glue->open_device(list[i]);
		rte_errno = errno;
		err = rte_errno;
		break;
	}
	if (attr_ctx == NULL) {
		mlx5_glue->free_device_list(list);
		switch (err) {
		case 0:
			DRV_LOG(ERR,
				"cannot access device, is mlx5_ib loaded?");
			err = ENODEV;
			goto error;
		case EINVAL:
			DRV_LOG(ERR,
				"cannot use device, are drivers up to date?");
			goto error;
		}
	}
	ibv_dev = list[i];
	DRV_LOG(DEBUG, "device opened");
	/*
	 * Multi-packet send is supported by ConnectX-4 Lx PF as well
	 * as all ConnectX-5 devices.
	 */
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	attrs_out.comp_mask |= MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS;
#endif
	mlx5_glue->dv_query_device(attr_ctx, &attrs_out);
	if (attrs_out.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
		if (attrs_out.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
			DRV_LOG(DEBUG, "enhanced MPW is supported");
			mps = MLX5_MPW_ENHANCED;
		} else {
			DRV_LOG(DEBUG, "MPW is supported");
			mps = MLX5_MPW;
		}
	} else {
		DRV_LOG(DEBUG, "MPW isn't supported");
		mps = MLX5_MPW_DISABLED;
	}
	if (RTE_CACHE_LINE_SIZE == 128 &&
	    !(attrs_out.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP))
		cqe_comp = 0;
	else
		cqe_comp = 1;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	if (attrs_out.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
		tunnel_en = ((attrs_out.tunnel_offloads_caps &
			      MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN) &&
			     (attrs_out.tunnel_offloads_caps &
			      MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE));
	}
	DRV_LOG(DEBUG, "tunnel offloading is %ssupported",
		tunnel_en ? "" : "not ");
"" : "not "); 704 #else 705 DRV_LOG(WARNING, 706 "tunnel offloading disabled due to old OFED/rdma-core version"); 707 #endif 708 if (mlx5_glue->query_device_ex(attr_ctx, NULL, &device_attr)) { 709 err = errno; 710 goto error; 711 } 712 DRV_LOG(INFO, "%u port(s) detected", 713 device_attr.orig_attr.phys_port_cnt); 714 for (i = 0; i < device_attr.orig_attr.phys_port_cnt; i++) { 715 char name[RTE_ETH_NAME_MAX_LEN]; 716 int len; 717 uint32_t port = i + 1; /* ports are indexed from one */ 718 uint32_t test = (1 << i); 719 struct ibv_context *ctx = NULL; 720 struct ibv_port_attr port_attr; 721 struct ibv_pd *pd = NULL; 722 struct priv *priv = NULL; 723 struct rte_eth_dev *eth_dev = NULL; 724 struct ibv_device_attr_ex device_attr_ex; 725 struct ether_addr mac; 726 struct mlx5_dev_config config = { 727 .cqe_comp = cqe_comp, 728 .mps = mps, 729 .tunnel_en = tunnel_en, 730 .tx_vec_en = 1, 731 .rx_vec_en = 1, 732 .mpw_hdr_dseg = 0, 733 .txq_inline = MLX5_ARG_UNSET, 734 .txqs_inline = MLX5_ARG_UNSET, 735 .inline_max_packet_sz = MLX5_ARG_UNSET, 736 }; 737 738 len = snprintf(name, sizeof(name), PCI_PRI_FMT, 739 pci_dev->addr.domain, pci_dev->addr.bus, 740 pci_dev->addr.devid, pci_dev->addr.function); 741 if (device_attr.orig_attr.phys_port_cnt > 1) 742 snprintf(name + len, sizeof(name), " port %u", i); 743 mlx5_dev[idx].ports |= test; 744 if (rte_eal_process_type() == RTE_PROC_SECONDARY) { 745 eth_dev = rte_eth_dev_attach_secondary(name); 746 if (eth_dev == NULL) { 747 DRV_LOG(ERR, "can not attach rte ethdev"); 748 rte_errno = ENOMEM; 749 err = rte_errno; 750 goto error; 751 } 752 eth_dev->device = &pci_dev->device; 753 eth_dev->dev_ops = &mlx5_dev_sec_ops; 754 err = mlx5_uar_init_secondary(eth_dev); 755 if (err) 756 goto error; 757 /* Receive command fd from primary process */ 758 err = mlx5_socket_connect(eth_dev); 759 if (err) 760 goto error; 761 /* Remap UAR for Tx queues. */ 762 err = mlx5_tx_uar_remap(eth_dev, err); 763 if (err) 764 goto error; 765 /* 766 * Ethdev pointer is still required as input since 767 * the primary device is not accessible from the 768 * secondary process. 769 */ 770 eth_dev->rx_pkt_burst = 771 mlx5_select_rx_function(eth_dev); 772 eth_dev->tx_pkt_burst = 773 mlx5_select_tx_function(eth_dev); 774 continue; 775 } 776 DRV_LOG(DEBUG, "using port %u (%08" PRIx32 ")", port, test); 777 ctx = mlx5_glue->open_device(ibv_dev); 778 if (ctx == NULL) { 779 err = ENODEV; 780 goto port_error; 781 } 782 /* Check port status. */ 783 err = mlx5_glue->query_port(ctx, port, &port_attr); 784 if (err) { 785 DRV_LOG(ERR, "port query failed: %s", strerror(err)); 786 goto port_error; 787 } 788 if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) { 789 DRV_LOG(ERR, 790 "port %d is not configured in Ethernet mode", 791 port); 792 err = EINVAL; 793 goto port_error; 794 } 795 if (port_attr.state != IBV_PORT_ACTIVE) 796 DRV_LOG(DEBUG, "port %d is not active: \"%s\" (%d)", 797 port, 798 mlx5_glue->port_state_str(port_attr.state), 799 port_attr.state); 800 /* Allocate protection domain. 
		pd = mlx5_glue->alloc_pd(ctx);
		if (pd == NULL) {
			DRV_LOG(ERR, "PD allocation failure");
			err = ENOMEM;
			goto port_error;
		}
		mlx5_dev[idx].ports |= test;
		/* from rte_ethdev.c */
		priv = rte_zmalloc("ethdev private structure",
				   sizeof(*priv),
				   RTE_CACHE_LINE_SIZE);
		if (priv == NULL) {
			DRV_LOG(ERR, "priv allocation failure");
			err = ENOMEM;
			goto port_error;
		}
		priv->ctx = ctx;
		strncpy(priv->ibdev_path, priv->ctx->device->ibdev_path,
			sizeof(priv->ibdev_path));
		priv->device_attr = device_attr;
		priv->port = port;
		priv->pd = pd;
		priv->mtu = ETHER_MTU;
		err = mlx5_args(&config, pci_dev->device.devargs);
		if (err) {
			DRV_LOG(ERR, "failed to process device arguments: %s",
				strerror(err));
			goto port_error;
		}
		if (mlx5_glue->query_device_ex(ctx, NULL, &device_attr_ex)) {
			DRV_LOG(ERR, "ibv_query_device_ex() failed");
			err = errno;
			goto port_error;
		}
		config.hw_csum = !!(device_attr_ex.device_cap_flags_ex &
				    IBV_DEVICE_RAW_IP_CSUM);
		DRV_LOG(DEBUG, "checksum offloading is %ssupported",
			(config.hw_csum ? "" : "not "));
#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
		config.flow_counter_en = !!(device_attr.max_counter_sets);
		mlx5_glue->describe_counter_set(ctx, 0, &cs_desc);
		DRV_LOG(DEBUG,
			"counter type = %d, num of cs = %ld, attributes = %d",
			cs_desc.counter_type, cs_desc.num_of_cs,
			cs_desc.attributes);
#endif
		config.ind_table_max_size =
			device_attr_ex.rss_caps.max_rwq_indirection_table_size;
		/* Remove this check once DPDK supports larger/variable
		 * indirection tables. */
		if (config.ind_table_max_size >
		    (unsigned int)ETH_RSS_RETA_SIZE_512)
			config.ind_table_max_size = ETH_RSS_RETA_SIZE_512;
		DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
			config.ind_table_max_size);
		config.hw_vlan_strip = !!(device_attr_ex.raw_packet_caps &
					  IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
		DRV_LOG(DEBUG, "VLAN stripping is %ssupported",
			(config.hw_vlan_strip ? "" : "not "));

		config.hw_fcs_strip = !!(device_attr_ex.raw_packet_caps &
					 IBV_RAW_PACKET_CAP_SCATTER_FCS);
		DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported",
			(config.hw_fcs_strip ? "" : "not "));

#ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING
		config.hw_padding = !!device_attr_ex.rx_pad_end_addr_align;
#endif
		DRV_LOG(DEBUG,
			"hardware Rx end alignment padding is %ssupported",
			(config.hw_padding ? "" : "not "));
		config.tso = ((device_attr_ex.tso_caps.max_tso > 0) &&
			      (device_attr_ex.tso_caps.supported_qpts &
			       (1 << IBV_QPT_RAW_PACKET)));
		if (config.tso)
			config.tso_max_payload_sz =
				device_attr_ex.tso_caps.max_tso;
		if (config.mps && !mps) {
			DRV_LOG(ERR,
				"multi-packet send not supported on this device"
				" (" MLX5_TXQ_MPW_EN ")");
			err = ENOTSUP;
			goto port_error;
		}
		DRV_LOG(INFO, "%s MPS is %s",
			config.mps == MLX5_MPW_ENHANCED ? "enhanced " : "",
			config.mps != MLX5_MPW_DISABLED ? "enabled" :
			"disabled");
"enabled" : 888 "disabled"); 889 if (config.cqe_comp && !cqe_comp) { 890 DRV_LOG(WARNING, "Rx CQE compression isn't supported"); 891 config.cqe_comp = 0; 892 } 893 eth_dev = rte_eth_dev_allocate(name); 894 if (eth_dev == NULL) { 895 DRV_LOG(ERR, "can not allocate rte ethdev"); 896 err = ENOMEM; 897 goto port_error; 898 } 899 eth_dev->data->dev_private = priv; 900 priv->dev = eth_dev; 901 eth_dev->data->mac_addrs = priv->mac; 902 eth_dev->device = &pci_dev->device; 903 rte_eth_copy_pci_info(eth_dev, pci_dev); 904 eth_dev->device->driver = &mlx5_driver.driver; 905 err = mlx5_uar_init_primary(eth_dev); 906 if (err) 907 goto port_error; 908 /* Configure the first MAC address by default. */ 909 if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) { 910 DRV_LOG(ERR, 911 "port %u cannot get MAC address, is mlx5_en" 912 " loaded? (errno: %s)", 913 eth_dev->data->port_id, strerror(errno)); 914 err = ENODEV; 915 goto port_error; 916 } 917 DRV_LOG(INFO, 918 "port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x", 919 eth_dev->data->port_id, 920 mac.addr_bytes[0], mac.addr_bytes[1], 921 mac.addr_bytes[2], mac.addr_bytes[3], 922 mac.addr_bytes[4], mac.addr_bytes[5]); 923 #ifndef NDEBUG 924 { 925 char ifname[IF_NAMESIZE]; 926 927 if (mlx5_get_ifname(eth_dev, &ifname) == 0) 928 DRV_LOG(DEBUG, "port %u ifname is \"%s\"", 929 eth_dev->data->port_id, ifname); 930 else 931 DRV_LOG(DEBUG, "port %u ifname is unknown", 932 eth_dev->data->port_id); 933 } 934 #endif 935 /* Get actual MTU if possible. */ 936 err = mlx5_get_mtu(eth_dev, &priv->mtu); 937 if (err) 938 goto port_error; 939 DRV_LOG(DEBUG, "port %u MTU is %u", eth_dev->data->port_id, 940 priv->mtu); 941 /* 942 * Initialize burst functions to prevent crashes before link-up. 943 */ 944 eth_dev->rx_pkt_burst = removed_rx_burst; 945 eth_dev->tx_pkt_burst = removed_tx_burst; 946 eth_dev->dev_ops = &mlx5_dev_ops; 947 /* Register MAC address. */ 948 claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0)); 949 TAILQ_INIT(&priv->flows); 950 TAILQ_INIT(&priv->ctrl_flows); 951 /* Hint libmlx5 to use PMD allocator for data plane resources */ 952 struct mlx5dv_ctx_allocators alctr = { 953 .alloc = &mlx5_alloc_verbs_buf, 954 .free = &mlx5_free_verbs_buf, 955 .data = priv, 956 }; 957 mlx5_glue->dv_set_context_attr(ctx, 958 MLX5DV_CTX_ATTR_BUF_ALLOCATORS, 959 (void *)((uintptr_t)&alctr)); 960 /* Bring Ethernet device up. */ 961 DRV_LOG(DEBUG, "port %u forcing Ethernet interface up", 962 eth_dev->data->port_id); 963 mlx5_set_link_up(eth_dev); 964 /* Store device configuration on private structure. */ 965 priv->config = config; 966 continue; 967 port_error: 968 if (priv) 969 rte_free(priv); 970 if (pd) 971 claim_zero(mlx5_glue->dealloc_pd(pd)); 972 if (ctx) 973 claim_zero(mlx5_glue->close_device(ctx)); 974 break; 975 } 976 /* 977 * XXX if something went wrong in the loop above, there is a resource 978 * leak (ctx, pd, priv, dpdk ethdev) but we can do nothing about it as 979 * long as the dpdk does not provide a way to deallocate a ethdev and a 980 * way to enumerate the registered ethdevs to free the previous ones. 
	/* no port found, complain */
	if (!mlx5_dev[idx].ports) {
		rte_errno = ENODEV;
		err = rte_errno;
	}
error:
	if (attr_ctx)
		claim_zero(mlx5_glue->close_device(attr_ctx));
	if (list)
		mlx5_glue->free_device_list(list);
	if (err) {
		rte_errno = err;
		return -rte_errno;
	}
	return 0;
}

static const struct rte_pci_id mlx5_pci_id_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4LX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5EX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF)
	},
	{
		.vendor_id = 0
	}
};

static struct rte_pci_driver mlx5_driver = {
	.driver = {
		.name = MLX5_DRIVER_NAME
	},
	.id_table = mlx5_pci_id_map,
	.probe = mlx5_pci_probe,
	.drv_flags = RTE_PCI_DRV_INTR_LSC | RTE_PCI_DRV_INTR_RMV,
};

#ifdef RTE_LIBRTE_MLX5_DLOPEN_DEPS

/**
 * Suffix RTE_EAL_PMD_PATH with "-glue".
 *
 * This function performs a sanity check on RTE_EAL_PMD_PATH before
 * suffixing its last component.
 *
 * @param[out] buf
 *   Output buffer, should be large enough otherwise NULL is returned.
 * @param size
 *   Size of @p buf.
 *
 * @return
 *   Pointer to @p buf or @p NULL in case suffix cannot be appended.
 */
static char *
mlx5_glue_path(char *buf, size_t size)
{
	static const char *const bad[] = { "/", ".", "..", NULL };
	const char *path = RTE_EAL_PMD_PATH;
	size_t len = strlen(path);
	size_t off;
	int i;

	while (len && path[len - 1] == '/')
		--len;
	for (off = len; off && path[off - 1] != '/'; --off)
		;
	for (i = 0; bad[i]; ++i)
		if (!strncmp(path + off, bad[i], (int)(len - off)))
			goto error;
	i = snprintf(buf, size, "%.*s-glue", (int)len, path);
	if (i == -1 || (size_t)i >= size)
		goto error;
	return buf;
error:
	DRV_LOG(ERR,
		"unable to append \"-glue\" to last component of"
		" RTE_EAL_PMD_PATH (\"" RTE_EAL_PMD_PATH "\"),"
		" please re-configure DPDK");
	return NULL;
}

/**
 * Initialization routine for run-time dependency on rdma-core.
 */
static int
mlx5_glue_init(void)
{
	char glue_path[sizeof(RTE_EAL_PMD_PATH) - 1 + sizeof("-glue")];
	const char *path[] = {
		/*
		 * A basic security check is necessary before trusting
		 * MLX5_GLUE_PATH, which may override RTE_EAL_PMD_PATH.
		 */
		(geteuid() == getuid() && getegid() == getgid() ?
		 getenv("MLX5_GLUE_PATH") : NULL),
		/*
		 * When RTE_EAL_PMD_PATH is set, use its glue-suffixed
		 * variant, otherwise let dlopen() look up libraries on its
		 * own.
		 */
		(*RTE_EAL_PMD_PATH ?
		 mlx5_glue_path(glue_path, sizeof(glue_path)) : ""),
	};
	unsigned int i = 0;
	void *handle = NULL;
	void **sym;
	const char *dlmsg;

	while (!handle && i != RTE_DIM(path)) {
		const char *end;
		size_t len;
		int ret;

		if (!path[i]) {
			++i;
			continue;
		}
		end = strpbrk(path[i], ":;");
		if (!end)
			end = path[i] + strlen(path[i]);
		len = end - path[i];
		ret = 0;
		do {
			char name[ret + 1];

			ret = snprintf(name, sizeof(name), "%.*s%s" MLX5_GLUE,
				       (int)len, path[i],
				       (!len || *(end - 1) == '/') ? "" : "/");
			if (ret == -1)
				break;
			if (sizeof(name) != (size_t)ret + 1)
				continue;
			DRV_LOG(DEBUG, "looking for rdma-core glue as \"%s\"",
				name);
			handle = dlopen(name, RTLD_LAZY);
			break;
		} while (1);
		path[i] = end + 1;
		if (!*end)
			++i;
	}
	if (!handle) {
		rte_errno = EINVAL;
		dlmsg = dlerror();
		if (dlmsg)
			DRV_LOG(WARNING, "cannot load glue library: %s", dlmsg);
		goto glue_error;
	}
	sym = dlsym(handle, "mlx5_glue");
	if (!sym || !*sym) {
		rte_errno = EINVAL;
		dlmsg = dlerror();
		if (dlmsg)
			DRV_LOG(ERR, "cannot resolve glue symbol: %s", dlmsg);
		goto glue_error;
	}
	mlx5_glue = *sym;
	return 0;
glue_error:
	if (handle)
		dlclose(handle);
	DRV_LOG(WARNING,
		"cannot initialize PMD due to missing run-time dependency on"
		" rdma-core libraries (libibverbs, libmlx5)");
	return -rte_errno;
}

#endif

/**
 * Driver initialization routine.
 */
RTE_INIT(rte_mlx5_pmd_init);
static void
rte_mlx5_pmd_init(void)
{
	/* Build the static table for ptype conversion. */
	mlx5_set_ptype_table();
	/*
	 * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use
	 * huge pages. Calling ibv_fork_init() during init allows
	 * applications to use fork() safely for purposes other than
	 * using this PMD, which is not supported in forked processes.
	 */
	setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
	/* Match the size of Rx completion entry to the size of a cacheline. */
	if (RTE_CACHE_LINE_SIZE == 128)
		setenv("MLX5_CQE_SIZE", "128", 0);
#ifdef RTE_LIBRTE_MLX5_DLOPEN_DEPS
	if (mlx5_glue_init())
		return;
	assert(mlx5_glue);
#endif
#ifndef NDEBUG
	/* Glue structure must not contain any NULL pointers. */
	{
		unsigned int i;

		for (i = 0; i != sizeof(*mlx5_glue) / sizeof(void *); ++i)
			assert(((const void *const *)mlx5_glue)[i]);
	}
#endif
	if (strcmp(mlx5_glue->version, MLX5_GLUE_VERSION)) {
		DRV_LOG(ERR,
			"rdma-core glue \"%s\" mismatch: \"%s\" is required",
			mlx5_glue->version, MLX5_GLUE_VERSION);
		return;
	}
	mlx5_glue->fork_init();
	rte_pci_register(&mlx5_driver);
}

RTE_PMD_EXPORT_NAME(net_mlx5, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_mlx5, mlx5_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_mlx5, "* ib_uverbs & mlx5_core & mlx5_ib");

/** Initialize driver log type. */
RTE_INIT(mlx5_log_init)
{
	mlx5_logtype = rte_log_register("pmd.net.mlx5");
	if (mlx5_logtype >= 0)
		rte_log_set_level(mlx5_logtype, RTE_LOG_NOTICE);
}
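
/*
 * Usage note (illustrative only, not part of the driver): the device
 * parameters defined at the top of this file are passed as PCI device
 * arguments on the EAL command line, e.g. for a hypothetical adapter at
 * 0000:05:00.0:
 *
 *   testpmd -w 0000:05:00.0,txq_inline=200,txqs_min_inline=8,rxq_cqe_comp_en=0 -- -i
 *
 * Each key/value pair is validated by mlx5_args_check() through mlx5_args();
 * an unknown key or a non-integer value makes the port probe fail with
 * EINVAL.
 */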