/*-
 *   BSD LICENSE
 *
 *   Copyright 2015 6WIND S.A.
 *   Copyright 2015 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stddef.h>
#include <unistd.h>
#include <string.h>
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <errno.h>
#include <net/if.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

/* DPDK headers don't like -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_pci.h>
#include <rte_common.h>
#include <rte_kvargs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"

/* Device parameter to enable RX completion queue compression. */
#define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en"

/* Device parameter to configure inline send. */
#define MLX5_TXQ_INLINE "txq_inline"

/*
 * Device parameter to configure the number of TX queues threshold for
 * enabling inline send.
 */
#define MLX5_TXQS_MIN_INLINE "txqs_min_inline"

/* Device parameter to enable multi-packet send WQEs. */
#define MLX5_TXQ_MPW_EN "txq_mpw_en"

/**
 * Retrieve integer value from environment variable.
 *
 * @param[in] name
 *   Environment variable name.
 *
 * @return
 *   Integer value, 0 if the variable is not set.
 */
int
mlx5_getenv_int(const char *name)
{
	const char *val = getenv(name);

	if (val == NULL)
		return 0;
	return atoi(val);
}

/**
 * DPDK callback to close the device.
 *
 * Destroy all queues and objects, free memory.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_dev_close(struct rte_eth_dev *dev)
{
	struct priv *priv = mlx5_get_priv(dev);
	unsigned int i;

	priv_lock(priv);
	DEBUG("%p: closing device \"%s\"",
	      (void *)dev,
	      ((priv->ctx != NULL) ? priv->ctx->device->name : ""));
	/* In case mlx5_dev_stop() has not been called. */
	priv_dev_interrupt_handler_uninstall(priv, dev);
	priv_special_flow_disable_all(priv);
	priv_mac_addrs_disable(priv);
	priv_destroy_hash_rxqs(priv);

	/* Remove flow director elements. */
	priv_fdir_disable(priv);
	priv_fdir_delete_filters_list(priv);

	/* Prevent crashes when queues are still in use. */
	dev->rx_pkt_burst = removed_rx_burst;
	dev->tx_pkt_burst = removed_tx_burst;
	if (priv->rxqs != NULL) {
		/* XXX race condition if mlx5_rx_burst() is still running. */
		usleep(1000);
		for (i = 0; (i != priv->rxqs_n); ++i) {
			struct rxq *rxq = (*priv->rxqs)[i];
			struct rxq_ctrl *rxq_ctrl;

			if (rxq == NULL)
				continue;
			rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
			(*priv->rxqs)[i] = NULL;
			rxq_cleanup(rxq_ctrl);
			rte_free(rxq_ctrl);
		}
		priv->rxqs_n = 0;
		priv->rxqs = NULL;
	}
	if (priv->txqs != NULL) {
		/* XXX race condition if mlx5_tx_burst() is still running. */
		usleep(1000);
		for (i = 0; (i != priv->txqs_n); ++i) {
			struct txq *txq = (*priv->txqs)[i];
			struct txq_ctrl *txq_ctrl;

			if (txq == NULL)
				continue;
			txq_ctrl = container_of(txq, struct txq_ctrl, txq);
			(*priv->txqs)[i] = NULL;
			txq_cleanup(txq_ctrl);
			rte_free(txq_ctrl);
		}
		priv->txqs_n = 0;
		priv->txqs = NULL;
	}
	if (priv->pd != NULL) {
		assert(priv->ctx != NULL);
		claim_zero(ibv_dealloc_pd(priv->pd));
		claim_zero(ibv_close_device(priv->ctx));
	} else
		assert(priv->ctx == NULL);
	if (priv->rss_conf != NULL) {
		for (i = 0; (i != hash_rxq_init_n); ++i)
			rte_free((*priv->rss_conf)[i]);
		rte_free(priv->rss_conf);
	}
	if (priv->reta_idx != NULL)
		rte_free(priv->reta_idx);
	priv_unlock(priv);
	memset(priv, 0, sizeof(*priv));
}

static const struct eth_dev_ops mlx5_dev_ops = {
	.dev_configure = mlx5_dev_configure,
	.dev_start = mlx5_dev_start,
	.dev_stop = mlx5_dev_stop,
	.dev_set_link_down = mlx5_set_link_down,
	.dev_set_link_up = mlx5_set_link_up,
	.dev_close = mlx5_dev_close,
	.promiscuous_enable = mlx5_promiscuous_enable,
	.promiscuous_disable = mlx5_promiscuous_disable,
	.allmulticast_enable = mlx5_allmulticast_enable,
	.allmulticast_disable = mlx5_allmulticast_disable,
	.link_update = mlx5_link_update,
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.dev_infos_get = mlx5_dev_infos_get,
	.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
	.vlan_filter_set = mlx5_vlan_filter_set,
	.rx_queue_setup = mlx5_rx_queue_setup,
	.tx_queue_setup = mlx5_tx_queue_setup,
	.rx_queue_release = mlx5_rx_queue_release,
	.tx_queue_release = mlx5_tx_queue_release,
	.flow_ctrl_get = mlx5_dev_get_flow_ctrl,
	.flow_ctrl_set = mlx5_dev_set_flow_ctrl,
	.mac_addr_remove = mlx5_mac_addr_remove,
	.mac_addr_add = mlx5_mac_addr_add,
	.mac_addr_set = mlx5_mac_addr_set,
	.mtu_set = mlx5_dev_set_mtu,
	.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
	.vlan_offload_set = mlx5_vlan_offload_set,
	.reta_update = mlx5_dev_rss_reta_update,
	.reta_query = mlx5_dev_rss_reta_query,
	.rss_hash_update = mlx5_rss_hash_update,
	.rss_hash_conf_get = mlx5_rss_hash_conf_get,
	.filter_ctrl = mlx5_dev_filter_ctrl,
};

static struct {
	struct rte_pci_addr pci_addr; /* associated PCI address */
	uint32_t ports; /* physical ports bitfield. */
} mlx5_dev[32];

/**
 * Get device index in mlx5_dev[] from PCI bus address.
 *
 * @param[in] pci_addr
 *   PCI bus address to look for.
 *
 * @return
 *   mlx5_dev[] index on success, -1 on failure.
 */
static int
mlx5_dev_idx(struct rte_pci_addr *pci_addr)
{
	unsigned int i;
	int ret = -1;

	assert(pci_addr != NULL);
	for (i = 0; (i != RTE_DIM(mlx5_dev)); ++i) {
		if ((mlx5_dev[i].pci_addr.domain == pci_addr->domain) &&
		    (mlx5_dev[i].pci_addr.bus == pci_addr->bus) &&
		    (mlx5_dev[i].pci_addr.devid == pci_addr->devid) &&
		    (mlx5_dev[i].pci_addr.function == pci_addr->function))
			return i;
		if ((mlx5_dev[i].ports == 0) && (ret == -1))
			ret = i;
	}
	return ret;
}

/**
 * Verify and store value for device argument.
 *
 * @param[in] key
 *   Key argument to verify.
 * @param[in] val
 *   Value associated with key.
 * @param opaque
 *   User data.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
static int
mlx5_args_check(const char *key, const char *val, void *opaque)
{
	struct priv *priv = opaque;
	unsigned long tmp;

	errno = 0;
	tmp = strtoul(val, NULL, 0);
	if (errno) {
		WARN("%s: \"%s\" is not a valid integer", key, val);
		return errno;
	}
	if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
		priv->cqe_comp = !!tmp;
	} else if (strcmp(MLX5_TXQ_INLINE, key) == 0) {
		priv->txq_inline = tmp;
	} else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) {
		priv->txqs_inline = tmp;
	} else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
		priv->mps = !!tmp;
	} else {
		WARN("%s: unknown parameter", key);
		return -EINVAL;
	}
	return 0;
}

/**
 * Parse device parameters.
 *
 * @param priv
 *   Pointer to private structure.
 * @param devargs
 *   Device arguments structure.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
mlx5_args(struct priv *priv, struct rte_devargs *devargs)
{
	const char **params = (const char *[]){
		MLX5_RXQ_CQE_COMP_EN,
		MLX5_TXQ_INLINE,
		MLX5_TXQS_MIN_INLINE,
		MLX5_TXQ_MPW_EN,
		NULL,
	};
	struct rte_kvargs *kvlist;
	int ret = 0;
	int i;

	if (devargs == NULL)
		return 0;
	/* Following UGLY cast is done to pass checkpatch. */
	kvlist = rte_kvargs_parse(devargs->args, params);
	if (kvlist == NULL)
		return 0;
	/* Process parameters. */
	for (i = 0; (params[i] != NULL); ++i) {
		if (rte_kvargs_count(kvlist, params[i])) {
			ret = rte_kvargs_process(kvlist, params[i],
						 mlx5_args_check, priv);
			if (ret != 0) {
				/* Do not leak the kvlist on error. */
				rte_kvargs_free(kvlist);
				return ret;
			}
		}
	}
	rte_kvargs_free(kvlist);
	return 0;
}

static struct eth_driver mlx5_driver;

/**
 * DPDK callback to register a PCI device.
 *
 * This function creates an Ethernet device for each port of a given
 * PCI device.
 *
 * @param[in] pci_drv
 *   PCI driver structure (mlx5_driver).
 * @param[in] pci_dev
 *   PCI device information.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
static int
mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	struct ibv_device **list;
	struct ibv_device *ibv_dev;
	int err = 0;
	struct ibv_context *attr_ctx = NULL;
	struct ibv_device_attr device_attr;
	unsigned int sriov;
	unsigned int mps;
	int idx;
	int i;

	(void)pci_drv;
	assert(pci_drv == &mlx5_driver.pci_drv);
	/* Get mlx5_dev[] index. */
	idx = mlx5_dev_idx(&pci_dev->addr);
	if (idx == -1) {
		ERROR("this driver cannot support any more adapters");
		return -ENOMEM;
	}
	DEBUG("using driver device index %d", idx);

	/* Save PCI address. */
	mlx5_dev[idx].pci_addr = pci_dev->addr;
	list = ibv_get_device_list(&i);
	if (list == NULL) {
		assert(errno);
		if (errno == ENOSYS) {
			WARN("cannot list devices, is ib_uverbs loaded?");
			return 0;
		}
		return -errno;
	}
	assert(i >= 0);
	/*
	 * For each listed device, check related sysfs entry against
	 * the provided PCI ID.
	 */
	while (i != 0) {
		struct rte_pci_addr pci_addr;

		--i;
		DEBUG("checking device \"%s\"", list[i]->name);
		if (mlx5_ibv_device_to_pci_addr(list[i], &pci_addr))
			continue;
		if ((pci_dev->addr.domain != pci_addr.domain) ||
		    (pci_dev->addr.bus != pci_addr.bus) ||
		    (pci_dev->addr.devid != pci_addr.devid) ||
		    (pci_dev->addr.function != pci_addr.function))
			continue;
		sriov = ((pci_dev->id.device_id ==
			  PCI_DEVICE_ID_MELLANOX_CONNECTX4VF) ||
			 (pci_dev->id.device_id ==
			  PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF));
		/* Multi-packet send is only supported by ConnectX-4 Lx PF. */
		mps = (pci_dev->id.device_id ==
		       PCI_DEVICE_ID_MELLANOX_CONNECTX4LX);
		INFO("PCI information matches, using device \"%s\""
		     " (SR-IOV: %s, MPS: %s)",
		     list[i]->name,
		     sriov ? "true" : "false",
		     mps ? "true" : "false");
		attr_ctx = ibv_open_device(list[i]);
		err = errno;
		break;
	}
	if (attr_ctx == NULL) {
		ibv_free_device_list(list);
		switch (err) {
		case 0:
			WARN("cannot access device, is mlx5_ib loaded?");
			return 0;
		case EINVAL:
			WARN("cannot use device, are drivers up to date?");
			return 0;
		}
		assert(err > 0);
		return -err;
	}
	ibv_dev = list[i];

	DEBUG("device opened");
	if (ibv_query_device(attr_ctx, &device_attr))
		goto error;
	INFO("%u port(s) detected", device_attr.phys_port_cnt);

	for (i = 0; i < device_attr.phys_port_cnt; i++) {
		uint32_t port = i + 1; /* ports are indexed from one */
		uint32_t test = (1 << i);
		struct ibv_context *ctx = NULL;
		struct ibv_port_attr port_attr;
		struct ibv_pd *pd = NULL;
		struct priv *priv = NULL;
		struct rte_eth_dev *eth_dev;
		struct ibv_exp_device_attr exp_device_attr;
		struct ether_addr mac;
		uint16_t num_vfs = 0;

		exp_device_attr.comp_mask =
			IBV_EXP_DEVICE_ATTR_EXP_CAP_FLAGS |
			IBV_EXP_DEVICE_ATTR_RX_HASH |
			IBV_EXP_DEVICE_ATTR_VLAN_OFFLOADS |
			IBV_EXP_DEVICE_ATTR_RX_PAD_END_ALIGN |
			0;

		DEBUG("using port %u (%08" PRIx32 ")", port, test);

		ctx = ibv_open_device(ibv_dev);
		if (ctx == NULL)
			goto port_error;

		/* Check port status. */
		err = ibv_query_port(ctx, port, &port_attr);
		if (err) {
			ERROR("port query failed: %s", strerror(err));
			goto port_error;
		}

		if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
			ERROR("port %d is not configured in Ethernet mode",
			      port);
			goto port_error;
		}

		if (port_attr.state != IBV_PORT_ACTIVE)
			DEBUG("port %d is not active: \"%s\" (%d)",
			      port, ibv_port_state_str(port_attr.state),
			      port_attr.state);

		/* Allocate protection domain. */
		pd = ibv_alloc_pd(ctx);
		if (pd == NULL) {
			ERROR("PD allocation failure");
			err = ENOMEM;
			goto port_error;
		}

		mlx5_dev[idx].ports |= test;

		/* from rte_ethdev.c */
		priv = rte_zmalloc("ethdev private structure",
				   sizeof(*priv),
				   RTE_CACHE_LINE_SIZE);
		if (priv == NULL) {
			ERROR("priv allocation failure");
			err = ENOMEM;
			goto port_error;
		}

		priv->ctx = ctx;
		priv->device_attr = device_attr;
		priv->port = port;
		priv->pd = pd;
		priv->mtu = ETHER_MTU;
		priv->mps = mps; /* Enable MPW by default if supported. */
		priv->cqe_comp = 1; /* Enable compression by default. */
		err = mlx5_args(priv, pci_dev->device.devargs);
		if (err) {
			ERROR("failed to process device arguments: %s",
			      strerror(err));
			goto port_error;
		}
		if (ibv_exp_query_device(ctx, &exp_device_attr)) {
			ERROR("ibv_exp_query_device() failed");
			goto port_error;
		}

		priv->hw_csum =
			((exp_device_attr.exp_device_cap_flags &
			  IBV_EXP_DEVICE_RX_CSUM_TCP_UDP_PKT) &&
			 (exp_device_attr.exp_device_cap_flags &
			  IBV_EXP_DEVICE_RX_CSUM_IP_PKT));
		DEBUG("checksum offloading is %ssupported",
		      (priv->hw_csum ? "" : "not "));

		priv->hw_csum_l2tun = !!(exp_device_attr.exp_device_cap_flags &
					 IBV_EXP_DEVICE_VXLAN_SUPPORT);
		DEBUG("L2 tunnel checksum offloads are %ssupported",
		      (priv->hw_csum_l2tun ? "" : "not "));

		priv->ind_table_max_size =
			exp_device_attr.rx_hash_caps.max_rwq_indirection_table_size;
		/* Remove this check once DPDK supports larger/variable
		 * indirection tables. */
		if (priv->ind_table_max_size >
		    (unsigned int)RSS_INDIRECTION_TABLE_SIZE)
			priv->ind_table_max_size = RSS_INDIRECTION_TABLE_SIZE;
		DEBUG("maximum RX indirection table size is %u",
		      priv->ind_table_max_size);
		priv->hw_vlan_strip = !!(exp_device_attr.wq_vlan_offloads_cap &
					 IBV_EXP_RECEIVE_WQ_CVLAN_STRIP);
		DEBUG("VLAN stripping is %ssupported",
		      (priv->hw_vlan_strip ? "" : "not "));

		priv->hw_fcs_strip = !!(exp_device_attr.exp_device_cap_flags &
					 IBV_EXP_DEVICE_SCATTER_FCS);
		DEBUG("FCS stripping configuration is %ssupported",
		      (priv->hw_fcs_strip ? "" : "not "));

		priv->hw_padding = !!exp_device_attr.rx_pad_end_addr_align;
		DEBUG("hardware RX end alignment padding is %ssupported",
		      (priv->hw_padding ? "" : "not "));

		priv_get_num_vfs(priv, &num_vfs);
		priv->sriov = (num_vfs || sriov);
		if (priv->mps && !mps) {
			ERROR("multi-packet send not supported on this device"
			      " (" MLX5_TXQ_MPW_EN ")");
			err = ENOTSUP;
			goto port_error;
		}
		/* Allocate and register default RSS hash keys. */
		priv->rss_conf = rte_calloc(__func__, hash_rxq_init_n,
					    sizeof((*priv->rss_conf)[0]), 0);
		if (priv->rss_conf == NULL) {
			err = ENOMEM;
			goto port_error;
		}
		err = rss_hash_rss_conf_new_key(priv,
						rss_hash_default_key,
						rss_hash_default_key_len,
						ETH_RSS_PROTO_MASK);
		if (err)
			goto port_error;
		/* Configure the first MAC address by default. */
		if (priv_get_mac(priv, &mac.addr_bytes)) {
			ERROR("cannot get MAC address, is mlx5_en loaded?"
			      " (errno: %s)", strerror(errno));
			goto port_error;
		}
		INFO("port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
		     priv->port,
		     mac.addr_bytes[0], mac.addr_bytes[1],
		     mac.addr_bytes[2], mac.addr_bytes[3],
		     mac.addr_bytes[4], mac.addr_bytes[5]);
		/* Register MAC address. */
		claim_zero(priv_mac_addr_add(priv, 0,
					     (const uint8_t (*)[ETHER_ADDR_LEN])
					     mac.addr_bytes));
		/* Initialize FD filters list. */
		err = fdir_init_filters_list(priv);
		if (err)
			goto port_error;
#ifndef NDEBUG
		{
			char ifname[IF_NAMESIZE];

			if (priv_get_ifname(priv, &ifname) == 0)
				DEBUG("port %u ifname is \"%s\"",
				      priv->port, ifname);
			else
				DEBUG("port %u ifname is unknown", priv->port);
		}
#endif
		/* Get actual MTU if possible. */
		priv_get_mtu(priv, &priv->mtu);
		DEBUG("port %u MTU is %u", priv->port, priv->mtu);

		/* from rte_ethdev.c */
		{
			char name[RTE_ETH_NAME_MAX_LEN];

			snprintf(name, sizeof(name), "%s port %u",
				 ibv_get_device_name(ibv_dev), port);
			eth_dev = rte_eth_dev_allocate(name);
		}
		if (eth_dev == NULL) {
			ERROR("can not allocate rte ethdev");
			err = ENOMEM;
			goto port_error;
		}

		/* Secondary processes have to use local storage for their
		 * private data as well as a copy of eth_dev->data, but this
		 * pointer must not be modified before burst functions are
		 * actually called. */
		if (mlx5_is_secondary()) {
			struct mlx5_secondary_data *sd =
				&mlx5_secondary_data[eth_dev->data->port_id];

			sd->primary_priv = eth_dev->data->dev_private;
			if (sd->primary_priv == NULL) {
				ERROR("no private data for port %u",
				      eth_dev->data->port_id);
				err = EINVAL;
				goto port_error;
			}
			sd->shared_dev_data = eth_dev->data;
			rte_spinlock_init(&sd->lock);
			memcpy(sd->data.name, sd->shared_dev_data->name,
			       sizeof(sd->data.name));
			sd->data.dev_private = priv;
			sd->data.rx_mbuf_alloc_failed = 0;
			sd->data.mtu = ETHER_MTU;
			sd->data.port_id = sd->shared_dev_data->port_id;
			sd->data.mac_addrs = priv->mac;
			eth_dev->tx_pkt_burst = mlx5_tx_burst_secondary_setup;
			eth_dev->rx_pkt_burst = mlx5_rx_burst_secondary_setup;
		} else {
			eth_dev->data->dev_private = priv;
			eth_dev->data->mac_addrs = priv->mac;
		}

		eth_dev->pci_dev = pci_dev;
		rte_eth_copy_pci_info(eth_dev, pci_dev);
		eth_dev->driver = &mlx5_driver;
		priv->dev = eth_dev;
		eth_dev->dev_ops = &mlx5_dev_ops;

		/* Bring Ethernet device up. */
		DEBUG("forcing Ethernet interface up");
		priv_set_flags(priv, ~IFF_UP, IFF_UP);
		mlx5_link_update_unlocked(priv->dev, 1);
		continue;

port_error:
		if (priv) {
			rte_free(priv->rss_conf);
			rte_free(priv);
		}
		if (pd)
			claim_zero(ibv_dealloc_pd(pd));
		if (ctx)
			claim_zero(ibv_close_device(ctx));
		break;
	}

	/*
	 * XXX if something went wrong in the loop above, there is a resource
	 * leak (ctx, pd, priv, dpdk ethdev) but we can do nothing about it as
	 * long as the dpdk does not provide a way to deallocate a ethdev and a
	 * way to enumerate the registered ethdevs to free the previous ones.
	 */

	/* no port found, complain */
	if (!mlx5_dev[idx].ports) {
		err = ENODEV;
		goto error;
	}

error:
	if (attr_ctx)
		claim_zero(ibv_close_device(attr_ctx));
	if (list)
		ibv_free_device_list(list);
	assert(err >= 0);
	return -err;
}

static const struct rte_pci_id mlx5_pci_id_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4LX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF)
	},
	{
		.vendor_id = 0
	}
};

static struct eth_driver mlx5_driver = {
	.pci_drv = {
		.driver = {
			.name = MLX5_DRIVER_NAME
		},
		.id_table = mlx5_pci_id_map,
		.probe = mlx5_pci_probe,
		.drv_flags = RTE_PCI_DRV_INTR_LSC,
	},
	.dev_private_size = sizeof(struct priv)
};

/**
 * Driver initialization routine.
 */
RTE_INIT(rte_mlx5_pmd_init);
static void
rte_mlx5_pmd_init(void)
{
	/*
	 * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use
	 * huge pages. Calling ibv_fork_init() during init allows
	 * applications to use fork() safely for purposes other than
	 * using this PMD, which is not supported in forked processes.
	 */
	setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
	ibv_fork_init();
	rte_eal_pci_register(&mlx5_driver.pci_drv);
}

RTE_PMD_EXPORT_NAME(net_mlx5, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_mlx5, mlx5_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_mlx5, "* ib_uverbs & mlx5_core & mlx5_ib");