1 /*- 2 * BSD LICENSE 3 * 4 * Copyright 2015 6WIND S.A. 5 * Copyright 2015 Mellanox. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of 6WIND S.A. nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #include <stddef.h> 35 #include <unistd.h> 36 #include <string.h> 37 #include <assert.h> 38 #include <stdint.h> 39 #include <stdlib.h> 40 #include <net/if.h> 41 42 /* Verbs header. */ 43 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ 44 #ifdef PEDANTIC 45 #pragma GCC diagnostic ignored "-pedantic" 46 #endif 47 #include <infiniband/verbs.h> 48 #ifdef PEDANTIC 49 #pragma GCC diagnostic error "-pedantic" 50 #endif 51 52 /* DPDK headers don't like -pedantic. */ 53 #ifdef PEDANTIC 54 #pragma GCC diagnostic ignored "-pedantic" 55 #endif 56 #include <rte_malloc.h> 57 #include <rte_ethdev.h> 58 #include <rte_pci.h> 59 #include <rte_common.h> 60 #ifdef PEDANTIC 61 #pragma GCC diagnostic error "-pedantic" 62 #endif 63 64 #include "mlx5.h" 65 #include "mlx5_utils.h" 66 #include "mlx5_rxtx.h" 67 #include "mlx5_autoconf.h" 68 #include "mlx5_defs.h" 69 70 /** 71 * Retrieve integer value from environment variable. 72 * 73 * @param[in] name 74 * Environment variable name. 75 * 76 * @return 77 * Integer value, 0 if the variable is not set. 78 */ 79 int 80 mlx5_getenv_int(const char *name) 81 { 82 const char *val = getenv(name); 83 84 if (val == NULL) 85 return 0; 86 return atoi(val); 87 } 88 89 /** 90 * DPDK callback to close the device. 91 * 92 * Destroy all queues and objects, free memory. 93 * 94 * @param dev 95 * Pointer to Ethernet device structure. 96 */ 97 static void 98 mlx5_dev_close(struct rte_eth_dev *dev) 99 { 100 struct priv *priv = mlx5_get_priv(dev); 101 void *tmp; 102 unsigned int i; 103 104 priv_lock(priv); 105 DEBUG("%p: closing device \"%s\"", 106 (void *)dev, 107 ((priv->ctx != NULL) ? priv->ctx->device->name : "")); 108 /* In case mlx5_dev_stop() has not been called. */ 109 priv_dev_interrupt_handler_uninstall(priv, dev); 110 priv_special_flow_disable_all(priv); 111 priv_mac_addrs_disable(priv); 112 priv_destroy_hash_rxqs(priv); 113 114 /* Remove flow director elements. */ 115 priv_fdir_disable(priv); 116 priv_fdir_delete_filters_list(priv); 117 118 /* Prevent crashes when queues are still in use. */ 119 dev->rx_pkt_burst = removed_rx_burst; 120 dev->tx_pkt_burst = removed_tx_burst; 121 if (priv->rxqs != NULL) { 122 /* XXX race condition if mlx5_rx_burst() is still running. */ 123 usleep(1000); 124 for (i = 0; (i != priv->rxqs_n); ++i) { 125 tmp = (*priv->rxqs)[i]; 126 if (tmp == NULL) 127 continue; 128 (*priv->rxqs)[i] = NULL; 129 rxq_cleanup(tmp); 130 rte_free(tmp); 131 } 132 priv->rxqs_n = 0; 133 priv->rxqs = NULL; 134 } 135 if (priv->txqs != NULL) { 136 /* XXX race condition if mlx5_tx_burst() is still running. */ 137 usleep(1000); 138 for (i = 0; (i != priv->txqs_n); ++i) { 139 tmp = (*priv->txqs)[i]; 140 if (tmp == NULL) 141 continue; 142 (*priv->txqs)[i] = NULL; 143 txq_cleanup(tmp); 144 rte_free(tmp); 145 } 146 priv->txqs_n = 0; 147 priv->txqs = NULL; 148 } 149 if (priv->pd != NULL) { 150 assert(priv->ctx != NULL); 151 claim_zero(ibv_dealloc_pd(priv->pd)); 152 claim_zero(ibv_close_device(priv->ctx)); 153 } else 154 assert(priv->ctx == NULL); 155 if (priv->rss_conf != NULL) { 156 for (i = 0; (i != hash_rxq_init_n); ++i) 157 rte_free((*priv->rss_conf)[i]); 158 rte_free(priv->rss_conf); 159 } 160 if (priv->reta_idx != NULL) 161 rte_free(priv->reta_idx); 162 priv_unlock(priv); 163 memset(priv, 0, sizeof(*priv)); 164 } 165 166 static const struct eth_dev_ops mlx5_dev_ops = { 167 .dev_configure = mlx5_dev_configure, 168 .dev_start = mlx5_dev_start, 169 .dev_stop = mlx5_dev_stop, 170 .dev_set_link_down = mlx5_set_link_down, 171 .dev_set_link_up = mlx5_set_link_up, 172 .dev_close = mlx5_dev_close, 173 .promiscuous_enable = mlx5_promiscuous_enable, 174 .promiscuous_disable = mlx5_promiscuous_disable, 175 .allmulticast_enable = mlx5_allmulticast_enable, 176 .allmulticast_disable = mlx5_allmulticast_disable, 177 .link_update = mlx5_link_update, 178 .stats_get = mlx5_stats_get, 179 .stats_reset = mlx5_stats_reset, 180 .dev_infos_get = mlx5_dev_infos_get, 181 .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get, 182 .vlan_filter_set = mlx5_vlan_filter_set, 183 .rx_queue_setup = mlx5_rx_queue_setup, 184 .tx_queue_setup = mlx5_tx_queue_setup, 185 .rx_queue_release = mlx5_rx_queue_release, 186 .tx_queue_release = mlx5_tx_queue_release, 187 .flow_ctrl_get = mlx5_dev_get_flow_ctrl, 188 .flow_ctrl_set = mlx5_dev_set_flow_ctrl, 189 .mac_addr_remove = mlx5_mac_addr_remove, 190 .mac_addr_add = mlx5_mac_addr_add, 191 .mac_addr_set = mlx5_mac_addr_set, 192 .mtu_set = mlx5_dev_set_mtu, 193 #ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS 194 .vlan_strip_queue_set = mlx5_vlan_strip_queue_set, 195 .vlan_offload_set = mlx5_vlan_offload_set, 196 #endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */ 197 .reta_update = mlx5_dev_rss_reta_update, 198 .reta_query = mlx5_dev_rss_reta_query, 199 .rss_hash_update = mlx5_rss_hash_update, 200 .rss_hash_conf_get = mlx5_rss_hash_conf_get, 201 #ifdef MLX5_FDIR_SUPPORT 202 .filter_ctrl = mlx5_dev_filter_ctrl, 203 #endif /* MLX5_FDIR_SUPPORT */ 204 }; 205 206 static struct { 207 struct rte_pci_addr pci_addr; /* associated PCI address */ 208 uint32_t ports; /* physical ports bitfield. */ 209 } mlx5_dev[32]; 210 211 /** 212 * Get device index in mlx5_dev[] from PCI bus address. 213 * 214 * @param[in] pci_addr 215 * PCI bus address to look for. 216 * 217 * @return 218 * mlx5_dev[] index on success, -1 on failure. 219 */ 220 static int 221 mlx5_dev_idx(struct rte_pci_addr *pci_addr) 222 { 223 unsigned int i; 224 int ret = -1; 225 226 assert(pci_addr != NULL); 227 for (i = 0; (i != RTE_DIM(mlx5_dev)); ++i) { 228 if ((mlx5_dev[i].pci_addr.domain == pci_addr->domain) && 229 (mlx5_dev[i].pci_addr.bus == pci_addr->bus) && 230 (mlx5_dev[i].pci_addr.devid == pci_addr->devid) && 231 (mlx5_dev[i].pci_addr.function == pci_addr->function)) 232 return i; 233 if ((mlx5_dev[i].ports == 0) && (ret == -1)) 234 ret = i; 235 } 236 return ret; 237 } 238 239 static struct eth_driver mlx5_driver; 240 241 /** 242 * DPDK callback to register a PCI device. 243 * 244 * This function creates an Ethernet device for each port of a given 245 * PCI device. 246 * 247 * @param[in] pci_drv 248 * PCI driver structure (mlx5_driver). 249 * @param[in] pci_dev 250 * PCI device information. 251 * 252 * @return 253 * 0 on success, negative errno value on failure. 254 */ 255 static int 256 mlx5_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) 257 { 258 struct ibv_device **list; 259 struct ibv_device *ibv_dev; 260 int err = 0; 261 struct ibv_context *attr_ctx = NULL; 262 struct ibv_device_attr device_attr; 263 unsigned int vf; 264 unsigned int mps; 265 int idx; 266 int i; 267 268 (void)pci_drv; 269 assert(pci_drv == &mlx5_driver.pci_drv); 270 /* Get mlx5_dev[] index. */ 271 idx = mlx5_dev_idx(&pci_dev->addr); 272 if (idx == -1) { 273 ERROR("this driver cannot support any more adapters"); 274 return -ENOMEM; 275 } 276 DEBUG("using driver device index %d", idx); 277 278 /* Save PCI address. */ 279 mlx5_dev[idx].pci_addr = pci_dev->addr; 280 list = ibv_get_device_list(&i); 281 if (list == NULL) { 282 assert(errno); 283 if (errno == ENOSYS) { 284 WARN("cannot list devices, is ib_uverbs loaded?"); 285 return 0; 286 } 287 return -errno; 288 } 289 assert(i >= 0); 290 /* 291 * For each listed device, check related sysfs entry against 292 * the provided PCI ID. 293 */ 294 while (i != 0) { 295 struct rte_pci_addr pci_addr; 296 297 --i; 298 DEBUG("checking device \"%s\"", list[i]->name); 299 if (mlx5_ibv_device_to_pci_addr(list[i], &pci_addr)) 300 continue; 301 if ((pci_dev->addr.domain != pci_addr.domain) || 302 (pci_dev->addr.bus != pci_addr.bus) || 303 (pci_dev->addr.devid != pci_addr.devid) || 304 (pci_dev->addr.function != pci_addr.function)) 305 continue; 306 vf = ((pci_dev->id.device_id == 307 PCI_DEVICE_ID_MELLANOX_CONNECTX4VF) || 308 (pci_dev->id.device_id == 309 PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF)); 310 /* Multi-packet send is only supported by ConnectX-4 Lx PF. */ 311 mps = (pci_dev->id.device_id == 312 PCI_DEVICE_ID_MELLANOX_CONNECTX4LX); 313 INFO("PCI information matches, using device \"%s\" (VF: %s," 314 " MPS: %s)", 315 list[i]->name, 316 vf ? "true" : "false", 317 mps ? "true" : "false"); 318 attr_ctx = ibv_open_device(list[i]); 319 err = errno; 320 break; 321 } 322 if (attr_ctx == NULL) { 323 ibv_free_device_list(list); 324 switch (err) { 325 case 0: 326 WARN("cannot access device, is mlx5_ib loaded?"); 327 return 0; 328 case EINVAL: 329 WARN("cannot use device, are drivers up to date?"); 330 return 0; 331 } 332 assert(err > 0); 333 return -err; 334 } 335 ibv_dev = list[i]; 336 337 DEBUG("device opened"); 338 if (ibv_query_device(attr_ctx, &device_attr)) 339 goto error; 340 INFO("%u port(s) detected", device_attr.phys_port_cnt); 341 342 for (i = 0; i < device_attr.phys_port_cnt; i++) { 343 uint32_t port = i + 1; /* ports are indexed from one */ 344 uint32_t test = (1 << i); 345 struct ibv_context *ctx = NULL; 346 struct ibv_port_attr port_attr; 347 struct ibv_pd *pd = NULL; 348 struct priv *priv = NULL; 349 struct rte_eth_dev *eth_dev; 350 #ifdef HAVE_EXP_QUERY_DEVICE 351 struct ibv_exp_device_attr exp_device_attr; 352 #endif /* HAVE_EXP_QUERY_DEVICE */ 353 struct ether_addr mac; 354 355 #ifdef HAVE_EXP_QUERY_DEVICE 356 exp_device_attr.comp_mask = 357 IBV_EXP_DEVICE_ATTR_EXP_CAP_FLAGS | 358 IBV_EXP_DEVICE_ATTR_RX_HASH | 359 #ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS 360 IBV_EXP_DEVICE_ATTR_VLAN_OFFLOADS | 361 #endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */ 362 #ifdef HAVE_VERBS_RX_END_PADDING 363 IBV_EXP_DEVICE_ATTR_RX_PAD_END_ALIGN | 364 #endif /* HAVE_VERBS_RX_END_PADDING */ 365 0; 366 #endif /* HAVE_EXP_QUERY_DEVICE */ 367 368 DEBUG("using port %u (%08" PRIx32 ")", port, test); 369 370 ctx = ibv_open_device(ibv_dev); 371 if (ctx == NULL) 372 goto port_error; 373 374 /* Check port status. */ 375 err = ibv_query_port(ctx, port, &port_attr); 376 if (err) { 377 ERROR("port query failed: %s", strerror(err)); 378 goto port_error; 379 } 380 381 if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) { 382 ERROR("port %d is not configured in Ethernet mode", 383 port); 384 goto port_error; 385 } 386 387 if (port_attr.state != IBV_PORT_ACTIVE) 388 DEBUG("port %d is not active: \"%s\" (%d)", 389 port, ibv_port_state_str(port_attr.state), 390 port_attr.state); 391 392 /* Allocate protection domain. */ 393 pd = ibv_alloc_pd(ctx); 394 if (pd == NULL) { 395 ERROR("PD allocation failure"); 396 err = ENOMEM; 397 goto port_error; 398 } 399 400 mlx5_dev[idx].ports |= test; 401 402 /* from rte_ethdev.c */ 403 priv = rte_zmalloc("ethdev private structure", 404 sizeof(*priv), 405 RTE_CACHE_LINE_SIZE); 406 if (priv == NULL) { 407 ERROR("priv allocation failure"); 408 err = ENOMEM; 409 goto port_error; 410 } 411 412 priv->ctx = ctx; 413 priv->device_attr = device_attr; 414 priv->port = port; 415 priv->pd = pd; 416 priv->mtu = ETHER_MTU; 417 #ifdef HAVE_EXP_QUERY_DEVICE 418 if (ibv_exp_query_device(ctx, &exp_device_attr)) { 419 ERROR("ibv_exp_query_device() failed"); 420 goto port_error; 421 } 422 423 priv->hw_csum = 424 ((exp_device_attr.exp_device_cap_flags & 425 IBV_EXP_DEVICE_RX_CSUM_TCP_UDP_PKT) && 426 (exp_device_attr.exp_device_cap_flags & 427 IBV_EXP_DEVICE_RX_CSUM_IP_PKT)); 428 DEBUG("checksum offloading is %ssupported", 429 (priv->hw_csum ? "" : "not ")); 430 431 priv->hw_csum_l2tun = !!(exp_device_attr.exp_device_cap_flags & 432 IBV_EXP_DEVICE_VXLAN_SUPPORT); 433 DEBUG("L2 tunnel checksum offloads are %ssupported", 434 (priv->hw_csum_l2tun ? "" : "not ")); 435 436 priv->ind_table_max_size = exp_device_attr.rx_hash_caps.max_rwq_indirection_table_size; 437 /* Remove this check once DPDK supports larger/variable 438 * indirection tables. */ 439 if (priv->ind_table_max_size > (unsigned int)RSS_INDIRECTION_TABLE_SIZE) 440 priv->ind_table_max_size = RSS_INDIRECTION_TABLE_SIZE; 441 DEBUG("maximum RX indirection table size is %u", 442 priv->ind_table_max_size); 443 #ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS 444 priv->hw_vlan_strip = !!(exp_device_attr.wq_vlan_offloads_cap & 445 IBV_EXP_RECEIVE_WQ_CVLAN_STRIP); 446 #endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */ 447 DEBUG("VLAN stripping is %ssupported", 448 (priv->hw_vlan_strip ? "" : "not ")); 449 450 #ifdef HAVE_VERBS_FCS 451 priv->hw_fcs_strip = !!(exp_device_attr.exp_device_cap_flags & 452 IBV_EXP_DEVICE_SCATTER_FCS); 453 #endif /* HAVE_VERBS_FCS */ 454 DEBUG("FCS stripping configuration is %ssupported", 455 (priv->hw_fcs_strip ? "" : "not ")); 456 457 #ifdef HAVE_VERBS_RX_END_PADDING 458 priv->hw_padding = !!exp_device_attr.rx_pad_end_addr_align; 459 #endif /* HAVE_VERBS_RX_END_PADDING */ 460 DEBUG("hardware RX end alignment padding is %ssupported", 461 (priv->hw_padding ? "" : "not ")); 462 463 #else /* HAVE_EXP_QUERY_DEVICE */ 464 priv->ind_table_max_size = RSS_INDIRECTION_TABLE_SIZE; 465 #endif /* HAVE_EXP_QUERY_DEVICE */ 466 467 priv->vf = vf; 468 priv->mps = mps; 469 /* Allocate and register default RSS hash keys. */ 470 priv->rss_conf = rte_calloc(__func__, hash_rxq_init_n, 471 sizeof((*priv->rss_conf)[0]), 0); 472 if (priv->rss_conf == NULL) { 473 err = ENOMEM; 474 goto port_error; 475 } 476 err = rss_hash_rss_conf_new_key(priv, 477 rss_hash_default_key, 478 rss_hash_default_key_len, 479 ETH_RSS_PROTO_MASK); 480 if (err) 481 goto port_error; 482 /* Configure the first MAC address by default. */ 483 if (priv_get_mac(priv, &mac.addr_bytes)) { 484 ERROR("cannot get MAC address, is mlx5_en loaded?" 485 " (errno: %s)", strerror(errno)); 486 goto port_error; 487 } 488 INFO("port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x", 489 priv->port, 490 mac.addr_bytes[0], mac.addr_bytes[1], 491 mac.addr_bytes[2], mac.addr_bytes[3], 492 mac.addr_bytes[4], mac.addr_bytes[5]); 493 /* Register MAC address. */ 494 claim_zero(priv_mac_addr_add(priv, 0, 495 (const uint8_t (*)[ETHER_ADDR_LEN]) 496 mac.addr_bytes)); 497 /* Initialize FD filters list. */ 498 err = fdir_init_filters_list(priv); 499 if (err) 500 goto port_error; 501 #ifndef NDEBUG 502 { 503 char ifname[IF_NAMESIZE]; 504 505 if (priv_get_ifname(priv, &ifname) == 0) 506 DEBUG("port %u ifname is \"%s\"", 507 priv->port, ifname); 508 else 509 DEBUG("port %u ifname is unknown", priv->port); 510 } 511 #endif 512 /* Get actual MTU if possible. */ 513 priv_get_mtu(priv, &priv->mtu); 514 DEBUG("port %u MTU is %u", priv->port, priv->mtu); 515 516 /* from rte_ethdev.c */ 517 { 518 char name[RTE_ETH_NAME_MAX_LEN]; 519 520 snprintf(name, sizeof(name), "%s port %u", 521 ibv_get_device_name(ibv_dev), port); 522 eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_PCI); 523 } 524 if (eth_dev == NULL) { 525 ERROR("can not allocate rte ethdev"); 526 err = ENOMEM; 527 goto port_error; 528 } 529 530 /* Secondary processes have to use local storage for their 531 * private data as well as a copy of eth_dev->data, but this 532 * pointer must not be modified before burst functions are 533 * actually called. */ 534 if (mlx5_is_secondary()) { 535 struct mlx5_secondary_data *sd = 536 &mlx5_secondary_data[eth_dev->data->port_id]; 537 sd->primary_priv = eth_dev->data->dev_private; 538 if (sd->primary_priv == NULL) { 539 ERROR("no private data for port %u", 540 eth_dev->data->port_id); 541 err = EINVAL; 542 goto port_error; 543 } 544 sd->shared_dev_data = eth_dev->data; 545 rte_spinlock_init(&sd->lock); 546 memcpy(sd->data.name, sd->shared_dev_data->name, 547 sizeof(sd->data.name)); 548 sd->data.dev_private = priv; 549 sd->data.rx_mbuf_alloc_failed = 0; 550 sd->data.mtu = ETHER_MTU; 551 sd->data.port_id = sd->shared_dev_data->port_id; 552 sd->data.mac_addrs = priv->mac; 553 eth_dev->tx_pkt_burst = mlx5_tx_burst_secondary_setup; 554 eth_dev->rx_pkt_burst = mlx5_rx_burst_secondary_setup; 555 } else { 556 eth_dev->data->dev_private = priv; 557 eth_dev->data->rx_mbuf_alloc_failed = 0; 558 eth_dev->data->mtu = ETHER_MTU; 559 eth_dev->data->mac_addrs = priv->mac; 560 } 561 562 eth_dev->pci_dev = pci_dev; 563 rte_eth_copy_pci_info(eth_dev, pci_dev); 564 eth_dev->driver = &mlx5_driver; 565 priv->dev = eth_dev; 566 eth_dev->dev_ops = &mlx5_dev_ops; 567 568 TAILQ_INIT(ð_dev->link_intr_cbs); 569 570 /* Bring Ethernet device up. */ 571 DEBUG("forcing Ethernet interface up"); 572 priv_set_flags(priv, ~IFF_UP, IFF_UP); 573 continue; 574 575 port_error: 576 if (priv) { 577 rte_free(priv->rss_conf); 578 rte_free(priv); 579 } 580 if (pd) 581 claim_zero(ibv_dealloc_pd(pd)); 582 if (ctx) 583 claim_zero(ibv_close_device(ctx)); 584 break; 585 } 586 587 /* 588 * XXX if something went wrong in the loop above, there is a resource 589 * leak (ctx, pd, priv, dpdk ethdev) but we can do nothing about it as 590 * long as the dpdk does not provide a way to deallocate a ethdev and a 591 * way to enumerate the registered ethdevs to free the previous ones. 592 */ 593 594 /* no port found, complain */ 595 if (!mlx5_dev[idx].ports) { 596 err = ENODEV; 597 goto error; 598 } 599 600 error: 601 if (attr_ctx) 602 claim_zero(ibv_close_device(attr_ctx)); 603 if (list) 604 ibv_free_device_list(list); 605 assert(err >= 0); 606 return -err; 607 } 608 609 static const struct rte_pci_id mlx5_pci_id_map[] = { 610 { 611 .vendor_id = PCI_VENDOR_ID_MELLANOX, 612 .device_id = PCI_DEVICE_ID_MELLANOX_CONNECTX4, 613 .subsystem_vendor_id = PCI_ANY_ID, 614 .subsystem_device_id = PCI_ANY_ID 615 }, 616 { 617 .vendor_id = PCI_VENDOR_ID_MELLANOX, 618 .device_id = PCI_DEVICE_ID_MELLANOX_CONNECTX4VF, 619 .subsystem_vendor_id = PCI_ANY_ID, 620 .subsystem_device_id = PCI_ANY_ID 621 }, 622 { 623 .vendor_id = PCI_VENDOR_ID_MELLANOX, 624 .device_id = PCI_DEVICE_ID_MELLANOX_CONNECTX4LX, 625 .subsystem_vendor_id = PCI_ANY_ID, 626 .subsystem_device_id = PCI_ANY_ID 627 }, 628 { 629 .vendor_id = PCI_VENDOR_ID_MELLANOX, 630 .device_id = PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF, 631 .subsystem_vendor_id = PCI_ANY_ID, 632 .subsystem_device_id = PCI_ANY_ID 633 }, 634 { 635 .vendor_id = 0 636 } 637 }; 638 639 static struct eth_driver mlx5_driver = { 640 .pci_drv = { 641 .name = MLX5_DRIVER_NAME, 642 .id_table = mlx5_pci_id_map, 643 .devinit = mlx5_pci_devinit, 644 .drv_flags = RTE_PCI_DRV_INTR_LSC, 645 }, 646 .dev_private_size = sizeof(struct priv) 647 }; 648 649 /** 650 * Driver initialization routine. 651 */ 652 static int 653 rte_mlx5_pmd_init(const char *name, const char *args) 654 { 655 (void)name; 656 (void)args; 657 /* 658 * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use 659 * huge pages. Calling ibv_fork_init() during init allows 660 * applications to use fork() safely for purposes other than 661 * using this PMD, which is not supported in forked processes. 662 */ 663 setenv("RDMAV_HUGEPAGES_SAFE", "1", 1); 664 ibv_fork_init(); 665 rte_eal_pci_register(&mlx5_driver.pci_drv); 666 return 0; 667 } 668 669 static struct rte_driver rte_mlx5_driver = { 670 .type = PMD_PDEV, 671 .name = MLX5_DRIVER_NAME, 672 .init = rte_mlx5_pmd_init, 673 }; 674 675 PMD_REGISTER_DRIVER(rte_mlx5_driver) 676