1 /*- 2 * BSD LICENSE 3 * 4 * Copyright 2015 6WIND S.A. 5 * Copyright 2015 Mellanox. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of 6WIND S.A. nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #include <stddef.h> 35 #include <unistd.h> 36 #include <string.h> 37 #include <assert.h> 38 #include <stdint.h> 39 #include <stdlib.h> 40 #include <net/if.h> 41 42 /* Verbs header. */ 43 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. 
 */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-pedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-pedantic"
#endif

/* DPDK headers don't like -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-pedantic"
#endif
#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_pci.h>
#include <rte_common.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-pedantic"
#endif

#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"

/**
 * DPDK callback to close the device.
 *
 * Destroy all queues and objects, free memory.  After this call the
 * private data area is zeroed; the device must not be used again
 * without re-probing.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_dev_close(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	void *tmp;
	unsigned int i;

	priv_lock(priv);
	DEBUG("%p: closing device \"%s\"",
	      (void *)dev,
	      ((priv->ctx != NULL) ? priv->ctx->device->name : ""));
	/* In case mlx5_dev_stop() has not been called. */
	priv_allmulticast_disable(priv);
	priv_promiscuous_disable(priv);
	priv_mac_addrs_disable(priv);
	priv_destroy_hash_rxqs(priv);
	/* Prevent crashes when queues are still in use. */
	dev->rx_pkt_burst = removed_rx_burst;
	dev->tx_pkt_burst = removed_tx_burst;
	if (priv->rxqs != NULL) {
		/* XXX race condition if mlx5_rx_burst() is still running. */
		/* The sleep below only narrows the race window; it does not
		 * close it. */
		usleep(1000);
		for (i = 0; (i != priv->rxqs_n); ++i) {
			tmp = (*priv->rxqs)[i];
			if (tmp == NULL)
				continue;
			/* Detach the queue before releasing it so a racing
			 * reader sees NULL rather than freed memory. */
			(*priv->rxqs)[i] = NULL;
			rxq_cleanup(tmp);
			rte_free(tmp);
		}
		priv->rxqs_n = 0;
		priv->rxqs = NULL;
	}
	if (priv->txqs != NULL) {
		/* XXX race condition if mlx5_tx_burst() is still running. */
		usleep(1000);
		for (i = 0; (i != priv->txqs_n); ++i) {
			tmp = (*priv->txqs)[i];
			if (tmp == NULL)
				continue;
			(*priv->txqs)[i] = NULL;
			txq_cleanup(tmp);
			rte_free(tmp);
		}
		priv->txqs_n = 0;
		priv->txqs = NULL;
	}
	/* PD and verbs context are either both present or both absent. */
	if (priv->pd != NULL) {
		assert(priv->ctx != NULL);
		claim_zero(ibv_dealloc_pd(priv->pd));
		claim_zero(ibv_close_device(priv->ctx));
	} else
		assert(priv->ctx == NULL);
	/* Release per-hash-RX-queue RSS configuration entries, then the
	 * table itself. */
	if (priv->rss_conf != NULL) {
		for (i = 0; (i != hash_rxq_init_n); ++i)
			rte_free((*priv->rss_conf)[i]);
		rte_free(priv->rss_conf);
	}
	priv_unlock(priv);
	/* NOTE(review): priv (including its lock) is zeroed after the
	 * unlock; this assumes no other thread can take the lock at close
	 * time — confirm against callers. */
	memset(priv, 0, sizeof(*priv));
}

/* Ethernet device operations implemented by this PMD. */
static const struct eth_dev_ops mlx5_dev_ops = {
	.dev_configure = mlx5_dev_configure,
	.dev_start = mlx5_dev_start,
	.dev_stop = mlx5_dev_stop,
	.dev_close = mlx5_dev_close,
	.promiscuous_enable = mlx5_promiscuous_enable,
	.promiscuous_disable = mlx5_promiscuous_disable,
	.allmulticast_enable = mlx5_allmulticast_enable,
	.allmulticast_disable = mlx5_allmulticast_disable,
	.link_update = mlx5_link_update,
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.dev_infos_get = mlx5_dev_infos_get,
	.vlan_filter_set = mlx5_vlan_filter_set,
	.rx_queue_setup = mlx5_rx_queue_setup,
	.tx_queue_setup = mlx5_tx_queue_setup,
	.rx_queue_release = mlx5_rx_queue_release,
	.tx_queue_release = mlx5_tx_queue_release,
	.flow_ctrl_get = mlx5_dev_get_flow_ctrl,
	.flow_ctrl_set = mlx5_dev_set_flow_ctrl,
	.mac_addr_remove = mlx5_mac_addr_remove,
	.mac_addr_add = mlx5_mac_addr_add,
	.mtu_set = mlx5_dev_set_mtu,
	.rss_hash_update = mlx5_rss_hash_update,
	.rss_hash_conf_get = mlx5_rss_hash_conf_get,
};

/* Bookkeeping of probed adapters; slots are looked up/assigned by
 * mlx5_dev_idx() and the ports bitfield is filled during probe. */
static struct {
	struct rte_pci_addr pci_addr; /* associated PCI address */
	uint32_t ports; /* physical ports bitfield. */
} mlx5_dev[32];

/**
 * Get device index in mlx5_dev[] from PCI bus address.
173 * 174 * @param[in] pci_addr 175 * PCI bus address to look for. 176 * 177 * @return 178 * mlx5_dev[] index on success, -1 on failure. 179 */ 180 static int 181 mlx5_dev_idx(struct rte_pci_addr *pci_addr) 182 { 183 unsigned int i; 184 int ret = -1; 185 186 assert(pci_addr != NULL); 187 for (i = 0; (i != RTE_DIM(mlx5_dev)); ++i) { 188 if ((mlx5_dev[i].pci_addr.domain == pci_addr->domain) && 189 (mlx5_dev[i].pci_addr.bus == pci_addr->bus) && 190 (mlx5_dev[i].pci_addr.devid == pci_addr->devid) && 191 (mlx5_dev[i].pci_addr.function == pci_addr->function)) 192 return i; 193 if ((mlx5_dev[i].ports == 0) && (ret == -1)) 194 ret = i; 195 } 196 return ret; 197 } 198 199 static struct eth_driver mlx5_driver; 200 201 /** 202 * DPDK callback to register a PCI device. 203 * 204 * This function creates an Ethernet device for each port of a given 205 * PCI device. 206 * 207 * @param[in] pci_drv 208 * PCI driver structure (mlx5_driver). 209 * @param[in] pci_dev 210 * PCI device information. 211 * 212 * @return 213 * 0 on success, negative errno value on failure. 214 */ 215 static int 216 mlx5_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) 217 { 218 struct ibv_device **list; 219 struct ibv_device *ibv_dev; 220 int err = 0; 221 struct ibv_context *attr_ctx = NULL; 222 struct ibv_device_attr device_attr; 223 unsigned int vf; 224 int idx; 225 int i; 226 227 (void)pci_drv; 228 assert(pci_drv == &mlx5_driver.pci_drv); 229 /* Get mlx5_dev[] index. */ 230 idx = mlx5_dev_idx(&pci_dev->addr); 231 if (idx == -1) { 232 ERROR("this driver cannot support any more adapters"); 233 return -ENOMEM; 234 } 235 DEBUG("using driver device index %d", idx); 236 237 /* Save PCI address. 
*/ 238 mlx5_dev[idx].pci_addr = pci_dev->addr; 239 list = ibv_get_device_list(&i); 240 if (list == NULL) { 241 assert(errno); 242 if (errno == ENOSYS) { 243 WARN("cannot list devices, is ib_uverbs loaded?"); 244 return 0; 245 } 246 return -errno; 247 } 248 assert(i >= 0); 249 /* 250 * For each listed device, check related sysfs entry against 251 * the provided PCI ID. 252 */ 253 while (i != 0) { 254 struct rte_pci_addr pci_addr; 255 256 --i; 257 DEBUG("checking device \"%s\"", list[i]->name); 258 if (mlx5_ibv_device_to_pci_addr(list[i], &pci_addr)) 259 continue; 260 if ((pci_dev->addr.domain != pci_addr.domain) || 261 (pci_dev->addr.bus != pci_addr.bus) || 262 (pci_dev->addr.devid != pci_addr.devid) || 263 (pci_dev->addr.function != pci_addr.function)) 264 continue; 265 vf = ((pci_dev->id.device_id == 266 PCI_DEVICE_ID_MELLANOX_CONNECTX4VF) || 267 (pci_dev->id.device_id == 268 PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF)); 269 INFO("PCI information matches, using device \"%s\" (VF: %s)", 270 list[i]->name, (vf ? 
"true" : "false")); 271 attr_ctx = ibv_open_device(list[i]); 272 err = errno; 273 break; 274 } 275 if (attr_ctx == NULL) { 276 ibv_free_device_list(list); 277 switch (err) { 278 case 0: 279 WARN("cannot access device, is mlx5_ib loaded?"); 280 return 0; 281 case EINVAL: 282 WARN("cannot use device, are drivers up to date?"); 283 return 0; 284 } 285 assert(err > 0); 286 return -err; 287 } 288 ibv_dev = list[i]; 289 290 DEBUG("device opened"); 291 if (ibv_query_device(attr_ctx, &device_attr)) 292 goto error; 293 INFO("%u port(s) detected", device_attr.phys_port_cnt); 294 295 for (i = 0; i < device_attr.phys_port_cnt; i++) { 296 uint32_t port = i + 1; /* ports are indexed from one */ 297 uint32_t test = (1 << i); 298 struct ibv_context *ctx = NULL; 299 struct ibv_port_attr port_attr; 300 struct ibv_pd *pd = NULL; 301 struct priv *priv = NULL; 302 struct rte_eth_dev *eth_dev; 303 #ifdef HAVE_EXP_QUERY_DEVICE 304 struct ibv_exp_device_attr exp_device_attr; 305 #endif /* HAVE_EXP_QUERY_DEVICE */ 306 struct ether_addr mac; 307 308 #ifdef HAVE_EXP_QUERY_DEVICE 309 exp_device_attr.comp_mask = 310 IBV_EXP_DEVICE_ATTR_EXP_CAP_FLAGS | 311 IBV_EXP_DEVICE_ATTR_RX_HASH; 312 #endif /* HAVE_EXP_QUERY_DEVICE */ 313 314 DEBUG("using port %u (%08" PRIx32 ")", port, test); 315 316 ctx = ibv_open_device(ibv_dev); 317 if (ctx == NULL) 318 goto port_error; 319 320 /* Check port status. */ 321 err = ibv_query_port(ctx, port, &port_attr); 322 if (err) { 323 ERROR("port query failed: %s", strerror(err)); 324 goto port_error; 325 } 326 if (port_attr.state != IBV_PORT_ACTIVE) 327 DEBUG("port %d is not active: \"%s\" (%d)", 328 port, ibv_port_state_str(port_attr.state), 329 port_attr.state); 330 331 /* Allocate protection domain. 
*/ 332 pd = ibv_alloc_pd(ctx); 333 if (pd == NULL) { 334 ERROR("PD allocation failure"); 335 err = ENOMEM; 336 goto port_error; 337 } 338 339 mlx5_dev[idx].ports |= test; 340 341 /* from rte_ethdev.c */ 342 priv = rte_zmalloc("ethdev private structure", 343 sizeof(*priv), 344 RTE_CACHE_LINE_SIZE); 345 if (priv == NULL) { 346 ERROR("priv allocation failure"); 347 err = ENOMEM; 348 goto port_error; 349 } 350 351 priv->ctx = ctx; 352 priv->device_attr = device_attr; 353 priv->port = port; 354 priv->pd = pd; 355 priv->mtu = ETHER_MTU; 356 #ifdef HAVE_EXP_QUERY_DEVICE 357 if (ibv_exp_query_device(ctx, &exp_device_attr)) { 358 ERROR("ibv_exp_query_device() failed"); 359 goto port_error; 360 } 361 362 priv->hw_csum = 363 ((exp_device_attr.exp_device_cap_flags & 364 IBV_EXP_DEVICE_RX_CSUM_TCP_UDP_PKT) && 365 (exp_device_attr.exp_device_cap_flags & 366 IBV_EXP_DEVICE_RX_CSUM_IP_PKT)); 367 DEBUG("checksum offloading is %ssupported", 368 (priv->hw_csum ? "" : "not ")); 369 370 priv->hw_csum_l2tun = !!(exp_device_attr.exp_device_cap_flags & 371 IBV_EXP_DEVICE_VXLAN_SUPPORT); 372 DEBUG("L2 tunnel checksum offloads are %ssupported", 373 (priv->hw_csum_l2tun ? "" : "not ")); 374 375 priv->ind_table_max_size = exp_device_attr.rx_hash_caps.max_rwq_indirection_table_size; 376 DEBUG("maximum RX indirection table size is %u", 377 priv->ind_table_max_size); 378 379 #else /* HAVE_EXP_QUERY_DEVICE */ 380 priv->ind_table_max_size = RSS_INDIRECTION_TABLE_SIZE; 381 #endif /* HAVE_EXP_QUERY_DEVICE */ 382 383 priv->vf = vf; 384 /* Allocate and register default RSS hash keys. */ 385 priv->rss_conf = rte_calloc(__func__, hash_rxq_init_n, 386 sizeof((*priv->rss_conf)[0]), 0); 387 if (priv->rss_conf == NULL) { 388 err = ENOMEM; 389 goto port_error; 390 } 391 err = rss_hash_rss_conf_new_key(priv, 392 rss_hash_default_key, 393 rss_hash_default_key_len, 394 ETH_RSS_PROTO_MASK); 395 if (err) 396 goto port_error; 397 /* Configure the first MAC address by default. 
*/ 398 if (priv_get_mac(priv, &mac.addr_bytes)) { 399 ERROR("cannot get MAC address, is mlx5_en loaded?" 400 " (errno: %s)", strerror(errno)); 401 goto port_error; 402 } 403 INFO("port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x", 404 priv->port, 405 mac.addr_bytes[0], mac.addr_bytes[1], 406 mac.addr_bytes[2], mac.addr_bytes[3], 407 mac.addr_bytes[4], mac.addr_bytes[5]); 408 /* Register MAC and broadcast addresses. */ 409 claim_zero(priv_mac_addr_add(priv, 0, 410 (const uint8_t (*)[ETHER_ADDR_LEN]) 411 mac.addr_bytes)); 412 claim_zero(priv_mac_addr_add(priv, (RTE_DIM(priv->mac) - 1), 413 &(const uint8_t [ETHER_ADDR_LEN]) 414 { "\xff\xff\xff\xff\xff\xff" })); 415 #ifndef NDEBUG 416 { 417 char ifname[IF_NAMESIZE]; 418 419 if (priv_get_ifname(priv, &ifname) == 0) 420 DEBUG("port %u ifname is \"%s\"", 421 priv->port, ifname); 422 else 423 DEBUG("port %u ifname is unknown", priv->port); 424 } 425 #endif 426 /* Get actual MTU if possible. */ 427 priv_get_mtu(priv, &priv->mtu); 428 DEBUG("port %u MTU is %u", priv->port, priv->mtu); 429 430 /* from rte_ethdev.c */ 431 { 432 char name[RTE_ETH_NAME_MAX_LEN]; 433 434 snprintf(name, sizeof(name), "%s port %u", 435 ibv_get_device_name(ibv_dev), port); 436 eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_PCI); 437 } 438 if (eth_dev == NULL) { 439 ERROR("can not allocate rte ethdev"); 440 err = ENOMEM; 441 goto port_error; 442 } 443 444 eth_dev->data->dev_private = priv; 445 eth_dev->pci_dev = pci_dev; 446 eth_dev->driver = &mlx5_driver; 447 eth_dev->data->rx_mbuf_alloc_failed = 0; 448 eth_dev->data->mtu = ETHER_MTU; 449 450 priv->dev = eth_dev; 451 eth_dev->dev_ops = &mlx5_dev_ops; 452 eth_dev->data->mac_addrs = priv->mac; 453 454 /* Bring Ethernet device up. 
*/ 455 DEBUG("forcing Ethernet interface up"); 456 priv_set_flags(priv, ~IFF_UP, IFF_UP); 457 continue; 458 459 port_error: 460 rte_free(priv->rss_conf); 461 rte_free(priv); 462 if (pd) 463 claim_zero(ibv_dealloc_pd(pd)); 464 if (ctx) 465 claim_zero(ibv_close_device(ctx)); 466 break; 467 } 468 469 /* 470 * XXX if something went wrong in the loop above, there is a resource 471 * leak (ctx, pd, priv, dpdk ethdev) but we can do nothing about it as 472 * long as the dpdk does not provide a way to deallocate a ethdev and a 473 * way to enumerate the registered ethdevs to free the previous ones. 474 */ 475 476 /* no port found, complain */ 477 if (!mlx5_dev[idx].ports) { 478 err = ENODEV; 479 goto error; 480 } 481 482 error: 483 if (attr_ctx) 484 claim_zero(ibv_close_device(attr_ctx)); 485 if (list) 486 ibv_free_device_list(list); 487 assert(err >= 0); 488 return -err; 489 } 490 491 static const struct rte_pci_id mlx5_pci_id_map[] = { 492 { 493 .vendor_id = PCI_VENDOR_ID_MELLANOX, 494 .device_id = PCI_DEVICE_ID_MELLANOX_CONNECTX4, 495 .subsystem_vendor_id = PCI_ANY_ID, 496 .subsystem_device_id = PCI_ANY_ID 497 }, 498 { 499 .vendor_id = PCI_VENDOR_ID_MELLANOX, 500 .device_id = PCI_DEVICE_ID_MELLANOX_CONNECTX4VF, 501 .subsystem_vendor_id = PCI_ANY_ID, 502 .subsystem_device_id = PCI_ANY_ID 503 }, 504 { 505 .vendor_id = PCI_VENDOR_ID_MELLANOX, 506 .device_id = PCI_DEVICE_ID_MELLANOX_CONNECTX4LX, 507 .subsystem_vendor_id = PCI_ANY_ID, 508 .subsystem_device_id = PCI_ANY_ID 509 }, 510 { 511 .vendor_id = PCI_VENDOR_ID_MELLANOX, 512 .device_id = PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF, 513 .subsystem_vendor_id = PCI_ANY_ID, 514 .subsystem_device_id = PCI_ANY_ID 515 }, 516 { 517 .vendor_id = 0 518 } 519 }; 520 521 static struct eth_driver mlx5_driver = { 522 .pci_drv = { 523 .name = MLX5_DRIVER_NAME, 524 .id_table = mlx5_pci_id_map, 525 .devinit = mlx5_pci_devinit, 526 }, 527 .dev_private_size = sizeof(struct priv) 528 }; 529 530 /** 531 * Driver initialization routine. 
532 */ 533 static int 534 rte_mlx5_pmd_init(const char *name, const char *args) 535 { 536 (void)name; 537 (void)args; 538 /* 539 * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use 540 * huge pages. Calling ibv_fork_init() during init allows 541 * applications to use fork() safely for purposes other than 542 * using this PMD, which is not supported in forked processes. 543 */ 544 setenv("RDMAV_HUGEPAGES_SAFE", "1", 1); 545 ibv_fork_init(); 546 rte_eal_pci_register(&mlx5_driver.pci_drv); 547 return 0; 548 } 549 550 static struct rte_driver rte_mlx5_driver = { 551 .type = PMD_PDEV, 552 .name = MLX5_DRIVER_NAME, 553 .init = rte_mlx5_pmd_init, 554 }; 555 556 PMD_REGISTER_DRIVER(rte_mlx5_driver) 557