1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright 2015 6WIND S.A. 3 * Copyright 2015 Mellanox. 4 */ 5 6 #define _GNU_SOURCE 7 8 #include <stddef.h> 9 #include <assert.h> 10 #include <inttypes.h> 11 #include <unistd.h> 12 #include <stdint.h> 13 #include <stdio.h> 14 #include <string.h> 15 #include <stdlib.h> 16 #include <errno.h> 17 #include <dirent.h> 18 #include <net/if.h> 19 #include <sys/ioctl.h> 20 #include <sys/socket.h> 21 #include <sys/utsname.h> 22 #include <netinet/in.h> 23 #include <linux/ethtool.h> 24 #include <linux/sockios.h> 25 #include <linux/version.h> 26 #include <fcntl.h> 27 #include <stdalign.h> 28 #include <sys/un.h> 29 30 #include <rte_atomic.h> 31 #include <rte_ethdev_driver.h> 32 #include <rte_bus_pci.h> 33 #include <rte_mbuf.h> 34 #include <rte_common.h> 35 #include <rte_interrupts.h> 36 #include <rte_alarm.h> 37 #include <rte_malloc.h> 38 39 #include "mlx5.h" 40 #include "mlx5_glue.h" 41 #include "mlx5_rxtx.h" 42 #include "mlx5_utils.h" 43 44 /* Add defines in case the running kernel is not the same as user headers. 
 */
#ifndef ETHTOOL_GLINKSETTINGS
/*
 * Local fallback definition of struct ethtool_link_settings, matching the
 * kernel UAPI layout, for builds whose user headers predate the
 * ETHTOOL_GLINKSETTINGS ioctl.
 */
struct ethtool_link_settings {
	uint32_t cmd;
	uint32_t speed;
	uint8_t duplex;
	uint8_t port;
	uint8_t phy_address;
	uint8_t autoneg;
	uint8_t mdio_support;
	uint8_t eth_to_mdix;
	uint8_t eth_tp_mdix_ctrl;
	/* Negative on the first GLINKSETTINGS reply: kernel handshake. */
	int8_t link_mode_masks_nwords;
	uint32_t reserved[8];
	uint32_t link_mode_masks[];
};

#define ETHTOOL_GLINKSETTINGS 0x0000004c
#define ETHTOOL_LINK_MODE_1000baseT_Full_BIT 5
#define ETHTOOL_LINK_MODE_Autoneg_BIT 6
#define ETHTOOL_LINK_MODE_1000baseKX_Full_BIT 17
#define ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT 18
#define ETHTOOL_LINK_MODE_10000baseKR_Full_BIT 19
#define ETHTOOL_LINK_MODE_10000baseR_FEC_BIT 20
#define ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT 21
#define ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT 22
#define ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT 23
#define ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT 24
#define ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT 25
#define ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT 26
#define ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT 27
#define ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT 28
#define ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT 29
#define ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT 30
#endif
/* Newer link-mode bits, absent from older ethtool user headers. */
#ifndef HAVE_ETHTOOL_LINK_MODE_25G
#define ETHTOOL_LINK_MODE_25000baseCR_Full_BIT 31
#define ETHTOOL_LINK_MODE_25000baseKR_Full_BIT 32
#define ETHTOOL_LINK_MODE_25000baseSR_Full_BIT 33
#endif
#ifndef HAVE_ETHTOOL_LINK_MODE_50G
#define ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT 34
#define ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT 35
#endif
#ifndef HAVE_ETHTOOL_LINK_MODE_100G
#define ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT 36
#define ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT 37
#define ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT 38
#define ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT 39
#endif

/**
 * Get interface name from private structure.
 *
 * The name is resolved by scanning "<ibdev_path>/device/net" in sysfs and
 * matching each netdev's dev_port (or dev_id on older kernels) against the
 * IB port of this device.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[out] ifname
 *   Interface name output buffer.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE])
{
	struct priv *priv = dev->data->dev_private;
	DIR *dir;
	struct dirent *dent;
	unsigned int dev_type = 0;          /* 0: use dev_port, 1: use dev_id. */
	unsigned int dev_port_prev = ~0u;
	char match[IF_NAMESIZE] = "";

	{
		MKSTR(path, "%s/device/net", priv->ibdev_path);

		dir = opendir(path);
		if (dir == NULL) {
			rte_errno = errno;
			return -rte_errno;
		}
	}
	while ((dent = readdir(dir)) != NULL) {
		char *name = dent->d_name;
		FILE *file;
		unsigned int dev_port;
		int r;

		/* Skip "." and "..". */
		if ((name[0] == '.') &&
		    ((name[1] == '\0') ||
		     ((name[1] == '.') && (name[2] == '\0'))))
			continue;

		MKSTR(path, "%s/device/net/%s/%s",
		      priv->ibdev_path, name,
		      (dev_type ? "dev_id" : "dev_port"));

		file = fopen(path, "rb");
		if (file == NULL) {
			if (errno != ENOENT)
				continue;
			/*
			 * Switch to dev_id when dev_port does not exist as
			 * is the case with Linux kernel versions < 3.15.
			 */
try_dev_id:
			match[0] = '\0';
			if (dev_type)
				break;
			dev_type = 1;
			dev_port_prev = ~0u;
			rewinddir(dir);
			continue;
		}
		/* dev_id is hexadecimal, dev_port is decimal. */
		r = fscanf(file, (dev_type ? "%x" : "%u"), &dev_port);
		fclose(file);
		if (r != 1)
			continue;
		/*
		 * Switch to dev_id when dev_port returns the same value for
		 * all ports. May happen when using a MOFED release older than
		 * 3.0 with a Linux kernel >= 3.15.
		 */
		if (dev_port == dev_port_prev)
			goto try_dev_id;
		dev_port_prev = dev_port;
		/* IB ports are 1-based, sysfs values are 0-based. */
		if (dev_port == (priv->port - 1u))
			snprintf(match, sizeof(match), "%s", name);
	}
	closedir(dir);
	if (match[0] == '\0') {
		rte_errno = ENOENT;
		return -rte_errno;
	}
	/* match is always NUL-terminated by snprintf() above. */
	strncpy(*ifname, match, sizeof(*ifname));
	return 0;
}

/**
 * Perform ifreq ioctl() on associated Ethernet device.
 *
 * Opens a throwaway AF_INET datagram socket, resolves the interface name
 * into ifr->ifr_name and issues the requested ioctl on it.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param req
 *   Request number to pass to ioctl().
 * @param[out] ifr
 *   Interface request structure output buffer.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr)
{
	int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
	int ret = 0;

	if (sock == -1) {
		rte_errno = errno;
		return -rte_errno;
	}
	ret = mlx5_get_ifname(dev, &ifr->ifr_name);
	if (ret)
		goto error;
	ret = ioctl(sock, req, ifr);
	if (ret == -1) {
		rte_errno = errno;
		goto error;
	}
	close(sock);
	return 0;
error:
	close(sock);
	/* rte_errno was set by the failing call above. */
	return -rte_errno;
}

/**
 * Get device MTU.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[out] mtu
 *   MTU value output buffer.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu)
{
	struct ifreq request;
	int ret = mlx5_ifreq(dev, SIOCGIFMTU, &request);

	if (ret)
		return ret;
	*mtu = request.ifr_mtu;
	return 0;
}

/**
 * Set device MTU.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param mtu
 *   MTU value to set.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct ifreq request = { .ifr_mtu = mtu, };

	return mlx5_ifreq(dev, SIOCSIFMTU, &request);
}

/**
 * Set device flags.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param keep
 *   Bitmask for flags that must remain untouched.
 * @param flags
 *   Bitmask for flags to modify.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep, unsigned int flags)
{
	struct ifreq request;
	int ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &request);

	if (ret)
		return ret;
	/* Preserve bits in "keep", overwrite the rest with "flags". */
	request.ifr_flags &= keep;
	request.ifr_flags |= flags & ~keep;
	return mlx5_ifreq(dev, SIOCSIFFLAGS, &request);
}

/**
 * DPDK callback for Ethernet device configuration.
 *
 * Validates the requested Rx/Tx offloads and RSS key, (re)allocates the RSS
 * key and indirection table, and records the new queue counts.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_dev_configure(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	unsigned int rxqs_n = dev->data->nb_rx_queues;
	unsigned int txqs_n = dev->data->nb_tx_queues;
	unsigned int i;
	unsigned int j;
	unsigned int reta_idx_n;
	const uint8_t use_app_rss_key =
		!!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
	uint64_t supp_tx_offloads = mlx5_get_tx_port_offloads(dev);
	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
	uint64_t supp_rx_offloads =
		(mlx5_get_rx_port_offloads() |
		 mlx5_get_rx_queue_offloads(dev));
	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
	int ret = 0;

	if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
		DRV_LOG(ERR,
			"port %u some Tx offloads are not supported requested"
			" 0x%" PRIx64 " supported 0x%" PRIx64,
			dev->data->port_id, tx_offloads, supp_tx_offloads);
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	if ((rx_offloads & supp_rx_offloads) != rx_offloads) {
		DRV_LOG(ERR,
			"port %u some Rx offloads are not supported requested"
			" 0x%" PRIx64 " supported 0x%" PRIx64,
			dev->data->port_id, rx_offloads, supp_rx_offloads);
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	if (use_app_rss_key &&
	    (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len !=
	     rss_hash_default_key_len)) {
		/* MLX5 RSS only support 40bytes key. */
		rte_errno = EINVAL;
		return -rte_errno;
	}
	priv->rss_conf.rss_key =
		rte_realloc(priv->rss_conf.rss_key,
			    rss_hash_default_key_len, 0);
	if (!priv->rss_conf.rss_key) {
		DRV_LOG(ERR, "port %u cannot allocate RSS hash key memory (%u)",
			dev->data->port_id, rxqs_n);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	memcpy(priv->rss_conf.rss_key,
	       use_app_rss_key ?
	       dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key :
	       rss_hash_default_key,
	       rss_hash_default_key_len);
	priv->rss_conf.rss_key_len = rss_hash_default_key_len;
	priv->rss_conf.rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	priv->rxqs = (void *)dev->data->rx_queues;
	priv->txqs = (void *)dev->data->tx_queues;
	if (txqs_n != priv->txqs_n) {
		DRV_LOG(INFO, "port %u Tx queues number update: %u -> %u",
			dev->data->port_id, priv->txqs_n, txqs_n);
		priv->txqs_n = txqs_n;
	}
	if (rxqs_n > priv->config.ind_table_max_size) {
		DRV_LOG(ERR, "port %u cannot handle this many Rx queues (%u)",
			dev->data->port_id, rxqs_n);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	if (rxqs_n == priv->rxqs_n)
		return 0;
	DRV_LOG(INFO, "port %u Rx queues number update: %u -> %u",
		dev->data->port_id, priv->rxqs_n, rxqs_n);
	priv->rxqs_n = rxqs_n;
	/* If the requested number of RX queues is not a power of two, use the
	 * maximum indirection table size for better balancing.
	 * The result is always rounded to the next power of two. */
	reta_idx_n = (1 << log2above((rxqs_n & (rxqs_n - 1)) ?
				     priv->config.ind_table_max_size :
				     rxqs_n));
	ret = mlx5_rss_reta_index_resize(dev, reta_idx_n);
	if (ret)
		return ret;
	/* When the number of RX queues is not a power of two, the remaining
	 * table entries are padded with reused WQs and hashes are not spread
	 * uniformly. */
	for (i = 0, j = 0; (i != reta_idx_n); ++i) {
		(*priv->reta_idx)[i] = j;
		if (++j == rxqs_n)
			j = 0;
	}
	return 0;
}

/**
 * DPDK callback to get information about the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] info
 *   Info structure output buffer.
 */
void
mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	unsigned int max;
	char ifname[IF_NAMESIZE];

	info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	/* FIXME: we should ask the device for these values. */
	info->min_rx_bufsize = 32;
	info->max_rx_pktlen = 65536;
	/*
	 * Since we need one CQ per QP, the limit is the minimum number
	 * between the two values.
	 */
	max = RTE_MIN(priv->device_attr.orig_attr.max_cq,
		      priv->device_attr.orig_attr.max_qp);
	/* If max >= 65535 then max = 0, max_rx_queues is uint16_t. */
	if (max >= 65535)
		max = 65535;
	info->max_rx_queues = max;
	info->max_tx_queues = max;
	info->max_mac_addrs = RTE_DIM(priv->mac);
	info->rx_queue_offload_capa = mlx5_get_rx_queue_offloads(dev);
	info->rx_offload_capa = (mlx5_get_rx_port_offloads() |
				 info->rx_queue_offload_capa);
	info->tx_offload_capa = mlx5_get_tx_port_offloads(dev);
	/* if_index is best-effort: left untouched when the name lookup fails. */
	if (mlx5_get_ifname(dev, &ifname) == 0)
		info->if_index = if_nametoindex(ifname);
	info->reta_size = priv->reta_idx_n ?
		priv->reta_idx_n : config->ind_table_max_size;
	info->hash_key_size = priv->rss_conf.rss_key_len;
	info->speed_capa = priv->link_speed_capa;
	info->flow_type_rss_offloads = ~MLX5_RSS_HF_MASK;
}

/**
 * Get supported packet types.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   A pointer to the supported Packet types array, or NULL when the
 *   currently selected Rx burst function is not one handled here.
 */
const uint32_t *
mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/* refers to rxq_cq_to_pkt_type() */
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_NONFRAG,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L4_NONFRAG,
		RTE_PTYPE_INNER_L4_FRAG,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == mlx5_rx_burst ||
	    dev->rx_pkt_burst == mlx5_rx_burst_vec)
		return ptypes;
	return NULL;
}

/**
 * DPDK callback to retrieve physical link information.
 *
 * Legacy path using the deprecated ETHTOOL_GSET ioctl, kept for kernels
 * older than 4.9.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success (link status changed), -EAGAIN when the status is
 *   unchanged, another negative errno value otherwise; rte_errno is set.
 */
static int
mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	struct ethtool_cmd edata = {
		.cmd = ETHTOOL_GSET /* Deprecated since Linux v4.5. */
	};
	struct ifreq ifr;
	struct rte_eth_link dev_link;
	int link_speed = 0;
	int ret;

	ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr);
	if (ret) {
		DRV_LOG(WARNING, "port %u ioctl(SIOCGIFFLAGS) failed: %s",
			dev->data->port_id, strerror(rte_errno));
		return ret;
	}
	memset(&dev_link, 0, sizeof(dev_link));
	dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
				(ifr.ifr_flags & IFF_RUNNING));
	ifr.ifr_data = (void *)&edata;
	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
	if (ret) {
		DRV_LOG(WARNING,
			"port %u ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s",
			dev->data->port_id, strerror(rte_errno));
		return ret;
	}
	link_speed = ethtool_cmd_speed(&edata);
	if (link_speed == -1)
		dev_link.link_speed = 0;
	else
		dev_link.link_speed = link_speed;
	priv->link_speed_capa = 0;
	if (edata.supported & SUPPORTED_Autoneg)
		priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG;
	if (edata.supported & (SUPPORTED_1000baseT_Full |
			       SUPPORTED_1000baseKX_Full))
		priv->link_speed_capa |= ETH_LINK_SPEED_1G;
	if (edata.supported & SUPPORTED_10000baseKR_Full)
		priv->link_speed_capa |= ETH_LINK_SPEED_10G;
	if (edata.supported & (SUPPORTED_40000baseKR4_Full |
			       SUPPORTED_40000baseCR4_Full |
			       SUPPORTED_40000baseSR4_Full |
			       SUPPORTED_40000baseLR4_Full))
		priv->link_speed_capa |= ETH_LINK_SPEED_40G;
	dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
				ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
	dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
				  ETH_LINK_SPEED_FIXED);
	if (memcmp(&dev_link, &dev->data->dev_link, sizeof(dev_link))) {
		/* Link status changed. */
		dev->data->dev_link = dev_link;
		return 0;
	}
	/* Link status is still the same. */
	rte_errno = EAGAIN;
	return -rte_errno;
}

/**
 * Retrieve physical link information (unlocked version using new ioctl).
 *
 * Uses the ETHTOOL_GLINKSETTINGS two-step handshake: the first call returns
 * the required number of 32-bit mask words (negated), the second call is
 * issued with a buffer sized accordingly.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success (link status changed), -EAGAIN when the status is
 *   unchanged, another negative errno value otherwise; rte_errno is set.
 */
static int
mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	struct ethtool_link_settings gcmd = { .cmd = ETHTOOL_GLINKSETTINGS };
	struct ifreq ifr;
	struct rte_eth_link dev_link;
	uint64_t sc;
	int ret;

	ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr);
	if (ret) {
		DRV_LOG(WARNING, "port %u ioctl(SIOCGIFFLAGS) failed: %s",
			dev->data->port_id, strerror(rte_errno));
		return ret;
	}
	memset(&dev_link, 0, sizeof(dev_link));
	dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
				(ifr.ifr_flags & IFF_RUNNING));
	ifr.ifr_data = (void *)&gcmd;
	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
	if (ret) {
		DRV_LOG(DEBUG,
			"port %u ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS)"
			" failed: %s",
			dev->data->port_id, strerror(rte_errno));
		return ret;
	}
	/* The kernel returns the word count negated on the probe call. */
	gcmd.link_mode_masks_nwords = -gcmd.link_mode_masks_nwords;

	/* VLA sized for the three masks (supported/advertising/lp). */
	alignas(struct ethtool_link_settings)
	uint8_t data[offsetof(struct ethtool_link_settings, link_mode_masks) +
		     sizeof(uint32_t) * gcmd.link_mode_masks_nwords * 3];
	struct ethtool_link_settings *ecmd = (void *)data;

	*ecmd = gcmd;
	ifr.ifr_data = (void *)ecmd;
	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
	if (ret) {
		DRV_LOG(DEBUG,
			"port %u ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS)"
			" failed: %s",
			dev->data->port_id, strerror(rte_errno));
		return ret;
	}
	dev_link.link_speed = ecmd->speed;
	/* Only the first 64 supported-mode bits are examined. */
	sc = ecmd->link_mode_masks[0] |
		((uint64_t)ecmd->link_mode_masks[1] << 32);
	priv->link_speed_capa = 0;
	if (sc & MLX5_BITSHIFT(ETHTOOL_LINK_MODE_Autoneg_BIT))
		priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseT_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_1G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_10G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_20G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_40G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_56G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_25G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_50G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_100G;
	dev_link.link_duplex = ((ecmd->duplex == DUPLEX_HALF) ?
				ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
	dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
				  ETH_LINK_SPEED_FIXED);
	if (memcmp(&dev_link, &dev->data->dev_link, sizeof(dev_link))) {
		/* Link status changed. */
		dev->data->dev_link = dev_link;
		return 0;
	}
	/* Link status is still the same. */
	rte_errno = EAGAIN;
	return -rte_errno;
}

/**
 * Enable receiving and transmitting traffic.
 *
 * Selects the burst callbacks and installs control flows; errors are
 * logged but not propagated (void callback context).
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_link_start(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	int ret;

	dev->tx_pkt_burst = mlx5_select_tx_function(dev);
	dev->rx_pkt_burst = mlx5_select_rx_function(dev);
	ret = mlx5_traffic_enable(dev);
	if (ret) {
		DRV_LOG(ERR,
			"port %u error occurred while configuring control"
			" flows: %s",
			dev->data->port_id, strerror(rte_errno));
		return;
	}
	ret = mlx5_flow_start(dev, &priv->flows);
	if (ret)
		DRV_LOG(ERR,
			"port %u error occurred while configuring flows: %s",
			dev->data->port_id, strerror(rte_errno));
}

/**
 * Disable receiving and transmitting traffic.
 *
 * Stops flows and swaps the burst callbacks for the "removed" stubs.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_link_stop(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;

	mlx5_flow_stop(dev, &priv->flows);
	mlx5_traffic_disable(dev);
	dev->rx_pkt_burst = removed_rx_burst;
	dev->tx_pkt_burst = removed_tx_burst;
}

/**
 * Querying the link status till it changes to the desired state.
 * Number of query attempts is bounded by MLX5_MAX_LINK_QUERY_ATTEMPTS.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param status
 *   Link desired status.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_force_link_status_change(struct rte_eth_dev *dev, int status)
{
	int try = 0;

	while (try < MLX5_MAX_LINK_QUERY_ATTEMPTS) {
		mlx5_link_update(dev, 0);
		if (dev->data->dev_link.link_status == status)
			return 0;
		try++;
		sleep(1); /* One-second poll interval between attempts. */
	}
	rte_errno = EAGAIN;
	return -rte_errno;
}

/**
 * DPDK callback to retrieve physical link information.
 *
 * Picks the legacy ETHTOOL_GSET path on kernels older than 4.9 and the
 * ETHTOOL_GLINKSETTINGS path otherwise, then re-selects the burst
 * callbacks when the link status changed.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param wait_to_complete
 *   Wait for request completion (ignored).
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
{
	struct utsname utsname;
	int ver[3];
	int ret;
	struct rte_eth_link dev_link = dev->data->dev_link;

	if (uname(&utsname) == -1 ||
	    sscanf(utsname.release, "%d.%d.%d",
		   &ver[0], &ver[1], &ver[2]) != 3 ||
	    KERNEL_VERSION(ver[0], ver[1], ver[2]) < KERNEL_VERSION(4, 9, 0))
		ret = mlx5_link_update_unlocked_gset(dev);
	else
		ret = mlx5_link_update_unlocked_gs(dev);
	if (ret)
		return ret;
	/* If lsc interrupt is disabled, should always be ready for traffic. */
	if (!dev->data->dev_conf.intr_conf.lsc) {
		mlx5_link_start(dev);
		return 0;
	}
	/* Re-select burst callbacks only if link status has been changed.
	 * (ret is necessarily 0 here; dev_link holds the pre-update state.) */
	if (!ret && dev_link.link_status != dev->data->dev_link.link_status) {
		if (dev->data->dev_link.link_status == ETH_LINK_UP)
			mlx5_link_start(dev);
		else
			mlx5_link_stop(dev);
	}
	return 0;
}

/**
 * DPDK callback to change the MTU.
 *
 * Sets the kernel interface MTU first and reads it back to confirm the
 * change took effect before recording it in the private structure.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param mtu
 *   New MTU.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct priv *priv = dev->data->dev_private;
	uint16_t kern_mtu = 0;
	int ret;

	ret = mlx5_get_mtu(dev, &kern_mtu);
	if (ret)
		return ret;
	/* Set kernel interface MTU first. */
	ret = mlx5_set_mtu(dev, mtu);
	if (ret)
		return ret;
	ret = mlx5_get_mtu(dev, &kern_mtu);
	if (ret)
		return ret;
	if (kern_mtu == mtu) {
		priv->mtu = mtu;
		DRV_LOG(DEBUG, "port %u adapter MTU set to %u",
			dev->data->port_id, mtu);
		return 0;
	}
	rte_errno = EAGAIN;
	return -rte_errno;
}

/**
 * DPDK callback to get flow control status.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] fc_conf
 *   Flow control output buffer.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct ifreq ifr;
	struct ethtool_pauseparam ethpause = {
		.cmd = ETHTOOL_GPAUSEPARAM
	};
	int ret;

	ifr.ifr_data = (void *)&ethpause;
	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
	if (ret) {
		DRV_LOG(WARNING,
			"port %u ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM) failed:"
			" %s",
			dev->data->port_id, strerror(rte_errno));
		return ret;
	}
	fc_conf->autoneg = ethpause.autoneg;
	/* Map rx/tx pause flags to the DPDK flow-control mode enum. */
	if (ethpause.rx_pause && ethpause.tx_pause)
		fc_conf->mode = RTE_FC_FULL;
	else if (ethpause.rx_pause)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (ethpause.tx_pause)
		fc_conf->mode = RTE_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_FC_NONE;
	return 0;
}

/**
 * DPDK callback to modify flow control parameters.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[in] fc_conf
 *   Flow control parameters.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
855 */ 856 int 857 mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 858 { 859 struct ifreq ifr; 860 struct ethtool_pauseparam ethpause = { 861 .cmd = ETHTOOL_SPAUSEPARAM 862 }; 863 int ret; 864 865 ifr.ifr_data = (void *)ðpause; 866 ethpause.autoneg = fc_conf->autoneg; 867 if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) || 868 (fc_conf->mode & RTE_FC_RX_PAUSE)) 869 ethpause.rx_pause = 1; 870 else 871 ethpause.rx_pause = 0; 872 873 if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) || 874 (fc_conf->mode & RTE_FC_TX_PAUSE)) 875 ethpause.tx_pause = 1; 876 else 877 ethpause.tx_pause = 0; 878 ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); 879 if (ret) { 880 DRV_LOG(WARNING, 881 "port %u ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)" 882 " failed: %s", 883 dev->data->port_id, strerror(rte_errno)); 884 return ret; 885 } 886 return 0; 887 } 888 889 /** 890 * Get PCI information from struct ibv_device. 891 * 892 * @param device 893 * Pointer to Ethernet device structure. 894 * @param[out] pci_addr 895 * PCI bus address output buffer. 896 * 897 * @return 898 * 0 on success, a negative errno value otherwise and rte_errno is set. 899 */ 900 int 901 mlx5_ibv_device_to_pci_addr(const struct ibv_device *device, 902 struct rte_pci_addr *pci_addr) 903 { 904 FILE *file; 905 char line[32]; 906 MKSTR(path, "%s/device/uevent", device->ibdev_path); 907 908 file = fopen(path, "rb"); 909 if (file == NULL) { 910 rte_errno = errno; 911 return -rte_errno; 912 } 913 while (fgets(line, sizeof(line), file) == line) { 914 size_t len = strlen(line); 915 int ret; 916 917 /* Truncate long lines. */ 918 if (len == (sizeof(line) - 1)) 919 while (line[(len - 1)] != '\n') { 920 ret = fgetc(file); 921 if (ret == EOF) 922 break; 923 line[(len - 1)] = ret; 924 } 925 /* Extract information. 
*/ 926 if (sscanf(line, 927 "PCI_SLOT_NAME=" 928 "%" SCNx32 ":%" SCNx8 ":%" SCNx8 ".%" SCNx8 "\n", 929 &pci_addr->domain, 930 &pci_addr->bus, 931 &pci_addr->devid, 932 &pci_addr->function) == 4) { 933 ret = 0; 934 break; 935 } 936 } 937 fclose(file); 938 return 0; 939 } 940 941 /** 942 * Update the link status. 943 * 944 * @param dev 945 * Pointer to Ethernet device. 946 * 947 * @return 948 * Zero if the callback process can be called immediately, negative errno 949 * value otherwise and rte_errno is set. 950 */ 951 static int 952 mlx5_link_status_update(struct rte_eth_dev *dev) 953 { 954 struct priv *priv = dev->data->dev_private; 955 struct rte_eth_link *link = &dev->data->dev_link; 956 int ret; 957 958 ret = mlx5_link_update(dev, 0); 959 if (ret) 960 return ret; 961 if (((link->link_speed == 0) && link->link_status) || 962 ((link->link_speed != 0) && !link->link_status)) { 963 /* 964 * Inconsistent status. Event likely occurred before the 965 * kernel netdevice exposes the new status. 966 */ 967 if (!priv->pending_alarm) { 968 priv->pending_alarm = 1; 969 rte_eal_alarm_set(MLX5_ALARM_TIMEOUT_US, 970 mlx5_dev_link_status_handler, 971 priv->dev); 972 } 973 return 1; 974 } else if (unlikely(priv->pending_alarm)) { 975 /* Link interrupt occurred while alarm is already scheduled. */ 976 priv->pending_alarm = 0; 977 rte_eal_alarm_cancel(mlx5_dev_link_status_handler, priv->dev); 978 } 979 return 0; 980 } 981 982 /** 983 * Device status handler. 984 * 985 * @param dev 986 * Pointer to Ethernet device. 987 * @param events 988 * Pointer to event flags holder. 989 * 990 * @return 991 * Events bitmap of callback process which can be called immediately. 992 */ 993 static uint32_t 994 mlx5_dev_status_handler(struct rte_eth_dev *dev) 995 { 996 struct priv *priv = dev->data->dev_private; 997 struct ibv_async_event event; 998 uint32_t ret = 0; 999 1000 /* Read all message and acknowledge them. 
	for (;;) {
		if (mlx5_glue->get_async_event(priv->ctx, &event))
			break;
		/* Port up/down -> LSC event, device fatal -> RMV event. */
		if ((event.event_type == IBV_EVENT_PORT_ACTIVE ||
		     event.event_type == IBV_EVENT_PORT_ERR) &&
		    (dev->data->dev_conf.intr_conf.lsc == 1))
			ret |= (1 << RTE_ETH_EVENT_INTR_LSC);
		else if (event.event_type == IBV_EVENT_DEVICE_FATAL &&
			 dev->data->dev_conf.intr_conf.rmv == 1)
			ret |= (1 << RTE_ETH_EVENT_INTR_RMV);
		else
			DRV_LOG(DEBUG,
				"port %u event type %d on not handled",
				dev->data->port_id, event.event_type);
		mlx5_glue->ack_async_event(&event);
	}
	/* Defer the LSC callback when the new status is not yet consistent. */
	if (ret & (1 << RTE_ETH_EVENT_INTR_LSC))
		if (mlx5_link_status_update(dev))
			ret &= ~(1 << RTE_ETH_EVENT_INTR_LSC);
	return ret;
}

/**
 * Handle delayed link status event.
 *
 * Alarm callback scheduled by mlx5_link_status_update() when the kernel
 * status was inconsistent.
 *
 * @param arg
 *   Registered argument.
 */
void
mlx5_dev_link_status_handler(void *arg)
{
	struct rte_eth_dev *dev = arg;
	struct priv *priv = dev->data->dev_private;
	int ret;

	priv->pending_alarm = 0;
	ret = mlx5_link_status_update(dev);
	if (!ret)
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}

/**
 * Handle interrupts from the NIC.
 *
 * @param[in] intr_handle
 *   Interrupt handler.
 * @param cb_arg
 *   Callback argument.
 */
void
mlx5_dev_interrupt_handler(void *cb_arg)
{
	struct rte_eth_dev *dev = cb_arg;
	uint32_t events;

	events = mlx5_dev_status_handler(dev);
	if (events & (1 << RTE_ETH_EVENT_INTR_LSC))
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
	if (events & (1 << RTE_ETH_EVENT_INTR_RMV))
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RMV, NULL);
}

/**
 * Handle interrupts from the socket.
 *
 * @param cb_arg
 *   Callback argument.
 */
static void
mlx5_dev_handler_socket(void *cb_arg)
{
	struct rte_eth_dev *dev = cb_arg;

	mlx5_socket_handle(dev);
}

/**
 * Uninstall interrupt handler.
 *
 * Unregisters the async-event and socket callbacks, cancels any pending
 * link-status alarm and resets the interrupt handles.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
void
mlx5_dev_interrupt_handler_uninstall(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;

	if (dev->data->dev_conf.intr_conf.lsc ||
	    dev->data->dev_conf.intr_conf.rmv)
		rte_intr_callback_unregister(&priv->intr_handle,
					     mlx5_dev_interrupt_handler, dev);
	if (priv->primary_socket)
		rte_intr_callback_unregister(&priv->intr_handle_socket,
					     mlx5_dev_handler_socket, dev);
	if (priv->pending_alarm) {
		priv->pending_alarm = 0;
		rte_eal_alarm_cancel(mlx5_dev_link_status_handler, dev);
	}
	priv->intr_handle.fd = 0;
	priv->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
	priv->intr_handle_socket.fd = 0;
	priv->intr_handle_socket.type = RTE_INTR_HANDLE_UNKNOWN;
}

/**
 * Install interrupt handler.
 *
 * @param dev
 *   Pointer to Ethernet device.
1110 */ 1111 void 1112 mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev) 1113 { 1114 struct priv *priv = dev->data->dev_private; 1115 int ret; 1116 int flags; 1117 1118 assert(priv->ctx->async_fd > 0); 1119 flags = fcntl(priv->ctx->async_fd, F_GETFL); 1120 ret = fcntl(priv->ctx->async_fd, F_SETFL, flags | O_NONBLOCK); 1121 if (ret) { 1122 DRV_LOG(INFO, 1123 "port %u failed to change file descriptor async event" 1124 " queue", 1125 dev->data->port_id); 1126 dev->data->dev_conf.intr_conf.lsc = 0; 1127 dev->data->dev_conf.intr_conf.rmv = 0; 1128 } 1129 if (dev->data->dev_conf.intr_conf.lsc || 1130 dev->data->dev_conf.intr_conf.rmv) { 1131 priv->intr_handle.fd = priv->ctx->async_fd; 1132 priv->intr_handle.type = RTE_INTR_HANDLE_EXT; 1133 rte_intr_callback_register(&priv->intr_handle, 1134 mlx5_dev_interrupt_handler, dev); 1135 } 1136 ret = mlx5_socket_init(dev); 1137 if (ret) 1138 DRV_LOG(ERR, "port %u cannot initialise socket: %s", 1139 dev->data->port_id, strerror(rte_errno)); 1140 else if (priv->primary_socket) { 1141 priv->intr_handle_socket.fd = priv->primary_socket; 1142 priv->intr_handle_socket.type = RTE_INTR_HANDLE_EXT; 1143 rte_intr_callback_register(&priv->intr_handle_socket, 1144 mlx5_dev_handler_socket, dev); 1145 } 1146 } 1147 1148 /** 1149 * DPDK callback to bring the link DOWN. 1150 * 1151 * @param dev 1152 * Pointer to Ethernet device structure. 1153 * 1154 * @return 1155 * 0 on success, a negative errno value otherwise and rte_errno is set. 1156 */ 1157 int 1158 mlx5_set_link_down(struct rte_eth_dev *dev) 1159 { 1160 return mlx5_set_flags(dev, ~IFF_UP, ~IFF_UP); 1161 } 1162 1163 /** 1164 * DPDK callback to bring the link UP. 1165 * 1166 * @param dev 1167 * Pointer to Ethernet device structure. 1168 * 1169 * @return 1170 * 0 on success, a negative errno value otherwise and rte_errno is set. 
1171 */ 1172 int 1173 mlx5_set_link_up(struct rte_eth_dev *dev) 1174 { 1175 return mlx5_set_flags(dev, ~IFF_UP, IFF_UP); 1176 } 1177 1178 /** 1179 * Configure the TX function to use. 1180 * 1181 * @param dev 1182 * Pointer to private data structure. 1183 * 1184 * @return 1185 * Pointer to selected Tx burst function. 1186 */ 1187 eth_tx_burst_t 1188 mlx5_select_tx_function(struct rte_eth_dev *dev) 1189 { 1190 struct priv *priv = dev->data->dev_private; 1191 eth_tx_burst_t tx_pkt_burst = mlx5_tx_burst; 1192 struct mlx5_dev_config *config = &priv->config; 1193 uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads; 1194 int tso = !!(tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO | 1195 DEV_TX_OFFLOAD_VXLAN_TNL_TSO | 1196 DEV_TX_OFFLOAD_GRE_TNL_TSO)); 1197 int vlan_insert = !!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT); 1198 1199 assert(priv != NULL); 1200 /* Select appropriate TX function. */ 1201 if (vlan_insert || tso) 1202 return tx_pkt_burst; 1203 if (config->mps == MLX5_MPW_ENHANCED) { 1204 if (mlx5_check_vec_tx_support(dev) > 0) { 1205 if (mlx5_check_raw_vec_tx_support(dev) > 0) 1206 tx_pkt_burst = mlx5_tx_burst_raw_vec; 1207 else 1208 tx_pkt_burst = mlx5_tx_burst_vec; 1209 DRV_LOG(DEBUG, 1210 "port %u selected enhanced MPW Tx vectorized" 1211 " function", 1212 dev->data->port_id); 1213 } else { 1214 tx_pkt_burst = mlx5_tx_burst_empw; 1215 DRV_LOG(DEBUG, 1216 "port %u selected enhanced MPW Tx function", 1217 dev->data->port_id); 1218 } 1219 } else if (config->mps && (config->txq_inline > 0)) { 1220 tx_pkt_burst = mlx5_tx_burst_mpw_inline; 1221 DRV_LOG(DEBUG, "port %u selected MPW inline Tx function", 1222 dev->data->port_id); 1223 } else if (config->mps) { 1224 tx_pkt_burst = mlx5_tx_burst_mpw; 1225 DRV_LOG(DEBUG, "port %u selected MPW Tx function", 1226 dev->data->port_id); 1227 } 1228 return tx_pkt_burst; 1229 } 1230 1231 /** 1232 * Configure the RX function to use. 1233 * 1234 * @param dev 1235 * Pointer to private data structure. 
1236 * 1237 * @return 1238 * Pointer to selected Rx burst function. 1239 */ 1240 eth_rx_burst_t 1241 mlx5_select_rx_function(struct rte_eth_dev *dev) 1242 { 1243 eth_rx_burst_t rx_pkt_burst = mlx5_rx_burst; 1244 1245 assert(dev != NULL); 1246 if (mlx5_check_vec_rx_support(dev) > 0) { 1247 rx_pkt_burst = mlx5_rx_burst_vec; 1248 DRV_LOG(DEBUG, "port %u selected Rx vectorized function", 1249 dev->data->port_id); 1250 } 1251 return rx_pkt_burst; 1252 } 1253 1254 /** 1255 * Check if mlx5 device was removed. 1256 * 1257 * @param dev 1258 * Pointer to Ethernet device structure. 1259 * 1260 * @return 1261 * 1 when device is removed, otherwise 0. 1262 */ 1263 int 1264 mlx5_is_removed(struct rte_eth_dev *dev) 1265 { 1266 struct ibv_device_attr device_attr; 1267 struct priv *priv = dev->data->dev_private; 1268 1269 if (mlx5_glue->query_device(priv->ctx, &device_attr) == EIO) 1270 return 1; 1271 return 0; 1272 } 1273