/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox.
 */

#define _GNU_SOURCE

#include <stddef.h>
#include <assert.h>
#include <inttypes.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#include <dirent.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/utsname.h>
#include <netinet/in.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <linux/version.h>
#include <fcntl.h>
#include <stdalign.h>
#include <sys/un.h>

#include <rte_atomic.h>
#include <rte_ethdev_driver.h>
#include <rte_bus_pci.h>
#include <rte_mbuf.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_alarm.h>
#include <rte_malloc.h>

#include "mlx5.h"
#include "mlx5_glue.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"

/* Add defines in case the running kernel is not the same as user headers. */
#ifndef ETHTOOL_GLINKSETTINGS
struct ethtool_link_settings {
	uint32_t cmd;
	uint32_t speed;
	uint8_t duplex;
	uint8_t port;
	uint8_t phy_address;
	uint8_t autoneg;
	uint8_t mdio_support;
	uint8_t eth_tp_mdix;
	uint8_t eth_tp_mdix_ctrl;
	int8_t link_mode_masks_nwords;
	uint32_t reserved[8];
	uint32_t link_mode_masks[];
};

#define ETHTOOL_GLINKSETTINGS 0x0000004c
#define ETHTOOL_LINK_MODE_1000baseT_Full_BIT 5
#define ETHTOOL_LINK_MODE_Autoneg_BIT 6
#define ETHTOOL_LINK_MODE_1000baseKX_Full_BIT 17
#define ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT 18
#define ETHTOOL_LINK_MODE_10000baseKR_Full_BIT 19
#define ETHTOOL_LINK_MODE_10000baseR_FEC_BIT 20
#define ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT 21
#define ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT 22
#define ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT 23
#define ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT 24
#define ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT 25
#define ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT 26
#define ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT 27
#define ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT 28
#define ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT 29
#define ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT 30
#endif
#ifndef HAVE_ETHTOOL_LINK_MODE_25G
#define ETHTOOL_LINK_MODE_25000baseCR_Full_BIT 31
#define ETHTOOL_LINK_MODE_25000baseKR_Full_BIT 32
#define ETHTOOL_LINK_MODE_25000baseSR_Full_BIT 33
#endif
#ifndef HAVE_ETHTOOL_LINK_MODE_50G
#define ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT 34
#define ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT 35
#endif
#ifndef HAVE_ETHTOOL_LINK_MODE_100G
#define ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT 36
#define ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT 37
#define ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT 38
#define ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT 39
#endif
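
/*
 * The bit indices above are consumed when building the 64-bit capability
 * mask in mlx5_link_update_unlocked_gs() below. Assuming MLX5_BITSHIFT(v)
 * (from mlx5_utils.h) expands to (UINT64_C(1) << (v)), a test such as:
 *
 *	sc & MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT)
 *
 * is nonzero when the port reports 25GBASE-CR support.
 */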

/**
 * Get interface name from private structure.
 *
 * @param[in] priv
 *   Pointer to private structure.
 * @param[out] ifname
 *   Interface name output buffer.
 *
 * @return
 *   0 on success, -1 on failure and errno is set.
 */
int
priv_get_ifname(const struct priv *priv, char (*ifname)[IF_NAMESIZE])
{
	DIR *dir;
	struct dirent *dent;
	unsigned int dev_type = 0;
	unsigned int dev_port_prev = ~0u;
	char match[IF_NAMESIZE] = "";

	{
		MKSTR(path, "%s/device/net", priv->ibdev_path);

		dir = opendir(path);
		if (dir == NULL)
			return -1;
	}
	while ((dent = readdir(dir)) != NULL) {
		char *name = dent->d_name;
		FILE *file;
		unsigned int dev_port;
		int r;

		if ((name[0] == '.') &&
		    ((name[1] == '\0') ||
		     ((name[1] == '.') && (name[2] == '\0'))))
			continue;

		MKSTR(path, "%s/device/net/%s/%s",
		      priv->ibdev_path, name,
		      (dev_type ? "dev_id" : "dev_port"));

		file = fopen(path, "rb");
		if (file == NULL) {
			if (errno != ENOENT)
				continue;
			/*
			 * Switch to dev_id when dev_port does not exist as
			 * is the case with Linux kernel versions < 3.15.
			 */
try_dev_id:
			match[0] = '\0';
			if (dev_type)
				break;
			dev_type = 1;
			dev_port_prev = ~0u;
			rewinddir(dir);
			continue;
		}
		r = fscanf(file, (dev_type ? "%x" : "%u"), &dev_port);
		fclose(file);
		if (r != 1)
			continue;
		/*
		 * Switch to dev_id when dev_port returns the same value for
		 * all ports. May happen when using a MOFED release older than
		 * 3.0 with a Linux kernel >= 3.15.
		 */
		if (dev_port == dev_port_prev)
			goto try_dev_id;
		dev_port_prev = dev_port;
		if (dev_port == (priv->port - 1u))
			snprintf(match, sizeof(match), "%s", name);
	}
	closedir(dir);
	if (match[0] == '\0')
		return -1;
	strncpy(*ifname, match, sizeof(*ifname));
	return 0;
}

/**
 * Perform ifreq ioctl() on associated Ethernet device.
 *
 * @param[in] priv
 *   Pointer to private structure.
 * @param req
 *   Request number to pass to ioctl().
 * @param[out] ifr
 *   Interface request structure output buffer.
 *
 * @return
 *   0 on success, -1 on failure and errno is set.
 */
int
priv_ifreq(const struct priv *priv, int req, struct ifreq *ifr)
{
	int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
	int ret = -1;

	if (sock == -1)
		return ret;
	if (priv_get_ifname(priv, &ifr->ifr_name) == 0)
		ret = ioctl(sock, req, ifr);
	close(sock);
	return ret;
}

/**
 * Get device MTU.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[out] mtu
 *   MTU value output buffer.
 *
 * @return
 *   0 on success, -1 on failure and errno is set.
 */
int
priv_get_mtu(struct priv *priv, uint16_t *mtu)
{
	struct ifreq request;
	int ret = priv_ifreq(priv, SIOCGIFMTU, &request);

	if (ret)
		return ret;
	*mtu = request.ifr_mtu;
	return 0;
}

/**
 * Set device MTU.
 *
 * @param priv
 *   Pointer to private structure.
 * @param mtu
 *   MTU value to set.
 *
 * @return
 *   0 on success, -1 on failure and errno is set.
 */
static int
priv_set_mtu(struct priv *priv, uint16_t mtu)
{
	struct ifreq request = { .ifr_mtu = mtu, };

	return priv_ifreq(priv, SIOCSIFMTU, &request);
}

/**
 * Set device flags.
 *
 * @param priv
 *   Pointer to private structure.
 * @param keep
 *   Bitmask for flags that must remain untouched.
 * @param flags
 *   Bitmask for flags to modify.
 *
 * @return
 *   0 on success, -1 on failure and errno is set.
 */
int
priv_set_flags(struct priv *priv, unsigned int keep, unsigned int flags)
{
	struct ifreq request;
	int ret = priv_ifreq(priv, SIOCGIFFLAGS, &request);

	if (ret)
		return ret;
	request.ifr_flags &= keep;
	request.ifr_flags |= flags & ~keep;
	return priv_ifreq(priv, SIOCSIFFLAGS, &request);
}
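
/*
 * Usage sketch for priv_set_flags(): to raise IFF_UP while leaving every
 * other flag untouched, keep everything but IFF_UP and request it:
 *
 *	priv_set_flags(priv, ~IFF_UP, IFF_UP);   (link up)
 *	priv_set_flags(priv, ~IFF_UP, ~IFF_UP);  (link down)
 *
 * This is exactly how priv_dev_set_link() below drives the link state.
 */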

/**
 * Ethernet device configuration.
 *
 * Prepare the driver for a given number of TX and RX queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
dev_configure(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	unsigned int rxqs_n = dev->data->nb_rx_queues;
	unsigned int txqs_n = dev->data->nb_tx_queues;
	unsigned int i;
	unsigned int j;
	unsigned int reta_idx_n;
	const uint8_t use_app_rss_key =
		!!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
	uint64_t supp_tx_offloads = mlx5_priv_get_tx_port_offloads(priv);
	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
	uint64_t supp_rx_offloads =
		(mlx5_priv_get_rx_port_offloads(priv) |
		 mlx5_priv_get_rx_queue_offloads(priv));
	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;

	if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
		ERROR("Some Tx offloads are not supported "
		      "requested 0x%" PRIx64 " supported 0x%" PRIx64,
		      tx_offloads, supp_tx_offloads);
		return ENOTSUP;
	}
	if ((rx_offloads & supp_rx_offloads) != rx_offloads) {
		ERROR("Some Rx offloads are not supported "
		      "requested 0x%" PRIx64 " supported 0x%" PRIx64,
		      rx_offloads, supp_rx_offloads);
		return ENOTSUP;
	}
	if (use_app_rss_key &&
	    (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len !=
	     rss_hash_default_key_len)) {
		/* MLX5 RSS only supports a 40-byte key. */
		return EINVAL;
	}
	priv->rss_conf.rss_key =
		rte_realloc(priv->rss_conf.rss_key,
			    rss_hash_default_key_len, 0);
	if (!priv->rss_conf.rss_key) {
		ERROR("cannot allocate RSS hash key memory (%u)", rxqs_n);
		return ENOMEM;
	}
	memcpy(priv->rss_conf.rss_key,
	       use_app_rss_key ?
	       dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key :
	       rss_hash_default_key,
	       rss_hash_default_key_len);
	priv->rss_conf.rss_key_len = rss_hash_default_key_len;
	priv->rss_conf.rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	priv->rxqs = (void *)dev->data->rx_queues;
	priv->txqs = (void *)dev->data->tx_queues;
	if (txqs_n != priv->txqs_n) {
		INFO("%p: TX queues number update: %u -> %u",
		     (void *)dev, priv->txqs_n, txqs_n);
		priv->txqs_n = txqs_n;
	}
	if (rxqs_n > priv->config.ind_table_max_size) {
		ERROR("cannot handle this many RX queues (%u)", rxqs_n);
		return EINVAL;
	}
	if (rxqs_n == priv->rxqs_n)
		return 0;
	INFO("%p: RX queues number update: %u -> %u",
	     (void *)dev, priv->rxqs_n, rxqs_n);
	priv->rxqs_n = rxqs_n;
	/* If the requested number of RX queues is not a power of two, use the
	 * maximum indirection table size for better balancing.
	 * The result is always rounded to the next power of two. */
	reta_idx_n = (1 << log2above((rxqs_n & (rxqs_n - 1)) ?
				     priv->config.ind_table_max_size :
				     rxqs_n));
	if (priv_rss_reta_index_resize(priv, reta_idx_n))
		return ENOMEM;
	/* When the number of RX queues is not a power of two, the remaining
	 * table entries are padded with reused WQs and hashes are not spread
	 * uniformly. */
	for (i = 0, j = 0; (i != reta_idx_n); ++i) {
		(*priv->reta_idx)[i] = j;
		if (++j == rxqs_n)
			j = 0;
	}
	return 0;
}
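
/*
 * Worked example of the indirection table sizing above (the value of
 * ind_table_max_size is device-dependent and illustrative here): with
 * rxqs_n = 8 (a power of two), reta_idx_n = 8 and every queue appears
 * exactly once. With rxqs_n = 6 and ind_table_max_size = 512,
 * reta_idx_n = 512 and the filling loop yields 0 1 2 3 4 5 0 1 2 3 4 5
 * ... so the remainder entries reuse existing WQs.
 */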

/**
 * DPDK callback for Ethernet device configuration.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
int
mlx5_dev_configure(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	int ret;

	priv_lock(priv);
	ret = dev_configure(dev);
	assert(ret >= 0);
	priv_unlock(priv);
	return -ret;
}

/**
 * DPDK callback to get information about the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] info
 *   Info structure output buffer.
 */
void
mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	unsigned int max;
	char ifname[IF_NAMESIZE];

	info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	priv_lock(priv);
	/* FIXME: we should ask the device for these values. */
	info->min_rx_bufsize = 32;
	info->max_rx_pktlen = 65536;
	/*
	 * Since we need one CQ per QP, the limit is the minimum number
	 * between the two values.
	 */
	max = RTE_MIN(priv->device_attr.orig_attr.max_cq,
		      priv->device_attr.orig_attr.max_qp);
	/* max_rx_queues is uint16_t, cap the value to avoid truncation. */
	if (max >= 65535)
		max = 65535;
	info->max_rx_queues = max;
	info->max_tx_queues = max;
	info->max_mac_addrs = RTE_DIM(priv->mac);
	info->rx_queue_offload_capa =
		mlx5_priv_get_rx_queue_offloads(priv);
	info->rx_offload_capa = (mlx5_priv_get_rx_port_offloads(priv) |
				 info->rx_queue_offload_capa);
	info->tx_offload_capa = mlx5_priv_get_tx_port_offloads(priv);
	if (priv_get_ifname(priv, &ifname) == 0)
		info->if_index = if_nametoindex(ifname);
	info->reta_size = priv->reta_idx_n ?
		priv->reta_idx_n : config->ind_table_max_size;
	info->hash_key_size = priv->rss_conf.rss_key_len;
	info->speed_capa = priv->link_speed_capa;
	info->flow_type_rss_offloads = ~MLX5_RSS_HF_MASK;
	priv_unlock(priv);
}

/**
 * Get supported packet types.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   A pointer to the supported Packet types array.
 */
const uint32_t *
mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/* refers to rxq_cq_to_pkt_type() */
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_NONFRAG,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L4_NONFRAG,
		RTE_PTYPE_INNER_L4_FRAG,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == mlx5_rx_burst ||
	    dev->rx_pkt_burst == mlx5_rx_burst_vec)
		return ptypes;
	return NULL;
}
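
/*
 * Applications reach this callback through the generic ethdev API; a
 * minimal sketch (port_id and the array size are illustrative):
 *
 *	uint32_t ptypes[16];
 *	int n = rte_eth_dev_get_supported_ptypes(port_id,
 *						 RTE_PTYPE_ALL_MASK,
 *						 ptypes, 16);
 *
 * The ethdev layer calls mlx5_dev_supported_ptypes_get() above and
 * filters the returned array against the given mask.
 */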

/**
 * DPDK callback to retrieve physical link information.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 when the link status changed, -1 on error or when it is unchanged.
 */
static int
mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	struct ethtool_cmd edata = {
		.cmd = ETHTOOL_GSET /* Deprecated since Linux v4.5. */
	};
	struct ifreq ifr;
	struct rte_eth_link dev_link;
	int link_speed = 0;

	/* priv_lock() is not taken to allow concurrent calls. */

	if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) {
		WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno));
		return -1;
	}
	memset(&dev_link, 0, sizeof(dev_link));
	dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
				(ifr.ifr_flags & IFF_RUNNING));
	ifr.ifr_data = (void *)&edata;
	if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
		WARN("ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s",
		     strerror(errno));
		return -1;
	}
	link_speed = ethtool_cmd_speed(&edata);
	if (link_speed == -1)
		dev_link.link_speed = 0;
	else
		dev_link.link_speed = link_speed;
	priv->link_speed_capa = 0;
	if (edata.supported & SUPPORTED_Autoneg)
		priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG;
	if (edata.supported & (SUPPORTED_1000baseT_Full |
			       SUPPORTED_1000baseKX_Full))
		priv->link_speed_capa |= ETH_LINK_SPEED_1G;
	if (edata.supported & SUPPORTED_10000baseKR_Full)
		priv->link_speed_capa |= ETH_LINK_SPEED_10G;
	if (edata.supported & (SUPPORTED_40000baseKR4_Full |
			       SUPPORTED_40000baseCR4_Full |
			       SUPPORTED_40000baseSR4_Full |
			       SUPPORTED_40000baseLR4_Full))
		priv->link_speed_capa |= ETH_LINK_SPEED_40G;
	dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
				ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
	dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
				  ETH_LINK_SPEED_FIXED);
	if (memcmp(&dev_link, &dev->data->dev_link, sizeof(dev_link))) {
		/* Link status changed. */
		dev->data->dev_link = dev_link;
		return 0;
	}
	/* Link status is still the same. */
	return -1;
}
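
/*
 * ETHTOOL_GSET is the legacy ethtool ioctl, deprecated since Linux v4.5
 * and limited to 32-bit link mode masks; nothing above 40G is reported
 * through this path here. priv_link_update() therefore only falls back
 * to it on kernels older than v4.9 and otherwise uses
 * mlx5_link_update_unlocked_gs() below.
 */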

/**
 * Retrieve physical link information (unlocked version using new ioctl).
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 when the link status changed, -1 on error or when it is unchanged.
 */
static int
mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	struct ethtool_link_settings gcmd = { .cmd = ETHTOOL_GLINKSETTINGS };
	struct ifreq ifr;
	struct rte_eth_link dev_link;
	uint64_t sc;

	if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) {
		WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno));
		return -1;
	}
	memset(&dev_link, 0, sizeof(dev_link));
	dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
				(ifr.ifr_flags & IFF_RUNNING));
	ifr.ifr_data = (void *)&gcmd;
	if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
		DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s",
		      strerror(errno));
		return -1;
	}
	/*
	 * The kernel answers the first pass with the required number of
	 * 32-bit words per link mode mask as a negative value; negate it
	 * to size the real request.
	 */
	gcmd.link_mode_masks_nwords = -gcmd.link_mode_masks_nwords;

	alignas(struct ethtool_link_settings)
	uint8_t data[offsetof(struct ethtool_link_settings, link_mode_masks) +
		     sizeof(uint32_t) * gcmd.link_mode_masks_nwords * 3];
	struct ethtool_link_settings *ecmd = (void *)data;

	*ecmd = gcmd;
	ifr.ifr_data = (void *)ecmd;
	if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
		DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s",
		      strerror(errno));
		return -1;
	}
	dev_link.link_speed = ecmd->speed;
	sc = ecmd->link_mode_masks[0] |
	     ((uint64_t)ecmd->link_mode_masks[1] << 32);
	priv->link_speed_capa = 0;
	if (sc & MLX5_BITSHIFT(ETHTOOL_LINK_MODE_Autoneg_BIT))
		priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseT_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_1G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_10G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_20G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_40G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_56G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_25G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_50G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_100G;
	dev_link.link_duplex = ((ecmd->duplex == DUPLEX_HALF) ?
				ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
	dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
				  ETH_LINK_SPEED_FIXED);
	if (memcmp(&dev_link, &dev->data->dev_link, sizeof(dev_link))) {
		/* Link status changed. */
		dev->data->dev_link = dev_link;
		return 0;
	}
	/* Link status is still the same. */
	return -1;
}
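
/*
 * Walkthrough of the two-pass ETHTOOL_GLINKSETTINGS exchange above (the
 * word count is illustrative; the kernel picks the real value):
 *
 *   1. Send the request with link_mode_masks_nwords = 0.
 *   2. The kernel rejects that size and answers with, e.g.,
 *      link_mode_masks_nwords = -3.
 *   3. Negate it (3) and resend with room for three masks of three
 *      words each: supported, advertising and lp_advertising, in that
 *      order. Only link_mode_masks[0..1] (the low 64 bits of the
 *      "supported" mask) feed the sc value tested above.
 */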

/**
 * Enable receiving and transmitting traffic.
 *
 * @param priv
 *   Pointer to private structure.
 */
static void
priv_link_start(struct priv *priv)
{
	struct rte_eth_dev *dev = priv->dev;
	int err;

	dev->tx_pkt_burst = priv_select_tx_function(priv, dev);
	dev->rx_pkt_burst = priv_select_rx_function(priv, dev);
	err = priv_dev_traffic_enable(priv, dev);
	if (err)
		ERROR("%p: error occurred while configuring control flows: %s",
		      (void *)priv, strerror(err));
	err = priv_flow_start(priv, &priv->flows);
	if (err)
		ERROR("%p: error occurred while configuring flows: %s",
		      (void *)priv, strerror(err));
}

/**
 * Disable receiving and transmitting traffic.
 *
 * @param priv
 *   Pointer to private structure.
 */
static void
priv_link_stop(struct priv *priv)
{
	struct rte_eth_dev *dev = priv->dev;

	priv_flow_stop(priv, &priv->flows);
	priv_dev_traffic_disable(priv, dev);
	dev->rx_pkt_burst = removed_rx_burst;
	dev->tx_pkt_burst = removed_tx_burst;
}

/**
 * Retrieve physical link information and update rx/tx_pkt_burst callbacks
 * accordingly.
 *
 * @param priv
 *   Pointer to private structure.
 * @param wait_to_complete
 *   Wait for request completion (ignored).
 *
 * @return
 *   0 if the link status changed, -1 otherwise.
 */
int
priv_link_update(struct priv *priv, int wait_to_complete __rte_unused)
{
	struct rte_eth_dev *dev = priv->dev;
	struct utsname utsname;
	int ver[3];
	int ret;
	struct rte_eth_link dev_link = dev->data->dev_link;

	if (uname(&utsname) == -1 ||
	    sscanf(utsname.release, "%d.%d.%d",
		   &ver[0], &ver[1], &ver[2]) != 3 ||
	    KERNEL_VERSION(ver[0], ver[1], ver[2]) < KERNEL_VERSION(4, 9, 0))
		ret = mlx5_link_update_unlocked_gset(dev);
	else
		ret = mlx5_link_update_unlocked_gs(dev);
	/* If lsc interrupt is disabled, should always be ready for traffic. */
	if (!dev->data->dev_conf.intr_conf.lsc) {
		priv_link_start(priv);
		return ret;
	}
	/* Re-select burst callbacks only if link status has been changed. */
	if (!ret && dev_link.link_status != dev->data->dev_link.link_status) {
		if (dev->data->dev_link.link_status == ETH_LINK_UP)
			priv_link_start(priv);
		else
			priv_link_stop(priv);
	}
	return ret;
}
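
/*
 * Example of the kernel version gating above: a release string such as
 * "4.4.0-116-generic" parses as 4.4.0, and KERNEL_VERSION(4, 4, 0) <
 * KERNEL_VERSION(4, 9, 0) selects the legacy ETHTOOL_GSET path; a
 * "4.15.0" kernel takes the ETHTOOL_GLINKSETTINGS path instead.
 */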

/**
 * Query the link status until it reaches the desired state.
 * The number of query attempts is bounded by MLX5_MAX_LINK_QUERY_ATTEMPTS.
 *
 * @param priv
 *   Pointer to private structure.
 * @param status
 *   Desired link status.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
int
priv_force_link_status_change(struct priv *priv, int status)
{
	int try = 0;

	while (try < MLX5_MAX_LINK_QUERY_ATTEMPTS) {
		priv_link_update(priv, 0);
		if (priv->dev->data->dev_link.link_status == status)
			return 0;
		try++;
		sleep(1);
	}
	return -EAGAIN;
}

/**
 * DPDK callback to retrieve physical link information.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param wait_to_complete
 *   Wait for request completion (ignored).
 *
 * @return
 *   0 if the link status changed, -1 otherwise.
 */
int
mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
{
	struct priv *priv = dev->data->dev_private;
	int ret;

	priv_lock(priv);
	ret = priv_link_update(priv, wait_to_complete);
	priv_unlock(priv);
	return ret;
}

/**
 * DPDK callback to change the MTU.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param mtu
 *   New MTU.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
int
mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct priv *priv = dev->data->dev_private;
	uint16_t kern_mtu;
	int ret = 0;

	priv_lock(priv);
	ret = priv_get_mtu(priv, &kern_mtu);
	if (ret)
		goto out;
	/* Set kernel interface MTU first. */
	ret = priv_set_mtu(priv, mtu);
	if (ret)
		goto out;
	ret = priv_get_mtu(priv, &kern_mtu);
	if (ret)
		goto out;
	if (kern_mtu == mtu) {
		priv->mtu = mtu;
		DEBUG("adapter port %u MTU set to %u", priv->port, mtu);
	}
	priv_unlock(priv);
	return 0;
out:
	ret = errno;
	WARN("cannot set port %u MTU to %u: %s", priv->port, mtu,
	     strerror(ret));
	priv_unlock(priv);
	assert(ret >= 0);
	return -ret;
}

/**
 * DPDK callback to get flow control status.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] fc_conf
 *   Flow control output buffer.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
int
mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct priv *priv = dev->data->dev_private;
	struct ifreq ifr;
	struct ethtool_pauseparam ethpause = {
		.cmd = ETHTOOL_GPAUSEPARAM
	};
	int ret;

	ifr.ifr_data = (void *)&ethpause;
	priv_lock(priv);
	if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
		ret = errno;
		WARN("ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM)"
		     " failed: %s",
		     strerror(ret));
		goto out;
	}

	fc_conf->autoneg = ethpause.autoneg;
	if (ethpause.rx_pause && ethpause.tx_pause)
		fc_conf->mode = RTE_FC_FULL;
	else if (ethpause.rx_pause)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (ethpause.tx_pause)
		fc_conf->mode = RTE_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_FC_NONE;
	ret = 0;

out:
	priv_unlock(priv);
	assert(ret >= 0);
	return -ret;
}
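
/*
 * Mapping applied above between the ethtool pause parameters and the
 * DPDK flow control modes:
 *
 *	rx_pause	tx_pause	fc_conf->mode
 *	1		1		RTE_FC_FULL
 *	1		0		RTE_FC_RX_PAUSE
 *	0		1		RTE_FC_TX_PAUSE
 *	0		0		RTE_FC_NONE
 *
 * mlx5_dev_set_flow_ctrl() below applies the inverse mapping.
 */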

/**
 * DPDK callback to modify flow control parameters.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[in] fc_conf
 *   Flow control parameters.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
int
mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct priv *priv = dev->data->dev_private;
	struct ifreq ifr;
	struct ethtool_pauseparam ethpause = {
		.cmd = ETHTOOL_SPAUSEPARAM
	};
	int ret;

	ifr.ifr_data = (void *)&ethpause;
	ethpause.autoneg = fc_conf->autoneg;
	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
	    (fc_conf->mode & RTE_FC_RX_PAUSE))
		ethpause.rx_pause = 1;
	else
		ethpause.rx_pause = 0;

	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
	    (fc_conf->mode & RTE_FC_TX_PAUSE))
		ethpause.tx_pause = 1;
	else
		ethpause.tx_pause = 0;

	priv_lock(priv);
	if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
		ret = errno;
		WARN("ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)"
		     " failed: %s",
		     strerror(ret));
		goto out;
	}
	ret = 0;

out:
	priv_unlock(priv);
	assert(ret >= 0);
	return -ret;
}

/**
 * Get PCI information from struct ibv_device.
 *
 * @param device
 *   Pointer to IB device structure.
 * @param[out] pci_addr
 *   PCI bus address output buffer.
 *
 * @return
 *   0 on success, -1 on failure and errno is set.
 */
int
mlx5_ibv_device_to_pci_addr(const struct ibv_device *device,
			    struct rte_pci_addr *pci_addr)
{
	FILE *file;
	char line[32];
	int found = 0;
	MKSTR(path, "%s/device/uevent", device->ibdev_path);

	file = fopen(path, "rb");
	if (file == NULL)
		return -1;
	while (fgets(line, sizeof(line), file) == line) {
		size_t len = strlen(line);
		int ret;

		/* Truncate long lines. */
		if (len == (sizeof(line) - 1))
			while (line[(len - 1)] != '\n') {
				ret = fgetc(file);
				if (ret == EOF)
					break;
				line[(len - 1)] = ret;
			}
		/* Extract information. */
		if (sscanf(line,
			   "PCI_SLOT_NAME="
			   "%" SCNx32 ":%" SCNx8 ":%" SCNx8 ".%" SCNx8 "\n",
			   &pci_addr->domain,
			   &pci_addr->bus,
			   &pci_addr->devid,
			   &pci_addr->function) == 4) {
			found = 1;
			break;
		}
	}
	fclose(file);
	if (!found) {
		/* No PCI_SLOT_NAME line: report failure as documented. */
		errno = ENOENT;
		return -1;
	}
	return 0;
}
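
/*
 * Illustrative uevent content matched above (addresses are examples):
 *
 *	DRIVER=mlx5_core
 *	PCI_CLASS=20000
 *	PCI_SLOT_NAME=0000:83:00.0
 *
 * Only the PCI_SLOT_NAME line is parsed; the other lines are skipped.
 */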

/**
 * Update the link status.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   Zero if the callback process can be called immediately.
 */
static int
priv_link_status_update(struct priv *priv)
{
	struct rte_eth_link *link = &priv->dev->data->dev_link;

	priv_link_update(priv, 0);
	if (((link->link_speed == 0) && link->link_status) ||
	    ((link->link_speed != 0) && !link->link_status)) {
		/*
		 * Inconsistent status. Event likely occurred before the
		 * kernel netdevice exposes the new status.
		 */
		if (!priv->pending_alarm) {
			priv->pending_alarm = 1;
			rte_eal_alarm_set(MLX5_ALARM_TIMEOUT_US,
					  mlx5_dev_link_status_handler,
					  priv->dev);
		}
		return 1;
	} else if (unlikely(priv->pending_alarm)) {
		/* Link interrupt occurred while alarm is already scheduled. */
		priv->pending_alarm = 0;
		rte_eal_alarm_cancel(mlx5_dev_link_status_handler, priv->dev);
	}
	return 0;
}

/**
 * Device status handler.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   Bitmap of events for which callbacks can be invoked immediately.
 */
static uint32_t
priv_dev_status_handler(struct priv *priv)
{
	struct ibv_async_event event;
	uint32_t ret = 0;

	/* Read all messages and acknowledge them. */
	for (;;) {
		if (mlx5_glue->get_async_event(priv->ctx, &event))
			break;
		if ((event.event_type == IBV_EVENT_PORT_ACTIVE ||
		     event.event_type == IBV_EVENT_PORT_ERR) &&
		    (priv->dev->data->dev_conf.intr_conf.lsc == 1))
			ret |= (1 << RTE_ETH_EVENT_INTR_LSC);
		else if (event.event_type == IBV_EVENT_DEVICE_FATAL &&
			 priv->dev->data->dev_conf.intr_conf.rmv == 1)
			ret |= (1 << RTE_ETH_EVENT_INTR_RMV);
		else
			DEBUG("event type %d on port %d not handled",
			      event.event_type, event.element.port_num);
		mlx5_glue->ack_async_event(&event);
	}
	if (ret & (1 << RTE_ETH_EVENT_INTR_LSC))
		if (priv_link_status_update(priv))
			ret &= ~(1 << RTE_ETH_EVENT_INTR_LSC);
	return ret;
}

/**
 * Handle delayed link status event.
 *
 * @param arg
 *   Registered argument.
 */
void
mlx5_dev_link_status_handler(void *arg)
{
	struct rte_eth_dev *dev = arg;
	struct priv *priv = dev->data->dev_private;
	int ret;

	while (!priv_trylock(priv)) {
		/* Alarm is being canceled. */
		if (priv->pending_alarm == 0)
			return;
		rte_pause();
	}
	priv->pending_alarm = 0;
	ret = priv_link_status_update(priv);
	priv_unlock(priv);
	if (!ret)
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
					      NULL);
}

/**
 * Handle interrupts from the NIC.
 *
 * @param cb_arg
 *   Callback argument.
 */
void
mlx5_dev_interrupt_handler(void *cb_arg)
{
	struct rte_eth_dev *dev = cb_arg;
	struct priv *priv = dev->data->dev_private;
	uint32_t events;

	priv_lock(priv);
	events = priv_dev_status_handler(priv);
	priv_unlock(priv);
	if (events & (1 << RTE_ETH_EVENT_INTR_LSC))
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
					      NULL);
	if (events & (1 << RTE_ETH_EVENT_INTR_RMV))
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RMV,
					      NULL);
}

/**
 * Handle interrupts from the socket.
 *
 * @param cb_arg
 *   Callback argument.
 */
static void
mlx5_dev_handler_socket(void *cb_arg)
{
	struct rte_eth_dev *dev = cb_arg;
	struct priv *priv = dev->data->dev_private;

	priv_lock(priv);
	priv_socket_handle(priv);
	priv_unlock(priv);
}
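
/*
 * Note on the trylock loop in mlx5_dev_link_status_handler() above:
 * rte_eal_alarm_cancel() waits for a running callback to return, so the
 * callback must not block on priv_lock() while the lock holder is
 * canceling the alarm. Spinning with priv_trylock() and re-checking
 * pending_alarm lets the callback observe the cancellation (pending_alarm
 * is cleared before the cancel) and bail out instead of deadlocking.
 */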

/**
 * Uninstall interrupt handler.
 *
 * @param priv
 *   Pointer to private structure.
 * @param dev
 *   Pointer to the rte_eth_dev structure.
 */
void
priv_dev_interrupt_handler_uninstall(struct priv *priv, struct rte_eth_dev *dev)
{
	if (dev->data->dev_conf.intr_conf.lsc ||
	    dev->data->dev_conf.intr_conf.rmv)
		rte_intr_callback_unregister(&priv->intr_handle,
					     mlx5_dev_interrupt_handler, dev);
	if (priv->primary_socket)
		rte_intr_callback_unregister(&priv->intr_handle_socket,
					     mlx5_dev_handler_socket, dev);
	if (priv->pending_alarm) {
		priv->pending_alarm = 0;
		rte_eal_alarm_cancel(mlx5_dev_link_status_handler, dev);
	}
	priv->intr_handle.fd = 0;
	priv->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
	priv->intr_handle_socket.fd = 0;
	priv->intr_handle_socket.type = RTE_INTR_HANDLE_UNKNOWN;
}

/**
 * Install interrupt handler.
 *
 * @param priv
 *   Pointer to private structure.
 * @param dev
 *   Pointer to the rte_eth_dev structure.
 */
void
priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev)
{
	int rc, flags;

	assert(priv->ctx->async_fd > 0);
	flags = fcntl(priv->ctx->async_fd, F_GETFL);
	rc = fcntl(priv->ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
	if (rc < 0) {
		INFO("failed to set the async event queue file descriptor"
		     " to non-blocking mode");
		dev->data->dev_conf.intr_conf.lsc = 0;
		dev->data->dev_conf.intr_conf.rmv = 0;
	}
	if (dev->data->dev_conf.intr_conf.lsc ||
	    dev->data->dev_conf.intr_conf.rmv) {
		priv->intr_handle.fd = priv->ctx->async_fd;
		priv->intr_handle.type = RTE_INTR_HANDLE_EXT;
		rte_intr_callback_register(&priv->intr_handle,
					   mlx5_dev_interrupt_handler, dev);
	}

	rc = priv_socket_init(priv);
	if (!rc && priv->primary_socket) {
		priv->intr_handle_socket.fd = priv->primary_socket;
		priv->intr_handle_socket.type = RTE_INTR_HANDLE_EXT;
		rte_intr_callback_register(&priv->intr_handle_socket,
					   mlx5_dev_handler_socket, dev);
	}
}

/**
 * Change the link state (UP / DOWN).
 *
 * @param priv
 *   Pointer to private data structure.
 * @param up
 *   Nonzero for link up, otherwise link down.
 *
 * @return
 *   0 on success, -1 on failure and errno is set.
 */
static int
priv_dev_set_link(struct priv *priv, int up)
{
	return priv_set_flags(priv, ~IFF_UP, up ? IFF_UP : ~IFF_UP);
}

/**
 * DPDK callback to bring the link DOWN.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, -1 on failure and errno is set.
 */
int
mlx5_set_link_down(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	int err;

	priv_lock(priv);
	err = priv_dev_set_link(priv, 0);
	priv_unlock(priv);
	return err;
}

/**
 * DPDK callback to bring the link UP.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, -1 on failure and errno is set.
 */
int
mlx5_set_link_up(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	int err;

	priv_lock(priv);
	err = priv_dev_set_link(priv, 1);
	priv_unlock(priv);
	return err;
}

/**
 * Configure the TX function to use.
 *
 * @param priv
 *   Pointer to private data structure.
 * @param dev
 *   Pointer to rte_eth_dev structure.
 *
 * @return
 *   Pointer to selected Tx burst function.
 */
eth_tx_burst_t
priv_select_tx_function(struct priv *priv, struct rte_eth_dev *dev)
{
	eth_tx_burst_t tx_pkt_burst = mlx5_tx_burst;
	struct mlx5_dev_config *config = &priv->config;
	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
	int tso = !!(tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
				    DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
				    DEV_TX_OFFLOAD_GRE_TNL_TSO));
	int vlan_insert = !!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT);

	assert(priv != NULL);
	/* Select appropriate TX function. */
	if (vlan_insert || tso)
		return tx_pkt_burst;
	if (config->mps == MLX5_MPW_ENHANCED) {
		if (priv_check_vec_tx_support(priv, dev) > 0) {
			if (priv_check_raw_vec_tx_support(priv, dev) > 0)
				tx_pkt_burst = mlx5_tx_burst_raw_vec;
			else
				tx_pkt_burst = mlx5_tx_burst_vec;
			DEBUG("selected Enhanced MPW TX vectorized function");
		} else {
			tx_pkt_burst = mlx5_tx_burst_empw;
			DEBUG("selected Enhanced MPW TX function");
		}
	} else if (config->mps && (config->txq_inline > 0)) {
		tx_pkt_burst = mlx5_tx_burst_mpw_inline;
		DEBUG("selected MPW inline TX function");
	} else if (config->mps) {
		tx_pkt_burst = mlx5_tx_burst_mpw;
		DEBUG("selected MPW TX function");
	}
	return tx_pkt_burst;
}
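
/*
 * Summary of the selection above, first match wins (config fields are
 * from struct mlx5_dev_config):
 *
 *	condition				burst function
 *	VLAN insertion or TSO requested		mlx5_tx_burst
 *	mps == MLX5_MPW_ENHANCED, vec OK	mlx5_tx_burst_[raw_]vec
 *	mps == MLX5_MPW_ENHANCED, no vec	mlx5_tx_burst_empw
 *	mps enabled and txq_inline > 0		mlx5_tx_burst_mpw_inline
 *	mps enabled				mlx5_tx_burst_mpw
 *	otherwise				mlx5_tx_burst
 */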

/**
 * Configure the RX function to use.
 *
 * @param priv
 *   Pointer to private data structure.
 * @param dev
 *   Pointer to rte_eth_dev structure.
 *
 * @return
 *   Pointer to selected Rx burst function.
 */
eth_rx_burst_t
priv_select_rx_function(struct priv *priv, __rte_unused struct rte_eth_dev *dev)
{
	eth_rx_burst_t rx_pkt_burst = mlx5_rx_burst;

	assert(priv != NULL);
	if (priv_check_vec_rx_support(priv) > 0) {
		rx_pkt_burst = mlx5_rx_burst_vec;
		DEBUG("selected RX vectorized function");
	}
	return rx_pkt_burst;
}

/**
 * Check if mlx5 device was removed.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   1 when device is removed, otherwise 0.
 */
int
mlx5_is_removed(struct rte_eth_dev *dev)
{
	struct ibv_device_attr device_attr;
	struct priv *priv = dev->data->dev_private;

	if (mlx5_glue->query_device(priv->ctx, &device_attr) == EIO)
		return 1;
	return 0;
}