1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright 2015 6WIND S.A. 3 * Copyright 2015 Mellanox Technologies, Ltd 4 */ 5 6 #define _GNU_SOURCE 7 8 #include <stddef.h> 9 #include <assert.h> 10 #include <inttypes.h> 11 #include <unistd.h> 12 #include <stdint.h> 13 #include <stdio.h> 14 #include <string.h> 15 #include <stdlib.h> 16 #include <errno.h> 17 #include <dirent.h> 18 #include <net/if.h> 19 #include <sys/ioctl.h> 20 #include <sys/socket.h> 21 #include <netinet/in.h> 22 #include <linux/ethtool.h> 23 #include <linux/sockios.h> 24 #include <fcntl.h> 25 #include <stdalign.h> 26 #include <sys/un.h> 27 #include <time.h> 28 29 #include <rte_atomic.h> 30 #include <rte_ethdev_driver.h> 31 #include <rte_bus_pci.h> 32 #include <rte_mbuf.h> 33 #include <rte_common.h> 34 #include <rte_interrupts.h> 35 #include <rte_malloc.h> 36 #include <rte_string_fns.h> 37 #include <rte_rwlock.h> 38 39 #include "mlx5.h" 40 #include "mlx5_glue.h" 41 #include "mlx5_rxtx.h" 42 #include "mlx5_utils.h" 43 44 /* Supported speed values found in /usr/include/linux/ethtool.h */ 45 #ifndef HAVE_SUPPORTED_40000baseKR4_Full 46 #define SUPPORTED_40000baseKR4_Full (1 << 23) 47 #endif 48 #ifndef HAVE_SUPPORTED_40000baseCR4_Full 49 #define SUPPORTED_40000baseCR4_Full (1 << 24) 50 #endif 51 #ifndef HAVE_SUPPORTED_40000baseSR4_Full 52 #define SUPPORTED_40000baseSR4_Full (1 << 25) 53 #endif 54 #ifndef HAVE_SUPPORTED_40000baseLR4_Full 55 #define SUPPORTED_40000baseLR4_Full (1 << 26) 56 #endif 57 #ifndef HAVE_SUPPORTED_56000baseKR4_Full 58 #define SUPPORTED_56000baseKR4_Full (1 << 27) 59 #endif 60 #ifndef HAVE_SUPPORTED_56000baseCR4_Full 61 #define SUPPORTED_56000baseCR4_Full (1 << 28) 62 #endif 63 #ifndef HAVE_SUPPORTED_56000baseSR4_Full 64 #define SUPPORTED_56000baseSR4_Full (1 << 29) 65 #endif 66 #ifndef HAVE_SUPPORTED_56000baseLR4_Full 67 #define SUPPORTED_56000baseLR4_Full (1 << 30) 68 #endif 69 70 /* Add defines in case the running kernel is not the same as user headers. */ 71 #ifndef ETHTOOL_GLINKSETTINGS 72 struct ethtool_link_settings { 73 uint32_t cmd; 74 uint32_t speed; 75 uint8_t duplex; 76 uint8_t port; 77 uint8_t phy_address; 78 uint8_t autoneg; 79 uint8_t mdio_support; 80 uint8_t eth_to_mdix; 81 uint8_t eth_tp_mdix_ctrl; 82 int8_t link_mode_masks_nwords; 83 uint32_t reserved[8]; 84 uint32_t link_mode_masks[]; 85 }; 86 87 #define ETHTOOL_GLINKSETTINGS 0x0000004c 88 #define ETHTOOL_LINK_MODE_1000baseT_Full_BIT 5 89 #define ETHTOOL_LINK_MODE_Autoneg_BIT 6 90 #define ETHTOOL_LINK_MODE_1000baseKX_Full_BIT 17 91 #define ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT 18 92 #define ETHTOOL_LINK_MODE_10000baseKR_Full_BIT 19 93 #define ETHTOOL_LINK_MODE_10000baseR_FEC_BIT 20 94 #define ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT 21 95 #define ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT 22 96 #define ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT 23 97 #define ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT 24 98 #define ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT 25 99 #define ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT 26 100 #define ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT 27 101 #define ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT 28 102 #define ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT 29 103 #define ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT 30 104 #endif 105 #ifndef HAVE_ETHTOOL_LINK_MODE_25G 106 #define ETHTOOL_LINK_MODE_25000baseCR_Full_BIT 31 107 #define ETHTOOL_LINK_MODE_25000baseKR_Full_BIT 32 108 #define ETHTOOL_LINK_MODE_25000baseSR_Full_BIT 33 109 #endif 110 #ifndef HAVE_ETHTOOL_LINK_MODE_50G 111 #define ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT 34 112 #define ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT 35 113 #endif 114 #ifndef HAVE_ETHTOOL_LINK_MODE_100G 115 #define ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT 36 116 #define ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT 37 117 #define ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT 38 118 #define ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT 39 119 #endif 120 121 /** 122 * Get master interface name from private structure. 123 * 124 * @param[in] dev 125 * Pointer to Ethernet device. 126 * @param[out] ifname 127 * Interface name output buffer. 128 * 129 * @return 130 * 0 on success, a negative errno value otherwise and rte_errno is set. 131 */ 132 int 133 mlx5_get_master_ifname(const struct rte_eth_dev *dev, 134 char (*ifname)[IF_NAMESIZE]) 135 { 136 struct priv *priv = dev->data->dev_private; 137 DIR *dir; 138 struct dirent *dent; 139 unsigned int dev_type = 0; 140 unsigned int dev_port_prev = ~0u; 141 char match[IF_NAMESIZE] = ""; 142 143 { 144 MKSTR(path, "%s/device/net", priv->ibdev_path); 145 146 dir = opendir(path); 147 if (dir == NULL) { 148 rte_errno = errno; 149 return -rte_errno; 150 } 151 } 152 while ((dent = readdir(dir)) != NULL) { 153 char *name = dent->d_name; 154 FILE *file; 155 unsigned int dev_port; 156 int r; 157 158 if ((name[0] == '.') && 159 ((name[1] == '\0') || 160 ((name[1] == '.') && (name[2] == '\0')))) 161 continue; 162 163 MKSTR(path, "%s/device/net/%s/%s", 164 priv->ibdev_path, name, 165 (dev_type ? "dev_id" : "dev_port")); 166 167 file = fopen(path, "rb"); 168 if (file == NULL) { 169 if (errno != ENOENT) 170 continue; 171 /* 172 * Switch to dev_id when dev_port does not exist as 173 * is the case with Linux kernel versions < 3.15. 174 */ 175 try_dev_id: 176 match[0] = '\0'; 177 if (dev_type) 178 break; 179 dev_type = 1; 180 dev_port_prev = ~0u; 181 rewinddir(dir); 182 continue; 183 } 184 r = fscanf(file, (dev_type ? "%x" : "%u"), &dev_port); 185 fclose(file); 186 if (r != 1) 187 continue; 188 /* 189 * Switch to dev_id when dev_port returns the same value for 190 * all ports. May happen when using a MOFED release older than 191 * 3.0 with a Linux kernel >= 3.15. 192 */ 193 if (dev_port == dev_port_prev) 194 goto try_dev_id; 195 dev_port_prev = dev_port; 196 if (dev_port == 0) 197 strlcpy(match, name, sizeof(match)); 198 } 199 closedir(dir); 200 if (match[0] == '\0') { 201 rte_errno = ENOENT; 202 return -rte_errno; 203 } 204 strncpy(*ifname, match, sizeof(*ifname)); 205 return 0; 206 } 207 208 /** 209 * Get interface name from private structure. 210 * 211 * This is a port representor-aware version of mlx5_get_master_ifname(). 212 * 213 * @param[in] dev 214 * Pointer to Ethernet device. 215 * @param[out] ifname 216 * Interface name output buffer. 217 * 218 * @return 219 * 0 on success, a negative errno value otherwise and rte_errno is set. 220 */ 221 int 222 mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE]) 223 { 224 struct priv *priv = dev->data->dev_private; 225 unsigned int ifindex = 226 priv->nl_socket_rdma >= 0 ? 227 mlx5_nl_ifindex(priv->nl_socket_rdma, priv->ibdev_name) : 0; 228 229 if (!ifindex) { 230 if (!priv->representor) 231 return mlx5_get_master_ifname(dev, ifname); 232 rte_errno = ENXIO; 233 return -rte_errno; 234 } 235 if (if_indextoname(ifindex, &(*ifname)[0])) 236 return 0; 237 rte_errno = errno; 238 return -rte_errno; 239 } 240 241 /** 242 * Get the interface index from device name. 243 * 244 * @param[in] dev 245 * Pointer to Ethernet device. 246 * 247 * @return 248 * Nonzero interface index on success, zero otherwise and rte_errno is set. 249 */ 250 unsigned int 251 mlx5_ifindex(const struct rte_eth_dev *dev) 252 { 253 char ifname[IF_NAMESIZE]; 254 unsigned int ifindex; 255 256 if (mlx5_get_ifname(dev, &ifname)) 257 return 0; 258 ifindex = if_nametoindex(ifname); 259 if (!ifindex) 260 rte_errno = errno; 261 return ifindex; 262 } 263 264 /** 265 * Perform ifreq ioctl() on associated Ethernet device. 266 * 267 * @param[in] dev 268 * Pointer to Ethernet device. 269 * @param req 270 * Request number to pass to ioctl(). 271 * @param[out] ifr 272 * Interface request structure output buffer. 273 * @param master 274 * When device is a port representor, perform request on master device 275 * instead. 276 * 277 * @return 278 * 0 on success, a negative errno value otherwise and rte_errno is set. 279 */ 280 int 281 mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr, 282 int master) 283 { 284 int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP); 285 int ret = 0; 286 287 if (sock == -1) { 288 rte_errno = errno; 289 return -rte_errno; 290 } 291 if (master) 292 ret = mlx5_get_master_ifname(dev, &ifr->ifr_name); 293 else 294 ret = mlx5_get_ifname(dev, &ifr->ifr_name); 295 if (ret) 296 goto error; 297 ret = ioctl(sock, req, ifr); 298 if (ret == -1) { 299 rte_errno = errno; 300 goto error; 301 } 302 close(sock); 303 return 0; 304 error: 305 close(sock); 306 return -rte_errno; 307 } 308 309 /** 310 * Get device MTU. 311 * 312 * @param dev 313 * Pointer to Ethernet device. 314 * @param[out] mtu 315 * MTU value output buffer. 316 * 317 * @return 318 * 0 on success, a negative errno value otherwise and rte_errno is set. 319 */ 320 int 321 mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu) 322 { 323 struct ifreq request; 324 int ret = mlx5_ifreq(dev, SIOCGIFMTU, &request, 0); 325 326 if (ret) 327 return ret; 328 *mtu = request.ifr_mtu; 329 return 0; 330 } 331 332 /** 333 * Set device MTU. 334 * 335 * @param dev 336 * Pointer to Ethernet device. 337 * @param mtu 338 * MTU value to set. 339 * 340 * @return 341 * 0 on success, a negative errno value otherwise and rte_errno is set. 342 */ 343 static int 344 mlx5_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) 345 { 346 struct ifreq request = { .ifr_mtu = mtu, }; 347 348 return mlx5_ifreq(dev, SIOCSIFMTU, &request, 0); 349 } 350 351 /** 352 * Set device flags. 353 * 354 * @param dev 355 * Pointer to Ethernet device. 356 * @param keep 357 * Bitmask for flags that must remain untouched. 358 * @param flags 359 * Bitmask for flags to modify. 360 * 361 * @return 362 * 0 on success, a negative errno value otherwise and rte_errno is set. 363 */ 364 int 365 mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep, unsigned int flags) 366 { 367 struct ifreq request; 368 int ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &request, 0); 369 370 if (ret) 371 return ret; 372 request.ifr_flags &= keep; 373 request.ifr_flags |= flags & ~keep; 374 return mlx5_ifreq(dev, SIOCSIFFLAGS, &request, 0); 375 } 376 377 /** 378 * DPDK callback for Ethernet device configuration. 379 * 380 * @param dev 381 * Pointer to Ethernet device structure. 382 * 383 * @return 384 * 0 on success, a negative errno value otherwise and rte_errno is set. 385 */ 386 int 387 mlx5_dev_configure(struct rte_eth_dev *dev) 388 { 389 struct priv *priv = dev->data->dev_private; 390 unsigned int rxqs_n = dev->data->nb_rx_queues; 391 unsigned int txqs_n = dev->data->nb_tx_queues; 392 unsigned int i; 393 unsigned int j; 394 unsigned int reta_idx_n; 395 const uint8_t use_app_rss_key = 396 !!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key; 397 int ret = 0; 398 399 if (use_app_rss_key && 400 (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len != 401 MLX5_RSS_HASH_KEY_LEN)) { 402 DRV_LOG(ERR, "port %u RSS key len must be %s Bytes long", 403 dev->data->port_id, RTE_STR(MLX5_RSS_HASH_KEY_LEN)); 404 rte_errno = EINVAL; 405 return -rte_errno; 406 } 407 priv->rss_conf.rss_key = 408 rte_realloc(priv->rss_conf.rss_key, 409 MLX5_RSS_HASH_KEY_LEN, 0); 410 if (!priv->rss_conf.rss_key) { 411 DRV_LOG(ERR, "port %u cannot allocate RSS hash key memory (%u)", 412 dev->data->port_id, rxqs_n); 413 rte_errno = ENOMEM; 414 return -rte_errno; 415 } 416 memcpy(priv->rss_conf.rss_key, 417 use_app_rss_key ? 418 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key : 419 rss_hash_default_key, 420 MLX5_RSS_HASH_KEY_LEN); 421 priv->rss_conf.rss_key_len = MLX5_RSS_HASH_KEY_LEN; 422 priv->rss_conf.rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf; 423 priv->rxqs = (void *)dev->data->rx_queues; 424 priv->txqs = (void *)dev->data->tx_queues; 425 if (txqs_n != priv->txqs_n) { 426 DRV_LOG(INFO, "port %u Tx queues number update: %u -> %u", 427 dev->data->port_id, priv->txqs_n, txqs_n); 428 priv->txqs_n = txqs_n; 429 } 430 if (rxqs_n > priv->config.ind_table_max_size) { 431 DRV_LOG(ERR, "port %u cannot handle this many Rx queues (%u)", 432 dev->data->port_id, rxqs_n); 433 rte_errno = EINVAL; 434 return -rte_errno; 435 } 436 if (rxqs_n == priv->rxqs_n) 437 return 0; 438 DRV_LOG(INFO, "port %u Rx queues number update: %u -> %u", 439 dev->data->port_id, priv->rxqs_n, rxqs_n); 440 priv->rxqs_n = rxqs_n; 441 /* If the requested number of RX queues is not a power of two, use the 442 * maximum indirection table size for better balancing. 443 * The result is always rounded to the next power of two. */ 444 reta_idx_n = (1 << log2above((rxqs_n & (rxqs_n - 1)) ? 445 priv->config.ind_table_max_size : 446 rxqs_n)); 447 ret = mlx5_rss_reta_index_resize(dev, reta_idx_n); 448 if (ret) 449 return ret; 450 /* When the number of RX queues is not a power of two, the remaining 451 * table entries are padded with reused WQs and hashes are not spread 452 * uniformly. */ 453 for (i = 0, j = 0; (i != reta_idx_n); ++i) { 454 (*priv->reta_idx)[i] = j; 455 if (++j == rxqs_n) 456 j = 0; 457 } 458 return 0; 459 } 460 461 /** 462 * Sets default tuning parameters. 463 * 464 * @param dev 465 * Pointer to Ethernet device. 466 * @param[out] info 467 * Info structure output buffer. 468 */ 469 static void 470 mlx5_set_default_params(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) 471 { 472 struct priv *priv = dev->data->dev_private; 473 474 /* Minimum CPU utilization. */ 475 info->default_rxportconf.ring_size = 256; 476 info->default_txportconf.ring_size = 256; 477 info->default_rxportconf.burst_size = 64; 478 info->default_txportconf.burst_size = 64; 479 if (priv->link_speed_capa & ETH_LINK_SPEED_100G) { 480 info->default_rxportconf.nb_queues = 16; 481 info->default_txportconf.nb_queues = 16; 482 if (dev->data->nb_rx_queues > 2 || 483 dev->data->nb_tx_queues > 2) { 484 /* Max Throughput. */ 485 info->default_rxportconf.ring_size = 2048; 486 info->default_txportconf.ring_size = 2048; 487 } 488 } else { 489 info->default_rxportconf.nb_queues = 8; 490 info->default_txportconf.nb_queues = 8; 491 if (dev->data->nb_rx_queues > 2 || 492 dev->data->nb_tx_queues > 2) { 493 /* Max Throughput. */ 494 info->default_rxportconf.ring_size = 4096; 495 info->default_txportconf.ring_size = 4096; 496 } 497 } 498 } 499 500 /** 501 * DPDK callback to get information about the device. 502 * 503 * @param dev 504 * Pointer to Ethernet device structure. 505 * @param[out] info 506 * Info structure output buffer. 507 */ 508 void 509 mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) 510 { 511 struct priv *priv = dev->data->dev_private; 512 struct mlx5_dev_config *config = &priv->config; 513 unsigned int max; 514 char ifname[IF_NAMESIZE]; 515 516 /* FIXME: we should ask the device for these values. */ 517 info->min_rx_bufsize = 32; 518 info->max_rx_pktlen = 65536; 519 /* 520 * Since we need one CQ per QP, the limit is the minimum number 521 * between the two values. 522 */ 523 max = RTE_MIN(priv->device_attr.orig_attr.max_cq, 524 priv->device_attr.orig_attr.max_qp); 525 /* If max >= 65535 then max = 0, max_rx_queues is uint16_t. */ 526 if (max >= 65535) 527 max = 65535; 528 info->max_rx_queues = max; 529 info->max_tx_queues = max; 530 info->max_mac_addrs = MLX5_MAX_UC_MAC_ADDRESSES; 531 info->rx_queue_offload_capa = mlx5_get_rx_queue_offloads(dev); 532 info->rx_offload_capa = (mlx5_get_rx_port_offloads() | 533 info->rx_queue_offload_capa); 534 info->tx_offload_capa = mlx5_get_tx_port_offloads(dev); 535 if (mlx5_get_ifname(dev, &ifname) == 0) 536 info->if_index = if_nametoindex(ifname); 537 info->reta_size = priv->reta_idx_n ? 538 priv->reta_idx_n : config->ind_table_max_size; 539 info->hash_key_size = MLX5_RSS_HASH_KEY_LEN; 540 info->speed_capa = priv->link_speed_capa; 541 info->flow_type_rss_offloads = ~MLX5_RSS_HF_MASK; 542 mlx5_set_default_params(dev, info); 543 info->switch_info.name = dev->data->name; 544 info->switch_info.domain_id = priv->domain_id; 545 info->switch_info.port_id = priv->representor_id; 546 if (priv->representor) { 547 unsigned int i = mlx5_dev_to_port_id(dev->device, NULL, 0); 548 uint16_t port_id[i]; 549 550 i = RTE_MIN(mlx5_dev_to_port_id(dev->device, port_id, i), i); 551 while (i--) { 552 struct priv *opriv = 553 rte_eth_devices[port_id[i]].data->dev_private; 554 555 if (!opriv || 556 opriv->representor || 557 opriv->domain_id != priv->domain_id) 558 continue; 559 /* 560 * Override switch name with that of the master 561 * device. 562 */ 563 info->switch_info.name = opriv->dev_data->name; 564 break; 565 } 566 } 567 } 568 569 /** 570 * Get supported packet types. 571 * 572 * @param dev 573 * Pointer to Ethernet device structure. 574 * 575 * @return 576 * A pointer to the supported Packet types array. 577 */ 578 const uint32_t * 579 mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev) 580 { 581 static const uint32_t ptypes[] = { 582 /* refers to rxq_cq_to_pkt_type() */ 583 RTE_PTYPE_L2_ETHER, 584 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, 585 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, 586 RTE_PTYPE_L4_NONFRAG, 587 RTE_PTYPE_L4_FRAG, 588 RTE_PTYPE_L4_TCP, 589 RTE_PTYPE_L4_UDP, 590 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, 591 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, 592 RTE_PTYPE_INNER_L4_NONFRAG, 593 RTE_PTYPE_INNER_L4_FRAG, 594 RTE_PTYPE_INNER_L4_TCP, 595 RTE_PTYPE_INNER_L4_UDP, 596 RTE_PTYPE_UNKNOWN 597 }; 598 599 if (dev->rx_pkt_burst == mlx5_rx_burst || 600 dev->rx_pkt_burst == mlx5_rx_burst_mprq || 601 dev->rx_pkt_burst == mlx5_rx_burst_vec) 602 return ptypes; 603 return NULL; 604 } 605 606 /** 607 * DPDK callback to retrieve physical link information. 608 * 609 * @param dev 610 * Pointer to Ethernet device structure. 611 * @param[out] link 612 * Storage for current link status. 613 * 614 * @return 615 * 0 on success, a negative errno value otherwise and rte_errno is set. 616 */ 617 static int 618 mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, 619 struct rte_eth_link *link) 620 { 621 struct priv *priv = dev->data->dev_private; 622 struct ethtool_cmd edata = { 623 .cmd = ETHTOOL_GSET /* Deprecated since Linux v4.5. */ 624 }; 625 struct ifreq ifr; 626 struct rte_eth_link dev_link; 627 int link_speed = 0; 628 int ret; 629 630 ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr, 1); 631 if (ret) { 632 DRV_LOG(WARNING, "port %u ioctl(SIOCGIFFLAGS) failed: %s", 633 dev->data->port_id, strerror(rte_errno)); 634 return ret; 635 } 636 memset(&dev_link, 0, sizeof(dev_link)); 637 dev_link.link_status = ((ifr.ifr_flags & IFF_UP) && 638 (ifr.ifr_flags & IFF_RUNNING)); 639 ifr.ifr_data = (void *)&edata; 640 ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 1); 641 if (ret) { 642 DRV_LOG(WARNING, 643 "port %u ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s", 644 dev->data->port_id, strerror(rte_errno)); 645 return ret; 646 } 647 link_speed = ethtool_cmd_speed(&edata); 648 if (link_speed == -1) 649 dev_link.link_speed = ETH_SPEED_NUM_NONE; 650 else 651 dev_link.link_speed = link_speed; 652 priv->link_speed_capa = 0; 653 if (edata.supported & SUPPORTED_Autoneg) 654 priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG; 655 if (edata.supported & (SUPPORTED_1000baseT_Full | 656 SUPPORTED_1000baseKX_Full)) 657 priv->link_speed_capa |= ETH_LINK_SPEED_1G; 658 if (edata.supported & SUPPORTED_10000baseKR_Full) 659 priv->link_speed_capa |= ETH_LINK_SPEED_10G; 660 if (edata.supported & (SUPPORTED_40000baseKR4_Full | 661 SUPPORTED_40000baseCR4_Full | 662 SUPPORTED_40000baseSR4_Full | 663 SUPPORTED_40000baseLR4_Full)) 664 priv->link_speed_capa |= ETH_LINK_SPEED_40G; 665 dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ? 666 ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX); 667 dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds & 668 ETH_LINK_SPEED_FIXED); 669 if ((dev_link.link_speed && !dev_link.link_status) || 670 (!dev_link.link_speed && dev_link.link_status)) { 671 rte_errno = EAGAIN; 672 return -rte_errno; 673 } 674 *link = dev_link; 675 return 0; 676 } 677 678 /** 679 * Retrieve physical link information (unlocked version using new ioctl). 680 * 681 * @param dev 682 * Pointer to Ethernet device structure. 683 * @param[out] link 684 * Storage for current link status. 685 * 686 * @return 687 * 0 on success, a negative errno value otherwise and rte_errno is set. 688 */ 689 static int 690 mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, 691 struct rte_eth_link *link) 692 693 { 694 struct priv *priv = dev->data->dev_private; 695 struct ethtool_link_settings gcmd = { .cmd = ETHTOOL_GLINKSETTINGS }; 696 struct ifreq ifr; 697 struct rte_eth_link dev_link; 698 uint64_t sc; 699 int ret; 700 701 ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr, 1); 702 if (ret) { 703 DRV_LOG(WARNING, "port %u ioctl(SIOCGIFFLAGS) failed: %s", 704 dev->data->port_id, strerror(rte_errno)); 705 return ret; 706 } 707 memset(&dev_link, 0, sizeof(dev_link)); 708 dev_link.link_status = ((ifr.ifr_flags & IFF_UP) && 709 (ifr.ifr_flags & IFF_RUNNING)); 710 ifr.ifr_data = (void *)&gcmd; 711 ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 1); 712 if (ret) { 713 DRV_LOG(DEBUG, 714 "port %u ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS)" 715 " failed: %s", 716 dev->data->port_id, strerror(rte_errno)); 717 return ret; 718 } 719 gcmd.link_mode_masks_nwords = -gcmd.link_mode_masks_nwords; 720 721 alignas(struct ethtool_link_settings) 722 uint8_t data[offsetof(struct ethtool_link_settings, link_mode_masks) + 723 sizeof(uint32_t) * gcmd.link_mode_masks_nwords * 3]; 724 struct ethtool_link_settings *ecmd = (void *)data; 725 726 *ecmd = gcmd; 727 ifr.ifr_data = (void *)ecmd; 728 ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 1); 729 if (ret) { 730 DRV_LOG(DEBUG, 731 "port %u ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS)" 732 " failed: %s", 733 dev->data->port_id, strerror(rte_errno)); 734 return ret; 735 } 736 dev_link.link_speed = ecmd->speed; 737 sc = ecmd->link_mode_masks[0] | 738 ((uint64_t)ecmd->link_mode_masks[1] << 32); 739 priv->link_speed_capa = 0; 740 if (sc & MLX5_BITSHIFT(ETHTOOL_LINK_MODE_Autoneg_BIT)) 741 priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG; 742 if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseT_Full_BIT) | 743 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT))) 744 priv->link_speed_capa |= ETH_LINK_SPEED_1G; 745 if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT) | 746 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT) | 747 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT))) 748 priv->link_speed_capa |= ETH_LINK_SPEED_10G; 749 if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT) | 750 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT))) 751 priv->link_speed_capa |= ETH_LINK_SPEED_20G; 752 if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT) | 753 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT) | 754 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT) | 755 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT))) 756 priv->link_speed_capa |= ETH_LINK_SPEED_40G; 757 if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT) | 758 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT) | 759 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT) | 760 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT))) 761 priv->link_speed_capa |= ETH_LINK_SPEED_56G; 762 if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT) | 763 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT) | 764 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT))) 765 priv->link_speed_capa |= ETH_LINK_SPEED_25G; 766 if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT) | 767 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT))) 768 priv->link_speed_capa |= ETH_LINK_SPEED_50G; 769 if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT) | 770 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT) | 771 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT) | 772 MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT))) 773 priv->link_speed_capa |= ETH_LINK_SPEED_100G; 774 dev_link.link_duplex = ((ecmd->duplex == DUPLEX_HALF) ? 775 ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX); 776 dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds & 777 ETH_LINK_SPEED_FIXED); 778 if ((dev_link.link_speed && !dev_link.link_status) || 779 (!dev_link.link_speed && dev_link.link_status)) { 780 rte_errno = EAGAIN; 781 return -rte_errno; 782 } 783 *link = dev_link; 784 return 0; 785 } 786 787 /** 788 * DPDK callback to retrieve physical link information. 789 * 790 * @param dev 791 * Pointer to Ethernet device structure. 792 * @param wait_to_complete 793 * Wait for request completion. 794 * 795 * @return 796 * 0 if link status was not updated, positive if it was, a negative errno 797 * value otherwise and rte_errno is set. 798 */ 799 int 800 mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete) 801 { 802 int ret; 803 struct rte_eth_link dev_link; 804 time_t start_time = time(NULL); 805 806 do { 807 ret = mlx5_link_update_unlocked_gs(dev, &dev_link); 808 if (ret) 809 ret = mlx5_link_update_unlocked_gset(dev, &dev_link); 810 if (ret == 0) 811 break; 812 /* Handle wait to complete situation. */ 813 if (wait_to_complete && ret == -EAGAIN) { 814 if (abs((int)difftime(time(NULL), start_time)) < 815 MLX5_LINK_STATUS_TIMEOUT) { 816 usleep(0); 817 continue; 818 } else { 819 rte_errno = EBUSY; 820 return -rte_errno; 821 } 822 } else if (ret < 0) { 823 return ret; 824 } 825 } while (wait_to_complete); 826 ret = !!memcmp(&dev->data->dev_link, &dev_link, 827 sizeof(struct rte_eth_link)); 828 dev->data->dev_link = dev_link; 829 return ret; 830 } 831 832 /** 833 * DPDK callback to change the MTU. 834 * 835 * @param dev 836 * Pointer to Ethernet device structure. 837 * @param in_mtu 838 * New MTU. 839 * 840 * @return 841 * 0 on success, a negative errno value otherwise and rte_errno is set. 842 */ 843 int 844 mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) 845 { 846 struct priv *priv = dev->data->dev_private; 847 uint16_t kern_mtu = 0; 848 int ret; 849 850 ret = mlx5_get_mtu(dev, &kern_mtu); 851 if (ret) 852 return ret; 853 /* Set kernel interface MTU first. */ 854 ret = mlx5_set_mtu(dev, mtu); 855 if (ret) 856 return ret; 857 ret = mlx5_get_mtu(dev, &kern_mtu); 858 if (ret) 859 return ret; 860 if (kern_mtu == mtu) { 861 priv->mtu = mtu; 862 DRV_LOG(DEBUG, "port %u adapter MTU set to %u", 863 dev->data->port_id, mtu); 864 return 0; 865 } 866 rte_errno = EAGAIN; 867 return -rte_errno; 868 } 869 870 /** 871 * DPDK callback to get flow control status. 872 * 873 * @param dev 874 * Pointer to Ethernet device structure. 875 * @param[out] fc_conf 876 * Flow control output buffer. 877 * 878 * @return 879 * 0 on success, a negative errno value otherwise and rte_errno is set. 880 */ 881 int 882 mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 883 { 884 struct ifreq ifr; 885 struct ethtool_pauseparam ethpause = { 886 .cmd = ETHTOOL_GPAUSEPARAM 887 }; 888 int ret; 889 890 ifr.ifr_data = (void *)ðpause; 891 ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 1); 892 if (ret) { 893 DRV_LOG(WARNING, 894 "port %u ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM) failed:" 895 " %s", 896 dev->data->port_id, strerror(rte_errno)); 897 return ret; 898 } 899 fc_conf->autoneg = ethpause.autoneg; 900 if (ethpause.rx_pause && ethpause.tx_pause) 901 fc_conf->mode = RTE_FC_FULL; 902 else if (ethpause.rx_pause) 903 fc_conf->mode = RTE_FC_RX_PAUSE; 904 else if (ethpause.tx_pause) 905 fc_conf->mode = RTE_FC_TX_PAUSE; 906 else 907 fc_conf->mode = RTE_FC_NONE; 908 return 0; 909 } 910 911 /** 912 * DPDK callback to modify flow control parameters. 913 * 914 * @param dev 915 * Pointer to Ethernet device structure. 916 * @param[in] fc_conf 917 * Flow control parameters. 918 * 919 * @return 920 * 0 on success, a negative errno value otherwise and rte_errno is set. 921 */ 922 int 923 mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 924 { 925 struct ifreq ifr; 926 struct ethtool_pauseparam ethpause = { 927 .cmd = ETHTOOL_SPAUSEPARAM 928 }; 929 int ret; 930 931 ifr.ifr_data = (void *)ðpause; 932 ethpause.autoneg = fc_conf->autoneg; 933 if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) || 934 (fc_conf->mode & RTE_FC_RX_PAUSE)) 935 ethpause.rx_pause = 1; 936 else 937 ethpause.rx_pause = 0; 938 939 if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) || 940 (fc_conf->mode & RTE_FC_TX_PAUSE)) 941 ethpause.tx_pause = 1; 942 else 943 ethpause.tx_pause = 0; 944 ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 0); 945 if (ret) { 946 DRV_LOG(WARNING, 947 "port %u ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)" 948 " failed: %s", 949 dev->data->port_id, strerror(rte_errno)); 950 return ret; 951 } 952 return 0; 953 } 954 955 /** 956 * Get PCI information from struct ibv_device. 957 * 958 * @param device 959 * Pointer to Ethernet device structure. 960 * @param[out] pci_addr 961 * PCI bus address output buffer. 962 * 963 * @return 964 * 0 on success, a negative errno value otherwise and rte_errno is set. 965 */ 966 int 967 mlx5_ibv_device_to_pci_addr(const struct ibv_device *device, 968 struct rte_pci_addr *pci_addr) 969 { 970 FILE *file; 971 char line[32]; 972 MKSTR(path, "%s/device/uevent", device->ibdev_path); 973 974 file = fopen(path, "rb"); 975 if (file == NULL) { 976 rte_errno = errno; 977 return -rte_errno; 978 } 979 while (fgets(line, sizeof(line), file) == line) { 980 size_t len = strlen(line); 981 int ret; 982 983 /* Truncate long lines. */ 984 if (len == (sizeof(line) - 1)) 985 while (line[(len - 1)] != '\n') { 986 ret = fgetc(file); 987 if (ret == EOF) 988 break; 989 line[(len - 1)] = ret; 990 } 991 /* Extract information. */ 992 if (sscanf(line, 993 "PCI_SLOT_NAME=" 994 "%" SCNx32 ":%" SCNx8 ":%" SCNx8 ".%" SCNx8 "\n", 995 &pci_addr->domain, 996 &pci_addr->bus, 997 &pci_addr->devid, 998 &pci_addr->function) == 4) { 999 ret = 0; 1000 break; 1001 } 1002 } 1003 fclose(file); 1004 return 0; 1005 } 1006 1007 /** 1008 * Device status handler. 1009 * 1010 * @param dev 1011 * Pointer to Ethernet device. 1012 * @param events 1013 * Pointer to event flags holder. 1014 * 1015 * @return 1016 * Events bitmap of callback process which can be called immediately. 1017 */ 1018 static uint32_t 1019 mlx5_dev_status_handler(struct rte_eth_dev *dev) 1020 { 1021 struct priv *priv = dev->data->dev_private; 1022 struct ibv_async_event event; 1023 uint32_t ret = 0; 1024 1025 if (mlx5_link_update(dev, 0) == -EAGAIN) { 1026 usleep(0); 1027 return 0; 1028 } 1029 /* Read all message and acknowledge them. */ 1030 for (;;) { 1031 if (mlx5_glue->get_async_event(priv->ctx, &event)) 1032 break; 1033 if ((event.event_type == IBV_EVENT_PORT_ACTIVE || 1034 event.event_type == IBV_EVENT_PORT_ERR) && 1035 (dev->data->dev_conf.intr_conf.lsc == 1)) 1036 ret |= (1 << RTE_ETH_EVENT_INTR_LSC); 1037 else if (event.event_type == IBV_EVENT_DEVICE_FATAL && 1038 dev->data->dev_conf.intr_conf.rmv == 1) 1039 ret |= (1 << RTE_ETH_EVENT_INTR_RMV); 1040 else 1041 DRV_LOG(DEBUG, 1042 "port %u event type %d on not handled", 1043 dev->data->port_id, event.event_type); 1044 mlx5_glue->ack_async_event(&event); 1045 } 1046 return ret; 1047 } 1048 1049 /** 1050 * Handle interrupts from the NIC. 1051 * 1052 * @param[in] intr_handle 1053 * Interrupt handler. 1054 * @param cb_arg 1055 * Callback argument. 1056 */ 1057 void 1058 mlx5_dev_interrupt_handler(void *cb_arg) 1059 { 1060 struct rte_eth_dev *dev = cb_arg; 1061 uint32_t events; 1062 1063 events = mlx5_dev_status_handler(dev); 1064 if (events & (1 << RTE_ETH_EVENT_INTR_LSC)) 1065 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); 1066 if (events & (1 << RTE_ETH_EVENT_INTR_RMV)) 1067 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RMV, NULL); 1068 } 1069 1070 /** 1071 * Handle interrupts from the socket. 1072 * 1073 * @param cb_arg 1074 * Callback argument. 1075 */ 1076 static void 1077 mlx5_dev_handler_socket(void *cb_arg) 1078 { 1079 struct rte_eth_dev *dev = cb_arg; 1080 1081 mlx5_socket_handle(dev); 1082 } 1083 1084 /** 1085 * Uninstall interrupt handler. 1086 * 1087 * @param dev 1088 * Pointer to Ethernet device. 1089 */ 1090 void 1091 mlx5_dev_interrupt_handler_uninstall(struct rte_eth_dev *dev) 1092 { 1093 struct priv *priv = dev->data->dev_private; 1094 1095 if (dev->data->dev_conf.intr_conf.lsc || 1096 dev->data->dev_conf.intr_conf.rmv) 1097 rte_intr_callback_unregister(&priv->intr_handle, 1098 mlx5_dev_interrupt_handler, dev); 1099 if (priv->primary_socket) 1100 rte_intr_callback_unregister(&priv->intr_handle_socket, 1101 mlx5_dev_handler_socket, dev); 1102 priv->intr_handle.fd = 0; 1103 priv->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN; 1104 priv->intr_handle_socket.fd = 0; 1105 priv->intr_handle_socket.type = RTE_INTR_HANDLE_UNKNOWN; 1106 } 1107 1108 /** 1109 * Install interrupt handler. 1110 * 1111 * @param dev 1112 * Pointer to Ethernet device. 1113 */ 1114 void 1115 mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev) 1116 { 1117 struct priv *priv = dev->data->dev_private; 1118 int ret; 1119 int flags; 1120 1121 assert(priv->ctx->async_fd > 0); 1122 flags = fcntl(priv->ctx->async_fd, F_GETFL); 1123 ret = fcntl(priv->ctx->async_fd, F_SETFL, flags | O_NONBLOCK); 1124 if (ret) { 1125 DRV_LOG(INFO, 1126 "port %u failed to change file descriptor async event" 1127 " queue", 1128 dev->data->port_id); 1129 dev->data->dev_conf.intr_conf.lsc = 0; 1130 dev->data->dev_conf.intr_conf.rmv = 0; 1131 } 1132 if (dev->data->dev_conf.intr_conf.lsc || 1133 dev->data->dev_conf.intr_conf.rmv) { 1134 priv->intr_handle.fd = priv->ctx->async_fd; 1135 priv->intr_handle.type = RTE_INTR_HANDLE_EXT; 1136 rte_intr_callback_register(&priv->intr_handle, 1137 mlx5_dev_interrupt_handler, dev); 1138 } 1139 ret = mlx5_socket_init(dev); 1140 if (ret) 1141 DRV_LOG(ERR, "port %u cannot initialise socket: %s", 1142 dev->data->port_id, strerror(rte_errno)); 1143 else if (priv->primary_socket) { 1144 priv->intr_handle_socket.fd = priv->primary_socket; 1145 priv->intr_handle_socket.type = RTE_INTR_HANDLE_EXT; 1146 rte_intr_callback_register(&priv->intr_handle_socket, 1147 mlx5_dev_handler_socket, dev); 1148 } 1149 } 1150 1151 /** 1152 * DPDK callback to bring the link DOWN. 1153 * 1154 * @param dev 1155 * Pointer to Ethernet device structure. 1156 * 1157 * @return 1158 * 0 on success, a negative errno value otherwise and rte_errno is set. 1159 */ 1160 int 1161 mlx5_set_link_down(struct rte_eth_dev *dev) 1162 { 1163 return mlx5_set_flags(dev, ~IFF_UP, ~IFF_UP); 1164 } 1165 1166 /** 1167 * DPDK callback to bring the link UP. 1168 * 1169 * @param dev 1170 * Pointer to Ethernet device structure. 1171 * 1172 * @return 1173 * 0 on success, a negative errno value otherwise and rte_errno is set. 1174 */ 1175 int 1176 mlx5_set_link_up(struct rte_eth_dev *dev) 1177 { 1178 return mlx5_set_flags(dev, ~IFF_UP, IFF_UP); 1179 } 1180 1181 /** 1182 * Configure the TX function to use. 1183 * 1184 * @param dev 1185 * Pointer to private data structure. 1186 * 1187 * @return 1188 * Pointer to selected Tx burst function. 1189 */ 1190 eth_tx_burst_t 1191 mlx5_select_tx_function(struct rte_eth_dev *dev) 1192 { 1193 struct priv *priv = dev->data->dev_private; 1194 eth_tx_burst_t tx_pkt_burst = mlx5_tx_burst; 1195 struct mlx5_dev_config *config = &priv->config; 1196 uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads; 1197 int tso = !!(tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO | 1198 DEV_TX_OFFLOAD_VXLAN_TNL_TSO | 1199 DEV_TX_OFFLOAD_GRE_TNL_TSO | 1200 DEV_TX_OFFLOAD_IP_TNL_TSO | 1201 DEV_TX_OFFLOAD_UDP_TNL_TSO)); 1202 int swp = !!(tx_offloads & (DEV_TX_OFFLOAD_IP_TNL_TSO | 1203 DEV_TX_OFFLOAD_UDP_TNL_TSO | 1204 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)); 1205 int vlan_insert = !!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT); 1206 1207 assert(priv != NULL); 1208 /* Select appropriate TX function. */ 1209 if (vlan_insert || tso || swp) 1210 return tx_pkt_burst; 1211 if (config->mps == MLX5_MPW_ENHANCED) { 1212 if (mlx5_check_vec_tx_support(dev) > 0) { 1213 if (mlx5_check_raw_vec_tx_support(dev) > 0) 1214 tx_pkt_burst = mlx5_tx_burst_raw_vec; 1215 else 1216 tx_pkt_burst = mlx5_tx_burst_vec; 1217 DRV_LOG(DEBUG, 1218 "port %u selected enhanced MPW Tx vectorized" 1219 " function", 1220 dev->data->port_id); 1221 } else { 1222 tx_pkt_burst = mlx5_tx_burst_empw; 1223 DRV_LOG(DEBUG, 1224 "port %u selected enhanced MPW Tx function", 1225 dev->data->port_id); 1226 } 1227 } else if (config->mps && (config->txq_inline > 0)) { 1228 tx_pkt_burst = mlx5_tx_burst_mpw_inline; 1229 DRV_LOG(DEBUG, "port %u selected MPW inline Tx function", 1230 dev->data->port_id); 1231 } else if (config->mps) { 1232 tx_pkt_burst = mlx5_tx_burst_mpw; 1233 DRV_LOG(DEBUG, "port %u selected MPW Tx function", 1234 dev->data->port_id); 1235 } 1236 return tx_pkt_burst; 1237 } 1238 1239 /** 1240 * Configure the RX function to use. 1241 * 1242 * @param dev 1243 * Pointer to private data structure. 1244 * 1245 * @return 1246 * Pointer to selected Rx burst function. 1247 */ 1248 eth_rx_burst_t 1249 mlx5_select_rx_function(struct rte_eth_dev *dev) 1250 { 1251 eth_rx_burst_t rx_pkt_burst = mlx5_rx_burst; 1252 1253 assert(dev != NULL); 1254 if (mlx5_check_vec_rx_support(dev) > 0) { 1255 rx_pkt_burst = mlx5_rx_burst_vec; 1256 DRV_LOG(DEBUG, "port %u selected Rx vectorized function", 1257 dev->data->port_id); 1258 } else if (mlx5_mprq_enabled(dev)) { 1259 rx_pkt_burst = mlx5_rx_burst_mprq; 1260 } 1261 return rx_pkt_burst; 1262 } 1263 1264 /** 1265 * Check if mlx5 device was removed. 1266 * 1267 * @param dev 1268 * Pointer to Ethernet device structure. 1269 * 1270 * @return 1271 * 1 when device is removed, otherwise 0. 1272 */ 1273 int 1274 mlx5_is_removed(struct rte_eth_dev *dev) 1275 { 1276 struct ibv_device_attr device_attr; 1277 struct priv *priv = dev->data->dev_private; 1278 1279 if (mlx5_glue->query_device(priv->ctx, &device_attr) == EIO) 1280 return 1; 1281 return 0; 1282 } 1283 1284 /** 1285 * Get port ID list of mlx5 instances sharing a common device. 1286 * 1287 * @param[in] dev 1288 * Device to look for. 1289 * @param[out] port_list 1290 * Result buffer for collected port IDs. 1291 * @param port_list_n 1292 * Maximum number of entries in result buffer. If 0, @p port_list can be 1293 * NULL. 1294 * 1295 * @return 1296 * Number of matching instances regardless of the @p port_list_n 1297 * parameter, 0 if none were found. 1298 */ 1299 unsigned int 1300 mlx5_dev_to_port_id(const struct rte_device *dev, uint16_t *port_list, 1301 unsigned int port_list_n) 1302 { 1303 uint16_t id; 1304 unsigned int n = 0; 1305 1306 RTE_ETH_FOREACH_DEV(id) { 1307 struct rte_eth_dev *ldev = &rte_eth_devices[id]; 1308 1309 if (!ldev->device || 1310 !ldev->device->driver || 1311 strcmp(ldev->device->driver->name, MLX5_DRIVER_NAME) || 1312 ldev->device != dev) 1313 continue; 1314 if (n < port_list_n) 1315 port_list[n] = id; 1316 n++; 1317 } 1318 return n; 1319 } 1320 1321 /** 1322 * Get switch information associated with network interface. 1323 * 1324 * @param ifindex 1325 * Network interface index. 1326 * @param[out] info 1327 * Switch information object, populated in case of success. 1328 * 1329 * @return 1330 * 0 on success, a negative errno value otherwise and rte_errno is set. 1331 */ 1332 int 1333 mlx5_sysfs_switch_info(unsigned int ifindex, struct mlx5_switch_info *info) 1334 { 1335 char ifname[IF_NAMESIZE]; 1336 FILE *file; 1337 struct mlx5_switch_info data = { .master = 0, }; 1338 bool port_name_set = false; 1339 bool port_switch_id_set = false; 1340 char c; 1341 1342 if (!if_indextoname(ifindex, ifname)) { 1343 rte_errno = errno; 1344 return -rte_errno; 1345 } 1346 1347 MKSTR(phys_port_name, "/sys/class/net/%s/phys_port_name", 1348 ifname); 1349 MKSTR(phys_switch_id, "/sys/class/net/%s/phys_switch_id", 1350 ifname); 1351 1352 file = fopen(phys_port_name, "rb"); 1353 if (file != NULL) { 1354 port_name_set = 1355 fscanf(file, "%d%c", &data.port_name, &c) == 2 && 1356 c == '\n'; 1357 fclose(file); 1358 } 1359 file = fopen(phys_switch_id, "rb"); 1360 if (file == NULL) { 1361 rte_errno = errno; 1362 return -rte_errno; 1363 } 1364 port_switch_id_set = 1365 fscanf(file, "%" SCNx64 "%c", &data.switch_id, &c) == 2 && 1366 c == '\n'; 1367 fclose(file); 1368 data.master = port_switch_id_set && !port_name_set; 1369 data.representor = port_switch_id_set && port_name_set; 1370 *info = data; 1371 return 0; 1372 } 1373