/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <assert.h>
#include <inttypes.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#include <dirent.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <fcntl.h>
#include <stdalign.h>
#include <sys/un.h>
#include <time.h>

#include <rte_atomic.h>
#include <rte_ethdev_driver.h>
#include <rte_bus_pci.h>
#include <rte_mbuf.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_malloc.h>
#include <rte_string_fns.h>
#include <rte_rwlock.h>

#include "mlx5.h"
#include "mlx5_glue.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"

/* Supported speed values found in /usr/include/linux/ethtool.h */
#ifndef HAVE_SUPPORTED_40000baseKR4_Full
#define SUPPORTED_40000baseKR4_Full (1 << 23)
#endif
#ifndef HAVE_SUPPORTED_40000baseCR4_Full
#define SUPPORTED_40000baseCR4_Full (1 << 24)
#endif
#ifndef HAVE_SUPPORTED_40000baseSR4_Full
#define SUPPORTED_40000baseSR4_Full (1 << 25)
#endif
#ifndef HAVE_SUPPORTED_40000baseLR4_Full
#define SUPPORTED_40000baseLR4_Full (1 << 26)
#endif
#ifndef HAVE_SUPPORTED_56000baseKR4_Full
#define SUPPORTED_56000baseKR4_Full (1 << 27)
#endif
#ifndef HAVE_SUPPORTED_56000baseCR4_Full
#define SUPPORTED_56000baseCR4_Full (1 << 28)
#endif
#ifndef HAVE_SUPPORTED_56000baseSR4_Full
#define SUPPORTED_56000baseSR4_Full (1 << 29)
#endif
#ifndef HAVE_SUPPORTED_56000baseLR4_Full
#define SUPPORTED_56000baseLR4_Full (1 << 30)
#endif

/* Add defines in case the running kernel is not the same as user headers. */
#ifndef ETHTOOL_GLINKSETTINGS
struct ethtool_link_settings {
	uint32_t cmd;
	uint32_t speed;
	uint8_t duplex;
	uint8_t port;
	uint8_t phy_address;
	uint8_t autoneg;
	uint8_t mdio_support;
	uint8_t eth_tp_mdix;
	uint8_t eth_tp_mdix_ctrl;
	int8_t link_mode_masks_nwords;
	uint32_t reserved[8];
	uint32_t link_mode_masks[];
};

#define ETHTOOL_GLINKSETTINGS 0x0000004c
#define ETHTOOL_LINK_MODE_1000baseT_Full_BIT 5
#define ETHTOOL_LINK_MODE_Autoneg_BIT 6
#define ETHTOOL_LINK_MODE_1000baseKX_Full_BIT 17
#define ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT 18
#define ETHTOOL_LINK_MODE_10000baseKR_Full_BIT 19
#define ETHTOOL_LINK_MODE_10000baseR_FEC_BIT 20
#define ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT 21
#define ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT 22
#define ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT 23
#define ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT 24
#define ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT 25
#define ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT 26
#define ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT 27
#define ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT 28
#define ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT 29
#define ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT 30
#endif
#ifndef HAVE_ETHTOOL_LINK_MODE_25G
#define ETHTOOL_LINK_MODE_25000baseCR_Full_BIT 31
#define ETHTOOL_LINK_MODE_25000baseKR_Full_BIT 32
#define ETHTOOL_LINK_MODE_25000baseSR_Full_BIT 33
#endif
#ifndef HAVE_ETHTOOL_LINK_MODE_50G
#define ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT 34
#define ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT 35
#endif
#ifndef HAVE_ETHTOOL_LINK_MODE_100G
#define ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT 36
#define ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT 37
#define ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT 38
#define ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT 39
#endif

/**
 * Get master interface name from private structure.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[out] ifname
 *   Interface name output buffer.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_get_master_ifname(const struct rte_eth_dev *dev,
		       char (*ifname)[IF_NAMESIZE])
{
	struct mlx5_priv *priv = dev->data->dev_private;
	DIR *dir;
	struct dirent *dent;
	unsigned int dev_type = 0;
	unsigned int dev_port_prev = ~0u;
	char match[IF_NAMESIZE] = "";

	assert(priv);
	assert(priv->sh);
	{
		MKSTR(path, "%s/device/net", priv->sh->ibdev_path);

		dir = opendir(path);
		if (dir == NULL) {
			rte_errno = errno;
			return -rte_errno;
		}
	}
	while ((dent = readdir(dir)) != NULL) {
		char *name = dent->d_name;
		FILE *file;
		unsigned int dev_port;
		int r;

		if ((name[0] == '.') &&
		    ((name[1] == '\0') ||
		     ((name[1] == '.') && (name[2] == '\0'))))
			continue;

		MKSTR(path, "%s/device/net/%s/%s",
		      priv->sh->ibdev_path, name,
		      (dev_type ? "dev_id" : "dev_port"));

		file = fopen(path, "rb");
		if (file == NULL) {
			if (errno != ENOENT)
				continue;
			/*
			 * Switch to dev_id when dev_port does not exist as
			 * is the case with Linux kernel versions < 3.15.
			 */
try_dev_id:
			match[0] = '\0';
			if (dev_type)
				break;
			dev_type = 1;
			dev_port_prev = ~0u;
			rewinddir(dir);
			continue;
		}
		r = fscanf(file, (dev_type ? "%x" : "%u"), &dev_port);
		fclose(file);
		if (r != 1)
			continue;
		/*
		 * Switch to dev_id when dev_port returns the same value for
		 * all ports. May happen when using a MOFED release older than
		 * 3.0 with a Linux kernel >= 3.15.
		 */
		if (dev_port == dev_port_prev)
			goto try_dev_id;
		dev_port_prev = dev_port;
		if (dev_port == 0)
			strlcpy(match, name, sizeof(match));
	}
	closedir(dir);
	if (match[0] == '\0') {
		rte_errno = ENOENT;
		return -rte_errno;
	}
	strncpy(*ifname, match, sizeof(*ifname));
	return 0;
}

/**
 * Get interface name from private structure.
 *
 * This is a port representor-aware version of mlx5_get_master_ifname().
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[out] ifname
 *   Interface name output buffer.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE])
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int ifindex =
		priv->nl_socket_rdma >= 0 ?
		mlx5_nl_ifindex(priv->nl_socket_rdma,
				priv->sh->ibdev_name,
				priv->ibv_port) : 0;

	if (!ifindex) {
		if (!priv->representor)
			return mlx5_get_master_ifname(dev, ifname);
		rte_errno = ENXIO;
		return -rte_errno;
	}
	if (if_indextoname(ifindex, &(*ifname)[0]))
		return 0;
	rte_errno = errno;
	return -rte_errno;
}

/**
 * Get the interface index from device name.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   Nonzero interface index on success, zero otherwise and rte_errno is set.
 */
unsigned int
mlx5_ifindex(const struct rte_eth_dev *dev)
{
	char ifname[IF_NAMESIZE];
	unsigned int ifindex;

	if (mlx5_get_ifname(dev, &ifname))
		return 0;
	ifindex = if_nametoindex(ifname);
	if (!ifindex)
		rte_errno = errno;
	return ifindex;
}

/**
 * Perform ifreq ioctl() on associated Ethernet device.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param req
 *   Request number to pass to ioctl().
 * @param[out] ifr
 *   Interface request structure output buffer.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr)
{
	int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
	int ret = 0;

	if (sock == -1) {
		rte_errno = errno;
		return -rte_errno;
	}
	ret = mlx5_get_ifname(dev, &ifr->ifr_name);
	if (ret)
		goto error;
	ret = ioctl(sock, req, ifr);
	if (ret == -1) {
		rte_errno = errno;
		goto error;
	}
	close(sock);
	return 0;
error:
	close(sock);
	return -rte_errno;
}

/**
 * Get device MTU.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[out] mtu
 *   MTU value output buffer.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu)
{
	struct ifreq request;
	int ret = mlx5_ifreq(dev, SIOCGIFMTU, &request);

	if (ret)
		return ret;
	*mtu = request.ifr_mtu;
	return 0;
}

/**
 * Set device MTU.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param mtu
 *   MTU value to set.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct ifreq request = { .ifr_mtu = mtu, };

	return mlx5_ifreq(dev, SIOCSIFMTU, &request);
}

/**
 * Set device flags.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param keep
 *   Bitmask for flags that must remain untouched.
 * @param flags
 *   Bitmask for flags to modify.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep, unsigned int flags)
{
	struct ifreq request;
	int ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &request);

	if (ret)
		return ret;
	request.ifr_flags &= keep;
	request.ifr_flags |= flags & ~keep;
	return mlx5_ifreq(dev, SIOCSIFFLAGS, &request);
}

/**
 * DPDK callback for Ethernet device configuration.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_dev_configure(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int rxqs_n = dev->data->nb_rx_queues;
	unsigned int txqs_n = dev->data->nb_tx_queues;
	unsigned int i;
	unsigned int j;
	unsigned int reta_idx_n;
	const uint8_t use_app_rss_key =
		!!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
	int ret = 0;

	if (use_app_rss_key &&
	    (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len !=
	     MLX5_RSS_HASH_KEY_LEN)) {
		DRV_LOG(ERR, "port %u RSS key len must be %s Bytes long",
			dev->data->port_id, RTE_STR(MLX5_RSS_HASH_KEY_LEN));
		rte_errno = EINVAL;
		return -rte_errno;
	}
	priv->rss_conf.rss_key =
		rte_realloc(priv->rss_conf.rss_key,
			    MLX5_RSS_HASH_KEY_LEN, 0);
	if (!priv->rss_conf.rss_key) {
		DRV_LOG(ERR, "port %u cannot allocate RSS hash key memory (%u)",
			dev->data->port_id, rxqs_n);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	memcpy(priv->rss_conf.rss_key,
	       use_app_rss_key ?
	       dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key :
	       rss_hash_default_key,
	       MLX5_RSS_HASH_KEY_LEN);
	priv->rss_conf.rss_key_len = MLX5_RSS_HASH_KEY_LEN;
	priv->rss_conf.rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	priv->rxqs = (void *)dev->data->rx_queues;
	priv->txqs = (void *)dev->data->tx_queues;
	if (txqs_n != priv->txqs_n) {
		DRV_LOG(INFO, "port %u Tx queues number update: %u -> %u",
			dev->data->port_id, priv->txqs_n, txqs_n);
		priv->txqs_n = txqs_n;
	}
	if (rxqs_n > priv->config.ind_table_max_size) {
		DRV_LOG(ERR, "port %u cannot handle this many Rx queues (%u)",
			dev->data->port_id, rxqs_n);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	if (rxqs_n == priv->rxqs_n)
		return 0;
	DRV_LOG(INFO, "port %u Rx queues number update: %u -> %u",
		dev->data->port_id, priv->rxqs_n, rxqs_n);
	priv->rxqs_n = rxqs_n;
	/* If the requested number of RX queues is not a power of two, use the
	 * maximum indirection table size for better balancing.
	 * The result is always rounded to the next power of two. */
	reta_idx_n = (1 << log2above((rxqs_n & (rxqs_n - 1)) ?
				     priv->config.ind_table_max_size :
				     rxqs_n));
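	/* For instance, rxqs_n = 6 with ind_table_max_size = 512 yields
	 * reta_idx_n = 512, while a power of two such as rxqs_n = 4 yields
	 * reta_idx_n = 4. */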
	ret = mlx5_rss_reta_index_resize(dev, reta_idx_n);
	if (ret)
		return ret;
	/* When the number of RX queues is not a power of two, the remaining
	 * table entries are padded with reused WQs and hashes are not spread
	 * uniformly. */
	for (i = 0, j = 0; (i != reta_idx_n); ++i) {
		(*priv->reta_idx)[i] = j;
		if (++j == rxqs_n)
			j = 0;
	}
	return 0;
}

/**
 * Set default tuning parameters.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[out] info
 *   Info structure output buffer.
 */
static void
mlx5_set_default_params(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	/* Minimum CPU utilization. */
	info->default_rxportconf.ring_size = 256;
	info->default_txportconf.ring_size = 256;
	info->default_rxportconf.burst_size = 64;
	info->default_txportconf.burst_size = 64;
	if (priv->link_speed_capa & ETH_LINK_SPEED_100G) {
		info->default_rxportconf.nb_queues = 16;
		info->default_txportconf.nb_queues = 16;
		if (dev->data->nb_rx_queues > 2 ||
		    dev->data->nb_tx_queues > 2) {
			/* Max Throughput. */
			info->default_rxportconf.ring_size = 2048;
			info->default_txportconf.ring_size = 2048;
		}
	} else {
		info->default_rxportconf.nb_queues = 8;
		info->default_txportconf.nb_queues = 8;
		if (dev->data->nb_rx_queues > 2 ||
		    dev->data->nb_tx_queues > 2) {
			/* Max Throughput. */
			info->default_rxportconf.ring_size = 4096;
			info->default_txportconf.ring_size = 4096;
		}
	}
}

/**
 * DPDK callback to get information about the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] info
 *   Info structure output buffer.
 */
void
mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	unsigned int max;
	char ifname[IF_NAMESIZE];

	/* FIXME: we should ask the device for these values. */
	info->min_rx_bufsize = 32;
	info->max_rx_pktlen = 65536;
	/*
	 * Since we need one CQ per QP, the limit is the minimum number
	 * between the two values.
	 */
	max = RTE_MIN(priv->sh->device_attr.orig_attr.max_cq,
		      priv->sh->device_attr.orig_attr.max_qp);
	/* max_rx_queues is uint16_t, cap the value accordingly. */
	if (max >= 65535)
		max = 65535;
	info->max_rx_queues = max;
	info->max_tx_queues = max;
	info->max_mac_addrs = MLX5_MAX_UC_MAC_ADDRESSES;
	info->rx_queue_offload_capa = mlx5_get_rx_queue_offloads(dev);
	info->rx_offload_capa = (mlx5_get_rx_port_offloads() |
				 info->rx_queue_offload_capa);
	info->tx_offload_capa = mlx5_get_tx_port_offloads(dev);
	if (mlx5_get_ifname(dev, &ifname) == 0)
		info->if_index = if_nametoindex(ifname);
	info->reta_size = priv->reta_idx_n ?
		priv->reta_idx_n : config->ind_table_max_size;
	info->hash_key_size = MLX5_RSS_HASH_KEY_LEN;
	info->speed_capa = priv->link_speed_capa;
	info->flow_type_rss_offloads = ~MLX5_RSS_HF_MASK;
	mlx5_set_default_params(dev, info);
	info->switch_info.name = dev->data->name;
	info->switch_info.domain_id = priv->domain_id;
	info->switch_info.port_id = priv->representor_id;
	if (priv->representor) {
		unsigned int i = mlx5_dev_to_port_id(dev->device, NULL, 0);
		uint16_t port_id[i];

		i = RTE_MIN(mlx5_dev_to_port_id(dev->device, port_id, i), i);
		while (i--) {
			struct mlx5_priv *opriv =
				rte_eth_devices[port_id[i]].data->dev_private;

			if (!opriv ||
			    opriv->representor ||
			    opriv->domain_id != priv->domain_id)
				continue;
			/*
			 * Override switch name with that of the master
			 * device.
			 */
			info->switch_info.name = opriv->dev_data->name;
			break;
		}
	}
}

/**
 * Get firmware version of a device.
 *
 * @param dev
 *   Ethernet device port.
 * @param fw_ver
 *   String output allocated by caller.
 * @param fw_size
 *   Size of the output string, including terminating null byte.
 *
 * @return
 *   0 on success, or the size of the non-truncated string if too big.
 */
int
mlx5_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_device_attr *attr = &priv->sh->device_attr.orig_attr;
	size_t size = strnlen(attr->fw_ver, sizeof(attr->fw_ver)) + 1;

	if (fw_size < size)
		return size;
	if (fw_ver != NULL)
		strlcpy(fw_ver, attr->fw_ver, fw_size);
	return 0;
}

/**
 * Get supported packet types.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   A pointer to the supported Packet types array.
 */
const uint32_t *
mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/* refers to rxq_cq_to_pkt_type() */
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_NONFRAG,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L4_NONFRAG,
		RTE_PTYPE_INNER_L4_FRAG,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == mlx5_rx_burst ||
	    dev->rx_pkt_burst == mlx5_rx_burst_mprq ||
	    dev->rx_pkt_burst == mlx5_rx_burst_vec)
		return ptypes;
	return NULL;
}
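
/*
 * The two helpers below retrieve link information either through the legacy
 * ETHTOOL_GSET ioctl or through the newer ETHTOOL_GLINKSETTINGS interface;
 * mlx5_link_update() tries the latter first and falls back to the former
 * when it is not supported.
 */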

/**
 * Retrieve physical link information (unlocked version using legacy ioctl).
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] link
 *   Storage for current link status.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev,
			       struct rte_eth_link *link)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ethtool_cmd edata = {
		.cmd = ETHTOOL_GSET /* Deprecated since Linux v4.5. */
	};
	struct ifreq ifr;
	struct rte_eth_link dev_link;
	int link_speed = 0;
	int ret;

	ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr);
	if (ret) {
		DRV_LOG(WARNING, "port %u ioctl(SIOCGIFFLAGS) failed: %s",
			dev->data->port_id, strerror(rte_errno));
		return ret;
	}
	dev_link = (struct rte_eth_link) {
		.link_status = ((ifr.ifr_flags & IFF_UP) &&
				(ifr.ifr_flags & IFF_RUNNING)),
	};
	ifr = (struct ifreq) {
		.ifr_data = (void *)&edata,
	};
	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
	if (ret) {
		DRV_LOG(WARNING,
			"port %u ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s",
			dev->data->port_id, strerror(rte_errno));
		return ret;
	}
	link_speed = ethtool_cmd_speed(&edata);
	if (link_speed == -1)
		dev_link.link_speed = ETH_SPEED_NUM_NONE;
	else
		dev_link.link_speed = link_speed;
	priv->link_speed_capa = 0;
	if (edata.supported & SUPPORTED_Autoneg)
		priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG;
	if (edata.supported & (SUPPORTED_1000baseT_Full |
			       SUPPORTED_1000baseKX_Full))
		priv->link_speed_capa |= ETH_LINK_SPEED_1G;
	if (edata.supported & SUPPORTED_10000baseKR_Full)
		priv->link_speed_capa |= ETH_LINK_SPEED_10G;
	if (edata.supported & (SUPPORTED_40000baseKR4_Full |
			       SUPPORTED_40000baseCR4_Full |
			       SUPPORTED_40000baseSR4_Full |
			       SUPPORTED_40000baseLR4_Full))
		priv->link_speed_capa |= ETH_LINK_SPEED_40G;
	dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
				ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
	dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
				  ETH_LINK_SPEED_FIXED);
	if (((dev_link.link_speed && !dev_link.link_status) ||
	     (!dev_link.link_speed && dev_link.link_status))) {
		rte_errno = EAGAIN;
		return -rte_errno;
	}
	*link = dev_link;
	return 0;
}
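
/*
 * Note on the ETHTOOL_GLINKSETTINGS handshake used below: the first ioctl is
 * issued with link_mode_masks_nwords left at zero, the kernel replies with
 * the negated number of 32-bit words it needs per link-mode bitmap, and a
 * second ioctl with a buffer sized accordingly then retrieves the masks.
 */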

/**
 * Retrieve physical link information (unlocked version using new ioctl).
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] link
 *   Storage for current link status.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev,
			     struct rte_eth_link *link)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ethtool_link_settings gcmd = { .cmd = ETHTOOL_GLINKSETTINGS };
	struct ifreq ifr;
	struct rte_eth_link dev_link;
	uint64_t sc;
	int ret;

	ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr);
	if (ret) {
		DRV_LOG(WARNING, "port %u ioctl(SIOCGIFFLAGS) failed: %s",
			dev->data->port_id, strerror(rte_errno));
		return ret;
	}
	dev_link = (struct rte_eth_link) {
		.link_status = ((ifr.ifr_flags & IFF_UP) &&
				(ifr.ifr_flags & IFF_RUNNING)),
	};
	ifr = (struct ifreq) {
		.ifr_data = (void *)&gcmd,
	};
	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
	if (ret) {
		DRV_LOG(DEBUG,
			"port %u ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS)"
			" failed: %s",
			dev->data->port_id, strerror(rte_errno));
		return ret;
	}
	gcmd.link_mode_masks_nwords = -gcmd.link_mode_masks_nwords;

	alignas(struct ethtool_link_settings)
	uint8_t data[offsetof(struct ethtool_link_settings, link_mode_masks) +
		     sizeof(uint32_t) * gcmd.link_mode_masks_nwords * 3];
	struct ethtool_link_settings *ecmd = (void *)data;

	*ecmd = gcmd;
	ifr.ifr_data = (void *)ecmd;
	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
	if (ret) {
		DRV_LOG(DEBUG,
			"port %u ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS)"
			" failed: %s",
			dev->data->port_id, strerror(rte_errno));
		return ret;
	}
	dev_link.link_speed = ecmd->speed;
	sc = ecmd->link_mode_masks[0] |
		((uint64_t)ecmd->link_mode_masks[1] << 32);
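	/*
	 * Only the first two 32-bit words of the link-mode bitmaps are
	 * combined into sc; bits 0-63 already cover every speed tested below
	 * (up to 100G), assuming the kernel reports at least two words per
	 * bitmap so that link_mode_masks[1] still belongs to the supported
	 * mask.
	 */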
	priv->link_speed_capa = 0;
	if (sc & MLX5_BITSHIFT(ETHTOOL_LINK_MODE_Autoneg_BIT))
		priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseT_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_1G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_10G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_20G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_40G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_56G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_25G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_50G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_100G;
	dev_link.link_duplex = ((ecmd->duplex == DUPLEX_HALF) ?
				ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
	dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
				  ETH_LINK_SPEED_FIXED);
	if (((dev_link.link_speed && !dev_link.link_status) ||
	     (!dev_link.link_speed && dev_link.link_status))) {
		rte_errno = EAGAIN;
		return -rte_errno;
	}
	*link = dev_link;
	return 0;
}

/**
 * DPDK callback to retrieve physical link information.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param wait_to_complete
 *   Wait for request completion.
 *
 * @return
 *   0 if link status was not updated, positive if it was, a negative errno
 *   value otherwise and rte_errno is set.
 */
int
mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	int ret;
	struct rte_eth_link dev_link;
	time_t start_time = time(NULL);

	do {
		ret = mlx5_link_update_unlocked_gs(dev, &dev_link);
		if (ret)
			ret = mlx5_link_update_unlocked_gset(dev, &dev_link);
		if (ret == 0)
			break;
		/* Handle wait to complete situation. */
		if (wait_to_complete && ret == -EAGAIN) {
			if (abs((int)difftime(time(NULL), start_time)) <
			    MLX5_LINK_STATUS_TIMEOUT) {
				usleep(0);
				continue;
			} else {
				rte_errno = EBUSY;
				return -rte_errno;
			}
		} else if (ret < 0) {
			return ret;
		}
	} while (wait_to_complete);
	ret = !!memcmp(&dev->data->dev_link, &dev_link,
		       sizeof(struct rte_eth_link));
	dev->data->dev_link = dev_link;
	return ret;
}

/**
 * DPDK callback to change the MTU.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param mtu
 *   New MTU.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint16_t kern_mtu = 0;
	int ret;

	ret = mlx5_get_mtu(dev, &kern_mtu);
	if (ret)
		return ret;
	/* Set kernel interface MTU first. */
	ret = mlx5_set_mtu(dev, mtu);
	if (ret)
		return ret;
	ret = mlx5_get_mtu(dev, &kern_mtu);
	if (ret)
		return ret;
	if (kern_mtu == mtu) {
		priv->mtu = mtu;
		DRV_LOG(DEBUG, "port %u adapter MTU set to %u",
			dev->data->port_id, mtu);
		return 0;
	}
	rte_errno = EAGAIN;
	return -rte_errno;
}

/**
 * DPDK callback to get flow control status.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] fc_conf
 *   Flow control output buffer.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct ifreq ifr;
	struct ethtool_pauseparam ethpause = {
		.cmd = ETHTOOL_GPAUSEPARAM
	};
	int ret;

	ifr.ifr_data = (void *)&ethpause;
	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
	if (ret) {
		DRV_LOG(WARNING,
			"port %u ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM) failed:"
			" %s",
			dev->data->port_id, strerror(rte_errno));
		return ret;
	}
	fc_conf->autoneg = ethpause.autoneg;
	if (ethpause.rx_pause && ethpause.tx_pause)
		fc_conf->mode = RTE_FC_FULL;
	else if (ethpause.rx_pause)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (ethpause.tx_pause)
		fc_conf->mode = RTE_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_FC_NONE;
	return 0;
}

/**
 * DPDK callback to modify flow control parameters.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[in] fc_conf
 *   Flow control parameters.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct ifreq ifr;
	struct ethtool_pauseparam ethpause = {
		.cmd = ETHTOOL_SPAUSEPARAM
	};
	int ret;

	ifr.ifr_data = (void *)&ethpause;
	ethpause.autoneg = fc_conf->autoneg;
	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
	    (fc_conf->mode & RTE_FC_RX_PAUSE))
		ethpause.rx_pause = 1;
	else
		ethpause.rx_pause = 0;

	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
	    (fc_conf->mode & RTE_FC_TX_PAUSE))
		ethpause.tx_pause = 1;
	else
		ethpause.tx_pause = 0;
	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
	if (ret) {
		DRV_LOG(WARNING,
			"port %u ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)"
			" failed: %s",
			dev->data->port_id, strerror(rte_errno));
		return ret;
	}
	return 0;
}

/**
 * Get PCI information from struct ibv_device.
 *
 * @param device
 *   Pointer to the IB device.
 * @param[out] pci_addr
 *   PCI bus address output buffer.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_ibv_device_to_pci_addr(const struct ibv_device *device,
			    struct rte_pci_addr *pci_addr)
{
	FILE *file;
	char line[32];
	MKSTR(path, "%s/device/uevent", device->ibdev_path);

	file = fopen(path, "rb");
	if (file == NULL) {
		rte_errno = errno;
		return -rte_errno;
	}
	while (fgets(line, sizeof(line), file) == line) {
		size_t len = strlen(line);
		int ret;

		/* Truncate long lines. */
		if (len == (sizeof(line) - 1))
			while (line[(len - 1)] != '\n') {
				ret = fgetc(file);
				if (ret == EOF)
					break;
				line[(len - 1)] = ret;
			}
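		/*
		 * Each uevent line has a KEY=value form; the one of interest
		 * here looks like "PCI_SLOT_NAME=0000:03:00.0" (address shown
		 * for illustration only).
		 */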
		/* Extract information. */
		if (sscanf(line,
			   "PCI_SLOT_NAME="
			   "%" SCNx32 ":%" SCNx8 ":%" SCNx8 ".%" SCNx8 "\n",
			   &pci_addr->domain,
			   &pci_addr->bus,
			   &pci_addr->devid,
			   &pci_addr->function) == 4) {
			ret = 0;
			break;
		}
	}
	fclose(file);
	return 0;
}

/**
 * Handle shared asynchronous events of the NIC (removal event
 * and link status change). Supports multiport IB device.
 *
 * @param cb_arg
 *   Callback argument.
 */
void
mlx5_dev_interrupt_handler(void *cb_arg)
{
	struct mlx5_ibv_shared *sh = cb_arg;
	struct ibv_async_event event;

	/* Read all messages from the IB device and acknowledge them. */
	for (;;) {
		struct rte_eth_dev *dev;
		uint32_t tmp;

		if (mlx5_glue->get_async_event(sh->ctx, &event))
			break;
		/* Retrieve and check IB port index. */
		tmp = (uint32_t)event.element.port_num;
		assert(tmp && (tmp <= sh->max_port));
		if (!tmp ||
		    tmp > sh->max_port ||
		    sh->port[tmp - 1].ih_port_id >= RTE_MAX_ETHPORTS) {
			/*
			 * Invalid IB port index or no handler
			 * installed for this port.
			 */
			mlx5_glue->ack_async_event(&event);
			continue;
		}
		/* Retrieve ethernet device descriptor. */
		tmp = sh->port[tmp - 1].ih_port_id;
		dev = &rte_eth_devices[tmp];
		tmp = 0;
		assert(dev);
		if ((event.event_type == IBV_EVENT_PORT_ACTIVE ||
		     event.event_type == IBV_EVENT_PORT_ERR) &&
		    dev->data->dev_conf.intr_conf.lsc) {
			mlx5_glue->ack_async_event(&event);
			if (mlx5_link_update(dev, 0) == -EAGAIN) {
				usleep(0);
				continue;
			}
			_rte_eth_dev_callback_process
				(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
			continue;
		}
		if (event.event_type == IBV_EVENT_DEVICE_FATAL &&
		    dev->data->dev_conf.intr_conf.rmv) {
			mlx5_glue->ack_async_event(&event);
			_rte_eth_dev_callback_process
				(dev, RTE_ETH_EVENT_INTR_RMV, NULL);
			continue;
		}
		DRV_LOG(DEBUG,
			"port %u event type %d not handled",
			dev->data->port_id, event.event_type);
		mlx5_glue->ack_async_event(&event);
	}
}

/**
 * Handle interrupts from the socket.
 *
 * @param cb_arg
 *   Callback argument.
 */
static void
mlx5_dev_handler_socket(void *cb_arg)
{
	struct rte_eth_dev *dev = cb_arg;

	mlx5_socket_handle(dev);
}

/**
 * Uninstall shared asynchronous device events handler.
 * This function is implemented to support event sharing
 * between multiple ports of a single IB device.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_dev_shared_handler_uninstall(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;
	pthread_mutex_lock(&sh->intr_mutex);
	assert(priv->ibv_port);
	assert(priv->ibv_port <= sh->max_port);
	assert(dev->data->port_id < RTE_MAX_ETHPORTS);
	if (sh->port[priv->ibv_port - 1].ih_port_id >= RTE_MAX_ETHPORTS)
		goto exit;
	assert(sh->port[priv->ibv_port - 1].ih_port_id ==
	       (uint32_t)dev->data->port_id);
	assert(sh->intr_cnt);
	sh->port[priv->ibv_port - 1].ih_port_id = RTE_MAX_ETHPORTS;
	if (!sh->intr_cnt || --sh->intr_cnt)
		goto exit;
	rte_intr_callback_unregister(&sh->intr_handle,
				     mlx5_dev_interrupt_handler, sh);
	sh->intr_handle.fd = 0;
	sh->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
exit:
	pthread_mutex_unlock(&sh->intr_mutex);
}
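
/*
 * Note: the asynchronous event handler above is registered only once per
 * shared IB context; sh->intr_cnt counts the ports currently relying on it
 * so that the install and uninstall helpers can register and unregister the
 * callback symmetrically.
 */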

/**
 * Install shared asynchronous device events handler.
 * This function is implemented to support event sharing
 * between multiple ports of a single IB device.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_dev_shared_handler_install(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	int ret;
	int flags;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;
	pthread_mutex_lock(&sh->intr_mutex);
	assert(priv->ibv_port);
	assert(priv->ibv_port <= sh->max_port);
	assert(dev->data->port_id < RTE_MAX_ETHPORTS);
	if (sh->port[priv->ibv_port - 1].ih_port_id < RTE_MAX_ETHPORTS) {
		/* The handler is already installed for this port. */
		assert(sh->intr_cnt);
		goto exit;
	}
	sh->port[priv->ibv_port - 1].ih_port_id = (uint32_t)dev->data->port_id;
	if (sh->intr_cnt) {
		sh->intr_cnt++;
		goto exit;
	}
	/* No shared handler installed. */
	assert(sh->ctx->async_fd > 0);
	flags = fcntl(sh->ctx->async_fd, F_GETFL);
	ret = fcntl(sh->ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
	if (ret) {
		DRV_LOG(INFO, "failed to change file descriptor of"
			" async event queue");
		/* Indicate there will be no interrupts. */
		dev->data->dev_conf.intr_conf.lsc = 0;
		dev->data->dev_conf.intr_conf.rmv = 0;
		sh->port[priv->ibv_port - 1].ih_port_id = RTE_MAX_ETHPORTS;
		goto exit;
	}
	sh->intr_handle.fd = sh->ctx->async_fd;
	sh->intr_handle.type = RTE_INTR_HANDLE_EXT;
	rte_intr_callback_register(&sh->intr_handle,
				   mlx5_dev_interrupt_handler, sh);
	sh->intr_cnt++;
exit:
	pthread_mutex_unlock(&sh->intr_mutex);
}

/**
 * Uninstall interrupt handler.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
void
mlx5_dev_interrupt_handler_uninstall(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	mlx5_dev_shared_handler_uninstall(dev);
	if (priv->primary_socket)
		rte_intr_callback_unregister(&priv->intr_handle_socket,
					     mlx5_dev_handler_socket, dev);
	priv->intr_handle_socket.fd = 0;
	priv->intr_handle_socket.type = RTE_INTR_HANDLE_UNKNOWN;
}

/**
 * Install interrupt handler.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
void
mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int ret;

	mlx5_dev_shared_handler_install(dev);
	ret = mlx5_socket_init(dev);
	if (ret)
		DRV_LOG(ERR, "port %u cannot initialise socket: %s",
			dev->data->port_id, strerror(rte_errno));
	else if (priv->primary_socket) {
		priv->intr_handle_socket.fd = priv->primary_socket;
		priv->intr_handle_socket.type = RTE_INTR_HANDLE_EXT;
		rte_intr_callback_register(&priv->intr_handle_socket,
					   mlx5_dev_handler_socket, dev);
	}
}
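
/*
 * The link toggling callbacks below rely on mlx5_set_flags(): bits set in
 * "keep" are preserved and the remaining bits are taken from "flags", so
 * passing (~IFF_UP, ~IFF_UP) clears IFF_UP while (~IFF_UP, IFF_UP) sets it.
 */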

/**
 * DPDK callback to bring the link DOWN.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_set_link_down(struct rte_eth_dev *dev)
{
	return mlx5_set_flags(dev, ~IFF_UP, ~IFF_UP);
}

/**
 * DPDK callback to bring the link UP.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_set_link_up(struct rte_eth_dev *dev)
{
	return mlx5_set_flags(dev, ~IFF_UP, IFF_UP);
}

/**
 * Configure the TX function to use.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   Pointer to selected Tx burst function.
 */
eth_tx_burst_t
mlx5_select_tx_function(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	eth_tx_burst_t tx_pkt_burst = mlx5_tx_burst;
	struct mlx5_dev_config *config = &priv->config;
	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
	int tso = !!(tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
				    DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
				    DEV_TX_OFFLOAD_GRE_TNL_TSO |
				    DEV_TX_OFFLOAD_IP_TNL_TSO |
				    DEV_TX_OFFLOAD_UDP_TNL_TSO));
	int swp = !!(tx_offloads & (DEV_TX_OFFLOAD_IP_TNL_TSO |
				    DEV_TX_OFFLOAD_UDP_TNL_TSO |
				    DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM));
	int vlan_insert = !!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT);

	assert(priv != NULL);
	/* Select appropriate TX function. */
	if (vlan_insert || tso || swp)
		return tx_pkt_burst;
	if (config->mps == MLX5_MPW_ENHANCED) {
		if (mlx5_check_vec_tx_support(dev) > 0) {
			if (mlx5_check_raw_vec_tx_support(dev) > 0)
				tx_pkt_burst = mlx5_tx_burst_raw_vec;
			else
				tx_pkt_burst = mlx5_tx_burst_vec;
			DRV_LOG(DEBUG,
				"port %u selected enhanced MPW Tx vectorized"
				" function",
				dev->data->port_id);
		} else {
			tx_pkt_burst = mlx5_tx_burst_empw;
			DRV_LOG(DEBUG,
				"port %u selected enhanced MPW Tx function",
				dev->data->port_id);
		}
	} else if (config->mps && (config->txq_inline > 0)) {
		tx_pkt_burst = mlx5_tx_burst_mpw_inline;
		DRV_LOG(DEBUG, "port %u selected MPW inline Tx function",
			dev->data->port_id);
	} else if (config->mps) {
		tx_pkt_burst = mlx5_tx_burst_mpw;
		DRV_LOG(DEBUG, "port %u selected MPW Tx function",
			dev->data->port_id);
	}
	return tx_pkt_burst;
}

/**
 * Configure the RX function to use.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   Pointer to selected Rx burst function.
 */
eth_rx_burst_t
mlx5_select_rx_function(struct rte_eth_dev *dev)
{
	eth_rx_burst_t rx_pkt_burst = mlx5_rx_burst;

	assert(dev != NULL);
	if (mlx5_check_vec_rx_support(dev) > 0) {
		rx_pkt_burst = mlx5_rx_burst_vec;
		DRV_LOG(DEBUG, "port %u selected Rx vectorized function",
			dev->data->port_id);
	} else if (mlx5_mprq_enabled(dev)) {
		rx_pkt_burst = mlx5_rx_burst_mprq;
	}
	return rx_pkt_burst;
}

/**
 * Check if mlx5 device was removed.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   1 when device is removed, otherwise 0.
 */
int
mlx5_is_removed(struct rte_eth_dev *dev)
{
	struct ibv_device_attr device_attr;
	struct mlx5_priv *priv = dev->data->dev_private;

	if (mlx5_glue->query_device(priv->sh->ctx, &device_attr) == EIO)
		return 1;
	return 0;
}
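
/*
 * The helper below is typically called twice, as mlx5_dev_infos_get() does:
 * once with a zero-sized buffer to learn how many ports match, then again
 * with a buffer of that size, e.g.
 *
 *	n = mlx5_dev_to_port_id(dev->device, NULL, 0);
 *	uint16_t port_id[n];
 *	n = RTE_MIN(mlx5_dev_to_port_id(dev->device, port_id, n), n);
 */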

/**
 * Get port ID list of mlx5 instances sharing a common device.
 *
 * @param[in] dev
 *   Device to look for.
 * @param[out] port_list
 *   Result buffer for collected port IDs.
 * @param port_list_n
 *   Maximum number of entries in result buffer. If 0, @p port_list can be
 *   NULL.
 *
 * @return
 *   Number of matching instances regardless of the @p port_list_n
 *   parameter, 0 if none were found.
 */
unsigned int
mlx5_dev_to_port_id(const struct rte_device *dev, uint16_t *port_list,
		    unsigned int port_list_n)
{
	uint16_t id;
	unsigned int n = 0;

	RTE_ETH_FOREACH_DEV(id) {
		struct rte_eth_dev *ldev = &rte_eth_devices[id];

		if (ldev->device != dev)
			continue;
		if (n < port_list_n)
			port_list[n] = id;
		n++;
	}
	return n;
}

/**
 * Get switch information associated with network interface.
 *
 * @param ifindex
 *   Network interface index.
 * @param[out] info
 *   Switch information object, populated in case of success.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_sysfs_switch_info(unsigned int ifindex, struct mlx5_switch_info *info)
{
	char ifname[IF_NAMESIZE];
	char port_name[IF_NAMESIZE];
	FILE *file;
	struct mlx5_switch_info data = {
		.master = 0,
		.representor = 0,
		.port_name_new = 0,
		.port_name = 0,
		.switch_id = 0,
	};
	DIR *dir;
	bool port_name_set = false;
	bool port_switch_id_set = false;
	bool device_dir = false;
	char c;
	int ret;

	if (!if_indextoname(ifindex, ifname)) {
		rte_errno = errno;
		return -rte_errno;
	}

	MKSTR(phys_port_name, "/sys/class/net/%s/phys_port_name",
	      ifname);
	MKSTR(phys_switch_id, "/sys/class/net/%s/phys_switch_id",
	      ifname);
	MKSTR(pci_device, "/sys/class/net/%s/device",
	      ifname);

	file = fopen(phys_port_name, "rb");
	if (file != NULL) {
		ret = fscanf(file, "%s", port_name);
		fclose(file);
		if (ret == 1)
			port_name_set = mlx5_translate_port_name(port_name,
								 &data);
	}
	file = fopen(phys_switch_id, "rb");
	if (file == NULL) {
		rte_errno = errno;
		return -rte_errno;
	}
	port_switch_id_set =
		fscanf(file, "%" SCNx64 "%c", &data.switch_id, &c) == 2 &&
		c == '\n';
	fclose(file);
	dir = opendir(pci_device);
	if (dir != NULL) {
		closedir(dir);
		device_dir = true;
	}
	data.master = port_switch_id_set && (!port_name_set || device_dir);
	data.representor = port_switch_id_set && port_name_set && !device_dir;
	*info = data;
	assert(!(data.master && data.representor));
	if (data.master && data.representor) {
		DRV_LOG(ERR, "ifindex %u device is recognized as master"
			" and as representor", ifindex);
		rte_errno = ENODEV;
		return -rte_errno;
	}
	return 0;
}
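
/*
 * Examples of phys_port_name values handled below: "pf0vf2" (kernel >= 5.0)
 * yields port_name = 2 with port_name_new = 1, while a bare "2"
 * (kernel < 5.0) yields port_name = 2 with port_name_new left at 0.
 */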

/**
 * Extract port name, as a number, from sysfs or netlink information.
 *
 * @param[in] port_name_in
 *   String representing the port name.
 * @param[out] port_info_out
 *   Port information, including port name as a number.
 *
 * @return
 *   true on success, false otherwise.
 */
bool
mlx5_translate_port_name(const char *port_name_in,
			 struct mlx5_switch_info *port_info_out)
{
	char pf_c1, pf_c2, vf_c1, vf_c2;
	char *end;
	int32_t pf_num;
	bool port_name_set = false;

	/*
	 * Check for port-name as a string of the form pf0vf0
	 * (support kernel ver >= 5.0)
	 */
	port_name_set = (sscanf(port_name_in, "%c%c%d%c%c%d", &pf_c1, &pf_c2,
				&pf_num, &vf_c1, &vf_c2,
				&port_info_out->port_name) == 6);
	if (port_name_set) {
		port_info_out->port_name_new = 1;
	} else {
		/* Check for port-name as a number (support kernel ver < 5.0) */
		errno = 0;
		port_info_out->port_name = strtol(port_name_in, &end, 0);
		if (!errno &&
		    (size_t)(end - port_name_in) == strlen(port_name_in))
			port_name_set = true;
	}
	return port_name_set;
}