/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <assert.h>
#include <inttypes.h>
#include <unistd.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#include <dirent.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <fcntl.h>
#include <stdalign.h>
#include <sys/un.h>
#include <time.h>

#include <rte_atomic.h>
#include <rte_ethdev_driver.h>
#include <rte_bus_pci.h>
#include <rte_mbuf.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_malloc.h>
#include <rte_string_fns.h>
#include <rte_rwlock.h>
#include <rte_cycles.h>

#include "mlx5.h"
#include "mlx5_glue.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"

/* Supported speed values found in /usr/include/linux/ethtool.h */
#ifndef HAVE_SUPPORTED_40000baseKR4_Full
#define SUPPORTED_40000baseKR4_Full (1 << 23)
#endif
#ifndef HAVE_SUPPORTED_40000baseCR4_Full
#define SUPPORTED_40000baseCR4_Full (1 << 24)
#endif
#ifndef HAVE_SUPPORTED_40000baseSR4_Full
#define SUPPORTED_40000baseSR4_Full (1 << 25)
#endif
#ifndef HAVE_SUPPORTED_40000baseLR4_Full
#define SUPPORTED_40000baseLR4_Full (1 << 26)
#endif
#ifndef HAVE_SUPPORTED_56000baseKR4_Full
#define SUPPORTED_56000baseKR4_Full (1 << 27)
#endif
#ifndef HAVE_SUPPORTED_56000baseCR4_Full
#define SUPPORTED_56000baseCR4_Full (1 << 28)
#endif
#ifndef HAVE_SUPPORTED_56000baseSR4_Full
#define SUPPORTED_56000baseSR4_Full (1 << 29)
#endif
#ifndef HAVE_SUPPORTED_56000baseLR4_Full
#define SUPPORTED_56000baseLR4_Full (1 << 30)
#endif

/* Add defines in case the running kernel is not the same as user headers. */
#ifndef ETHTOOL_GLINKSETTINGS
struct ethtool_link_settings {
	uint32_t cmd;
	uint32_t speed;
	uint8_t duplex;
	uint8_t port;
	uint8_t phy_address;
	uint8_t autoneg;
	uint8_t mdio_support;
	uint8_t eth_tp_mdix;
	uint8_t eth_tp_mdix_ctrl;
	int8_t link_mode_masks_nwords;
	uint32_t reserved[8];
	uint32_t link_mode_masks[];
};

#define ETHTOOL_GLINKSETTINGS 0x0000004c
#define ETHTOOL_LINK_MODE_1000baseT_Full_BIT 5
#define ETHTOOL_LINK_MODE_Autoneg_BIT 6
#define ETHTOOL_LINK_MODE_1000baseKX_Full_BIT 17
#define ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT 18
#define ETHTOOL_LINK_MODE_10000baseKR_Full_BIT 19
#define ETHTOOL_LINK_MODE_10000baseR_FEC_BIT 20
#define ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT 21
#define ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT 22
#define ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT 23
#define ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT 24
#define ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT 25
#define ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT 26
#define ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT 27
#define ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT 28
#define ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT 29
#define ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT 30
#endif
#ifndef HAVE_ETHTOOL_LINK_MODE_25G
#define ETHTOOL_LINK_MODE_25000baseCR_Full_BIT 31
#define ETHTOOL_LINK_MODE_25000baseKR_Full_BIT 32
#define ETHTOOL_LINK_MODE_25000baseSR_Full_BIT 33
#endif
#ifndef HAVE_ETHTOOL_LINK_MODE_50G
#define ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT 34
#define ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT 35
#endif
#ifndef HAVE_ETHTOOL_LINK_MODE_100G
#define ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT 36
#define ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT 37
#define ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT 38
#define ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT 39
#endif

/**
 * Get master interface name from IB device path.
 *
 * @param[in] ibdev_path
 *   Pointer to IB device path.
 * @param[out] ifname
 *   Interface name output buffer.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_get_master_ifname(const char *ibdev_path, char (*ifname)[IF_NAMESIZE])
{
	DIR *dir;
	struct dirent *dent;
	unsigned int dev_type = 0;
	unsigned int dev_port_prev = ~0u;
	char match[IF_NAMESIZE] = "";

	assert(ibdev_path);
	{
		MKSTR(path, "%s/device/net", ibdev_path);

		dir = opendir(path);
		if (dir == NULL) {
			rte_errno = errno;
			return -rte_errno;
		}
	}
	while ((dent = readdir(dir)) != NULL) {
		char *name = dent->d_name;
		FILE *file;
		unsigned int dev_port;
		int r;

		if ((name[0] == '.') &&
		    ((name[1] == '\0') ||
		     ((name[1] == '.') && (name[2] == '\0'))))
			continue;

		MKSTR(path, "%s/device/net/%s/%s",
		      ibdev_path, name,
		      (dev_type ? "dev_id" : "dev_port"));

		file = fopen(path, "rb");
		if (file == NULL) {
			if (errno != ENOENT)
				continue;
			/*
			 * Switch to dev_id when dev_port does not exist as
			 * is the case with Linux kernel versions < 3.15.
			 */
try_dev_id:
			match[0] = '\0';
			if (dev_type)
				break;
			dev_type = 1;
			dev_port_prev = ~0u;
			rewinddir(dir);
			continue;
		}
		r = fscanf(file, (dev_type ? "%x" : "%u"), &dev_port);
		fclose(file);
		if (r != 1)
			continue;
		/*
		 * Switch to dev_id when dev_port returns the same value for
		 * all ports. May happen when using a MOFED release older than
		 * 3.0 with a Linux kernel >= 3.15.
		 */
		if (dev_port == dev_port_prev)
			goto try_dev_id;
		dev_port_prev = dev_port;
		if (dev_port == 0)
			strlcpy(match, name, sizeof(match));
	}
	closedir(dir);
	if (match[0] == '\0') {
		rte_errno = ENOENT;
		return -rte_errno;
	}
	strncpy(*ifname, match, sizeof(*ifname));
	return 0;
}

/**
 * Get interface name from private structure.
 *
 * This is a port representor-aware version of mlx5_get_master_ifname().
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[out] ifname
 *   Interface name output buffer.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE])
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int ifindex;

	assert(priv);
	assert(priv->sh);
	ifindex = priv->nl_socket_rdma >= 0 ?
		  mlx5_nl_ifindex(priv->nl_socket_rdma,
				  priv->sh->ibdev_name,
				  priv->ibv_port) : 0;
	if (!ifindex) {
		if (!priv->representor)
			return mlx5_get_master_ifname(priv->sh->ibdev_path,
						      ifname);
		rte_errno = ENXIO;
		return -rte_errno;
	}
	if (if_indextoname(ifindex, &(*ifname)[0]))
		return 0;
	rte_errno = errno;
	return -rte_errno;
}

/**
 * Get interface name for the specified device, using the extra base
 * device resources to perform Netlink requests.
 *
 * This is a port representor-aware version of mlx5_get_master_ifname().
 *
 * @param[in] base
 *   Pointer to Ethernet device to use the Netlink socket from
 *   to perform requests.
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[out] ifname
 *   Interface name output buffer.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_get_ifname_base(const struct rte_eth_dev *base,
		     const struct rte_eth_dev *dev,
		     char (*ifname)[IF_NAMESIZE])
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_priv *priv_base = base->data->dev_private;
	unsigned int ifindex;

	assert(priv);
	assert(priv->sh);
	assert(priv_base);
	ifindex = priv_base->nl_socket_rdma >= 0 ?
		  mlx5_nl_ifindex(priv_base->nl_socket_rdma,
				  priv->sh->ibdev_name,
				  priv->ibv_port) : 0;
	if (!ifindex) {
		if (!priv->representor)
			return mlx5_get_master_ifname(priv->sh->ibdev_path,
						      ifname);
		rte_errno = ENXIO;
		return -rte_errno;
	}
	if (if_indextoname(ifindex, &(*ifname)[0]))
		return 0;
	rte_errno = errno;
	return -rte_errno;
}

/**
 * Get the interface index from device name.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   Nonzero interface index on success, zero otherwise and rte_errno is set.
 */
unsigned int
mlx5_ifindex(const struct rte_eth_dev *dev)
{
	char ifname[IF_NAMESIZE];
	unsigned int ifindex;

	if (mlx5_get_ifname(dev, &ifname))
		return 0;
	ifindex = if_nametoindex(ifname);
	if (!ifindex)
		rte_errno = errno;
	return ifindex;
}

/**
 * Perform ifreq ioctl() on associated Ethernet device.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param req
 *   Request number to pass to ioctl().
 * @param[out] ifr
 *   Interface request structure output buffer.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr)
{
	int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
	int ret = 0;

	if (sock == -1) {
		rte_errno = errno;
		return -rte_errno;
	}
	ret = mlx5_get_ifname(dev, &ifr->ifr_name);
	if (ret)
		goto error;
	ret = ioctl(sock, req, ifr);
	if (ret == -1) {
		rte_errno = errno;
		goto error;
	}
	close(sock);
	return 0;
error:
	close(sock);
	return -rte_errno;
}

/**
 * Perform ifreq ioctl() on the specified Ethernet device. The ifindex,
 * name and other attributes are requested via the base device to avoid
 * sharing the Netlink socket of the specified device, which is not
 * thread-safe.
 *
 * @param[in] base
 *   Pointer to Ethernet device to get device attributes from.
 * @param[in] dev
 *   Pointer to Ethernet device to perform the ioctl on.
 * @param req
 *   Request number to pass to ioctl().
 * @param[out] ifr
 *   Interface request structure output buffer.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_ifreq_base(const struct rte_eth_dev *base,
		const struct rte_eth_dev *dev,
		int req, struct ifreq *ifr)
{
	int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
	int ret = 0;

	if (sock == -1) {
		rte_errno = errno;
		return -rte_errno;
	}
	ret = mlx5_get_ifname_base(base, dev, &ifr->ifr_name);
	if (ret)
		goto error;
	ret = ioctl(sock, req, ifr);
	if (ret == -1) {
		rte_errno = errno;
		goto error;
	}
	close(sock);
	return 0;
error:
	close(sock);
	return -rte_errno;
}

/**
 * Get device MTU.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[out] mtu
 *   MTU value output buffer.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu)
{
	struct ifreq request;
	int ret = mlx5_ifreq(dev, SIOCGIFMTU, &request);

	if (ret)
		return ret;
	*mtu = request.ifr_mtu;
	return 0;
}

/**
 * Set device MTU.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param mtu
 *   MTU value to set.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct ifreq request = { .ifr_mtu = mtu, };

	return mlx5_ifreq(dev, SIOCSIFMTU, &request);
}

/**
 * Set device flags.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param keep
 *   Bitmask for flags that must remain untouched.
 * @param flags
 *   Bitmask for flags to modify.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep, unsigned int flags)
{
	struct ifreq request;
	int ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &request);

	if (ret)
		return ret;
	request.ifr_flags &= keep;
	request.ifr_flags |= flags & ~keep;
	return mlx5_ifreq(dev, SIOCSIFFLAGS, &request);
}

/**
 * DPDK callback for Ethernet device configuration.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_dev_configure(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int rxqs_n = dev->data->nb_rx_queues;
	unsigned int txqs_n = dev->data->nb_tx_queues;
	unsigned int i;
	unsigned int j;
	unsigned int reta_idx_n;
	const uint8_t use_app_rss_key =
		!!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
	int ret = 0;

	if (use_app_rss_key &&
	    (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len !=
	     MLX5_RSS_HASH_KEY_LEN)) {
		DRV_LOG(ERR, "port %u RSS key len must be %s Bytes long",
			dev->data->port_id, RTE_STR(MLX5_RSS_HASH_KEY_LEN));
		rte_errno = EINVAL;
		return -rte_errno;
	}
	priv->rss_conf.rss_key =
		rte_realloc(priv->rss_conf.rss_key,
			    MLX5_RSS_HASH_KEY_LEN, 0);
	if (!priv->rss_conf.rss_key) {
		DRV_LOG(ERR, "port %u cannot allocate RSS hash key memory (%u)",
			dev->data->port_id, rxqs_n);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	memcpy(priv->rss_conf.rss_key,
	       use_app_rss_key ?
	       dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key :
	       rss_hash_default_key,
	       MLX5_RSS_HASH_KEY_LEN);
	priv->rss_conf.rss_key_len = MLX5_RSS_HASH_KEY_LEN;
	priv->rss_conf.rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	priv->rxqs = (void *)dev->data->rx_queues;
	priv->txqs = (void *)dev->data->tx_queues;
	if (txqs_n != priv->txqs_n) {
		DRV_LOG(INFO, "port %u Tx queues number update: %u -> %u",
			dev->data->port_id, priv->txqs_n, txqs_n);
		priv->txqs_n = txqs_n;
	}
	if (rxqs_n > priv->config.ind_table_max_size) {
		DRV_LOG(ERR, "port %u cannot handle this many Rx queues (%u)",
			dev->data->port_id, rxqs_n);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	if (rxqs_n != priv->rxqs_n) {
		DRV_LOG(INFO, "port %u Rx queues number update: %u -> %u",
			dev->data->port_id, priv->rxqs_n, rxqs_n);
		priv->rxqs_n = rxqs_n;
		/*
		 * If the requested number of Rx queues is not a power of two,
		 * use the maximum indirection table size for better balancing.
		 * The result is always rounded to the next power of two.
		 */
		reta_idx_n = (1 << log2above((rxqs_n & (rxqs_n - 1)) ?
					     priv->config.ind_table_max_size :
					     rxqs_n));
		ret = mlx5_rss_reta_index_resize(dev, reta_idx_n);
		if (ret)
			return ret;
		/*
		 * When the number of Rx queues is not a power of two,
		 * the remaining table entries are padded with reused WQs
		 * and hashes are not spread uniformly.
		 */
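		/*
		 * Worked example (illustrative numbers only): with rxqs_n = 6
		 * and an ind_table_max_size of 512 (the actual limit comes
		 * from the device configuration), reta_idx_n is 512 and the
		 * loop below fills the table with 0, 1, 2, 3, 4, 5, 0, 1, ...
		 * so queues 0-1 get 86 entries and queues 2-5 get 85 each.
		 * With a power of two such as rxqs_n = 8, reta_idx_n is 8 and
		 * the mapping is exactly 1:1.
		 */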
		for (i = 0, j = 0; (i != reta_idx_n); ++i) {
			(*priv->reta_idx)[i] = j;
			if (++j == rxqs_n)
				j = 0;
		}
	}
	ret = mlx5_proc_priv_init(dev);
	if (ret)
		return ret;
	return 0;
}

/**
 * Set default tuning parameters.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[out] info
 *   Info structure output buffer.
 */
static void
mlx5_set_default_params(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	/* Minimum CPU utilization. */
	info->default_rxportconf.ring_size = 256;
	info->default_txportconf.ring_size = 256;
	info->default_rxportconf.burst_size = 64;
	info->default_txportconf.burst_size = 64;
	if (priv->link_speed_capa & ETH_LINK_SPEED_100G) {
		info->default_rxportconf.nb_queues = 16;
		info->default_txportconf.nb_queues = 16;
		if (dev->data->nb_rx_queues > 2 ||
		    dev->data->nb_tx_queues > 2) {
			/* Max Throughput. */
			info->default_rxportconf.ring_size = 2048;
			info->default_txportconf.ring_size = 2048;
		}
	} else {
		info->default_rxportconf.nb_queues = 8;
		info->default_txportconf.nb_queues = 8;
		if (dev->data->nb_rx_queues > 2 ||
		    dev->data->nb_tx_queues > 2) {
			/* Max Throughput. */
			info->default_rxportconf.ring_size = 4096;
			info->default_txportconf.ring_size = 4096;
		}
	}
}

/**
 * DPDK callback to get information about the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] info
 *   Info structure output buffer.
 */
void
mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	unsigned int max;
	char ifname[IF_NAMESIZE];

	/* FIXME: we should ask the device for these values. */
	info->min_rx_bufsize = 32;
	info->max_rx_pktlen = 65536;
	/*
	 * Since we need one CQ per QP, the limit is the minimum number
	 * between the two values.
	 */
	max = RTE_MIN(priv->sh->device_attr.orig_attr.max_cq,
		      priv->sh->device_attr.orig_attr.max_qp);
	/* max_rx_queues is uint16_t, cap the value accordingly. */
	if (max >= 65535)
		max = 65535;
	info->max_rx_queues = max;
	info->max_tx_queues = max;
	info->max_mac_addrs = MLX5_MAX_UC_MAC_ADDRESSES;
	info->rx_queue_offload_capa = mlx5_get_rx_queue_offloads(dev);
	info->rx_offload_capa = (mlx5_get_rx_port_offloads() |
				 info->rx_queue_offload_capa);
	info->tx_offload_capa = mlx5_get_tx_port_offloads(dev);
	if (mlx5_get_ifname(dev, &ifname) == 0)
		info->if_index = if_nametoindex(ifname);
	info->reta_size = priv->reta_idx_n ?
		priv->reta_idx_n : config->ind_table_max_size;
	info->hash_key_size = MLX5_RSS_HASH_KEY_LEN;
	info->speed_capa = priv->link_speed_capa;
	info->flow_type_rss_offloads = ~MLX5_RSS_HF_MASK;
	mlx5_set_default_params(dev, info);
	info->switch_info.name = dev->data->name;
	info->switch_info.domain_id = priv->domain_id;
	info->switch_info.port_id = priv->representor_id;
	if (priv->representor) {
		unsigned int i = mlx5_dev_to_port_id(dev->device, NULL, 0);
		uint16_t port_id[i];

		i = RTE_MIN(mlx5_dev_to_port_id(dev->device, port_id, i), i);
		while (i--) {
			struct mlx5_priv *opriv =
				rte_eth_devices[port_id[i]].data->dev_private;

			if (!opriv ||
			    opriv->representor ||
			    opriv->domain_id != priv->domain_id)
				continue;
			/*
			 * Override switch name with that of the master
			 * device.
			 */
			info->switch_info.name = opriv->dev_data->name;
			break;
		}
	}
}

/**
 * Get device current raw clock counter.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] clock
 *   Current raw clock counter of the device.
 *
 * @return
 *   0 if the clock has correctly been read, the value of errno otherwise.
 */
int
mlx5_read_clock(struct rte_eth_dev *dev, uint64_t *clock)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_context *ctx = priv->sh->ctx;
	struct ibv_values_ex values;
	int err = 0;

	values.comp_mask = IBV_VALUES_MASK_RAW_CLOCK;
	err = mlx5_glue->query_rt_values_ex(ctx, &values);
	if (err != 0) {
		DRV_LOG(WARNING, "Could not query the clock!");
		return err;
	}
	*clock = values.raw_clock.tv_nsec;
	return 0;
}

/**
 * Get firmware version of a device.
 *
 * @param dev
 *   Ethernet device port.
 * @param fw_ver
 *   String output allocated by caller.
 * @param fw_size
 *   Size of the output string, including terminating null byte.
 *
 * @return
 *   0 on success, or the size of the non-truncated string if too big.
 */
int
mlx5_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_device_attr *attr = &priv->sh->device_attr.orig_attr;
	size_t size = strnlen(attr->fw_ver, sizeof(attr->fw_ver)) + 1;

	if (fw_size < size)
		return size;
	if (fw_ver != NULL)
		strlcpy(fw_ver, attr->fw_ver, fw_size);
	return 0;
}

/**
 * Get supported packet types.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   A pointer to the supported packet types array.
 */
const uint32_t *
mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/* refers to rxq_cq_to_pkt_type() */
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_NONFRAG,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L4_NONFRAG,
		RTE_PTYPE_INNER_L4_FRAG,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == mlx5_rx_burst ||
	    dev->rx_pkt_burst == mlx5_rx_burst_mprq ||
	    dev->rx_pkt_burst == mlx5_rx_burst_vec)
		return ptypes;
	return NULL;
}

/**
 * Retrieve the master device for a representor in the same switch domain.
 *
 * @param dev
 *   Pointer to representor Ethernet device structure.
 *
 * @return
 *   Master device structure on success, NULL otherwise.
 */
static struct rte_eth_dev *
mlx5_find_master_dev(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv;
	uint16_t port_id;
	uint16_t domain_id;

	priv = dev->data->dev_private;
	domain_id = priv->domain_id;
	assert(priv->representor);
	RTE_ETH_FOREACH_DEV_OF(port_id, dev->device) {
		priv = rte_eth_devices[port_id].data->dev_private;
		if (priv &&
		    priv->master &&
		    priv->domain_id == domain_id)
			return &rte_eth_devices[port_id];
	}
	return NULL;
}

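/*
 * The two helpers below query the same physical link information through
 * different kernel interfaces: mlx5_link_update() first tries the
 * ETHTOOL_GLINKSETTINGS based helper and, when the kernel returns -ENOTSUP,
 * falls back to the legacy ETHTOOL_GSET one (deprecated since Linux v4.5).
 */
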
/**
 * Retrieve physical link information (unlocked version using the legacy
 * ETHTOOL_GSET ioctl).
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] link
 *   Storage for current link status.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev,
			       struct rte_eth_link *link)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ethtool_cmd edata = {
		.cmd = ETHTOOL_GSET /* Deprecated since Linux v4.5. */
	};
	struct ifreq ifr;
	struct rte_eth_link dev_link;
	int link_speed = 0;
	int ret;

	ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr);
	if (ret) {
		DRV_LOG(WARNING, "port %u ioctl(SIOCGIFFLAGS) failed: %s",
			dev->data->port_id, strerror(rte_errno));
		return ret;
	}
	dev_link = (struct rte_eth_link) {
		.link_status = ((ifr.ifr_flags & IFF_UP) &&
				(ifr.ifr_flags & IFF_RUNNING)),
	};
	ifr = (struct ifreq) {
		.ifr_data = (void *)&edata,
	};
	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
	if (ret) {
		if (ret == -ENOTSUP && priv->representor) {
			struct rte_eth_dev *master;

			/*
			 * For representors we can try to inherit link
			 * settings from the master device. Actually
			 * link settings do not make a lot of sense
			 * for representors due to the missing physical
			 * link. The old kernel drivers supported
			 * emulated settings query for representors,
			 * the new ones do not, so we have to keep
			 * this code for compatibility.
			 */
			master = mlx5_find_master_dev(dev);
			if (master) {
				ifr = (struct ifreq) {
					.ifr_data = (void *)&edata,
				};
				/*
				 * Use the special version of mlx5_ifreq()
				 * to query the master device name with the
				 * local device Netlink socket. Using the
				 * master device Netlink socket is not
				 * thread-safe.
				 */
				ret = mlx5_ifreq_base(dev, master,
						      SIOCETHTOOL, &ifr);
			}
		}
		if (ret) {
			DRV_LOG(WARNING,
				"port %u ioctl(SIOCETHTOOL,"
				" ETHTOOL_GSET) failed: %s",
				dev->data->port_id, strerror(rte_errno));
			return ret;
		}
	}
	link_speed = ethtool_cmd_speed(&edata);
	if (link_speed == -1)
		dev_link.link_speed = ETH_SPEED_NUM_NONE;
	else
		dev_link.link_speed = link_speed;
	priv->link_speed_capa = 0;
	if (edata.supported & SUPPORTED_Autoneg)
		priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG;
	if (edata.supported & (SUPPORTED_1000baseT_Full |
			       SUPPORTED_1000baseKX_Full))
		priv->link_speed_capa |= ETH_LINK_SPEED_1G;
	if (edata.supported & SUPPORTED_10000baseKR_Full)
		priv->link_speed_capa |= ETH_LINK_SPEED_10G;
	if (edata.supported & (SUPPORTED_40000baseKR4_Full |
			       SUPPORTED_40000baseCR4_Full |
			       SUPPORTED_40000baseSR4_Full |
			       SUPPORTED_40000baseLR4_Full))
		priv->link_speed_capa |= ETH_LINK_SPEED_40G;
	dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
				ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
	dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
				  ETH_LINK_SPEED_FIXED);
	if (((dev_link.link_speed && !dev_link.link_status) ||
	     (!dev_link.link_speed && dev_link.link_status))) {
		rte_errno = EAGAIN;
		return -rte_errno;
	}
	*link = dev_link;
	return 0;
}

/**
 * Retrieve physical link information (unlocked version using the new
 * ETHTOOL_GLINKSETTINGS ioctl).
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] link
 *   Storage for current link status.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev,
			     struct rte_eth_link *link)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ethtool_link_settings gcmd = { .cmd = ETHTOOL_GLINKSETTINGS };
	struct ifreq ifr;
	struct rte_eth_link dev_link;
	struct rte_eth_dev *master = NULL;
	uint64_t sc;
	int ret;

	ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr);
	if (ret) {
		DRV_LOG(WARNING, "port %u ioctl(SIOCGIFFLAGS) failed: %s",
			dev->data->port_id, strerror(rte_errno));
		return ret;
	}
	dev_link = (struct rte_eth_link) {
		.link_status = ((ifr.ifr_flags & IFF_UP) &&
				(ifr.ifr_flags & IFF_RUNNING)),
	};
	ifr = (struct ifreq) {
		.ifr_data = (void *)&gcmd,
	};
	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
	if (ret) {
		if (ret == -ENOTSUP && priv->representor) {
			/*
			 * For representors we can try to inherit link
			 * settings from the master device. Actually
			 * link settings do not make a lot of sense
			 * for representors due to the missing physical
			 * link. The old kernel drivers supported
			 * emulated settings query for representors,
			 * the new ones do not, so we have to keep
			 * this code for compatibility.
			 */
			master = mlx5_find_master_dev(dev);
			if (master) {
				ifr = (struct ifreq) {
					.ifr_data = (void *)&gcmd,
				};
				/*
				 * Avoid using the master Netlink socket.
				 * This is not thread-safe.
				 */
				ret = mlx5_ifreq_base(dev, master,
						      SIOCETHTOOL, &ifr);
			}
		}
		if (ret) {
			DRV_LOG(DEBUG,
				"port %u ioctl(SIOCETHTOOL,"
				" ETHTOOL_GLINKSETTINGS) failed: %s",
				dev->data->port_id, strerror(rte_errno));
			return ret;
		}
	}
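	/*
	 * ETHTOOL_GLINKSETTINGS is a two-step handshake: the request above
	 * was sent with link_mode_masks_nwords set to zero, so the kernel
	 * replied with the required number of 32-bit mask words as a
	 * negative value. Flip the sign and repeat the request with room
	 * for the three masks (supported, advertising, lp_advertising).
	 */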
	gcmd.link_mode_masks_nwords = -gcmd.link_mode_masks_nwords;

	alignas(struct ethtool_link_settings)
	uint8_t data[offsetof(struct ethtool_link_settings, link_mode_masks) +
		     sizeof(uint32_t) * gcmd.link_mode_masks_nwords * 3];
	struct ethtool_link_settings *ecmd = (void *)data;

	*ecmd = gcmd;
	ifr.ifr_data = (void *)ecmd;
	ret = mlx5_ifreq_base(dev, master ? master : dev, SIOCETHTOOL, &ifr);
	if (ret) {
		DRV_LOG(DEBUG,
			"port %u ioctl(SIOCETHTOOL,"
			" ETHTOOL_GLINKSETTINGS) failed: %s",
			dev->data->port_id, strerror(rte_errno));
		return ret;
	}
	dev_link.link_speed = ecmd->speed;
	sc = ecmd->link_mode_masks[0] |
	     ((uint64_t)ecmd->link_mode_masks[1] << 32);
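	/*
	 * Only the first two 32-bit words of the supported-modes mask are
	 * examined, i.e. bits 0-63, which covers every
	 * ETHTOOL_LINK_MODE_*_BIT tested below (the highest used is 39).
	 */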
	priv->link_speed_capa = 0;
	if (sc & MLX5_BITSHIFT(ETHTOOL_LINK_MODE_Autoneg_BIT))
		priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseT_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_1G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_10G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_20G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_40G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_56G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_25G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_50G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_100G;
	dev_link.link_duplex = ((ecmd->duplex == DUPLEX_HALF) ?
				ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
	dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
				  ETH_LINK_SPEED_FIXED);
	if (((dev_link.link_speed && !dev_link.link_status) ||
	     (!dev_link.link_speed && dev_link.link_status))) {
		rte_errno = EAGAIN;
		return -rte_errno;
	}
	*link = dev_link;
	return 0;
}

/**
 * DPDK callback to retrieve physical link information.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param wait_to_complete
 *   Wait for request completion.
 *
 * @return
 *   0 if link status was not updated, positive if it was, a negative errno
 *   value otherwise and rte_errno is set.
 */
int
mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	int ret;
	struct rte_eth_link dev_link;
	time_t start_time = time(NULL);

	do {
		ret = mlx5_link_update_unlocked_gs(dev, &dev_link);
		if (ret == -ENOTSUP)
			ret = mlx5_link_update_unlocked_gset(dev, &dev_link);
		if (ret == 0)
			break;
		/* Handle wait to complete situation. */
		if (wait_to_complete && ret == -EAGAIN) {
			if (abs((int)difftime(time(NULL), start_time)) <
			    MLX5_LINK_STATUS_TIMEOUT) {
				usleep(0);
				continue;
			} else {
				rte_errno = EBUSY;
				return -rte_errno;
			}
		} else if (ret < 0) {
			return ret;
		}
	} while (wait_to_complete);
	ret = !!memcmp(&dev->data->dev_link, &dev_link,
		       sizeof(struct rte_eth_link));
	dev->data->dev_link = dev_link;
	return ret;
}

/**
 * DPDK callback to change the MTU.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param mtu
 *   New MTU.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint16_t kern_mtu = 0;
	int ret;

	ret = mlx5_get_mtu(dev, &kern_mtu);
	if (ret)
		return ret;
	/* Set kernel interface MTU first. */
	ret = mlx5_set_mtu(dev, mtu);
	if (ret)
		return ret;
	ret = mlx5_get_mtu(dev, &kern_mtu);
	if (ret)
		return ret;
	if (kern_mtu == mtu) {
		priv->mtu = mtu;
		DRV_LOG(DEBUG, "port %u adapter MTU set to %u",
			dev->data->port_id, mtu);
		return 0;
	}
	rte_errno = EAGAIN;
	return -rte_errno;
}

/**
 * DPDK callback to get flow control status.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] fc_conf
 *   Flow control output buffer.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct ifreq ifr;
	struct ethtool_pauseparam ethpause = {
		.cmd = ETHTOOL_GPAUSEPARAM
	};
	int ret;

	ifr.ifr_data = (void *)&ethpause;
	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
	if (ret) {
		DRV_LOG(WARNING,
			"port %u ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM)"
			" failed: %s",
			dev->data->port_id, strerror(rte_errno));
		return ret;
	}
	fc_conf->autoneg = ethpause.autoneg;
	if (ethpause.rx_pause && ethpause.tx_pause)
		fc_conf->mode = RTE_FC_FULL;
	else if (ethpause.rx_pause)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (ethpause.tx_pause)
		fc_conf->mode = RTE_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_FC_NONE;
	return 0;
}

/**
 * DPDK callback to modify flow control parameters.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[in] fc_conf
 *   Flow control parameters.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct ifreq ifr;
	struct ethtool_pauseparam ethpause = {
		.cmd = ETHTOOL_SPAUSEPARAM
	};
	int ret;

	ifr.ifr_data = (void *)&ethpause;
	ethpause.autoneg = fc_conf->autoneg;
	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
	    (fc_conf->mode & RTE_FC_RX_PAUSE))
		ethpause.rx_pause = 1;
	else
		ethpause.rx_pause = 0;

	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
	    (fc_conf->mode & RTE_FC_TX_PAUSE))
		ethpause.tx_pause = 1;
	else
		ethpause.tx_pause = 0;
	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
	if (ret) {
		DRV_LOG(WARNING,
			"port %u ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)"
			" failed: %s",
			dev->data->port_id, strerror(rte_errno));
		return ret;
	}
	return 0;
}

/**
 * Get PCI information from struct ibv_device.
 *
 * @param device
 *   Pointer to IB device structure.
 * @param[out] pci_addr
 *   PCI bus address output buffer.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_ibv_device_to_pci_addr(const struct ibv_device *device,
			    struct rte_pci_addr *pci_addr)
{
	FILE *file;
	char line[32];
	int found = 0;
	MKSTR(path, "%s/device/uevent", device->ibdev_path);

	file = fopen(path, "rb");
	if (file == NULL) {
		rte_errno = errno;
		return -rte_errno;
	}
	while (fgets(line, sizeof(line), file) == line) {
		size_t len = strlen(line);
		int ret;

		/* Truncate long lines. */
		if (len == (sizeof(line) - 1))
			while (line[(len - 1)] != '\n') {
				ret = fgetc(file);
				if (ret == EOF)
					break;
				line[(len - 1)] = ret;
			}
		/* Extract information. */
		if (sscanf(line,
			   "PCI_SLOT_NAME="
			   "%" SCNx32 ":%" SCNx8 ":%" SCNx8 ".%" SCNx8 "\n",
			   &pci_addr->domain,
			   &pci_addr->bus,
			   &pci_addr->devid,
			   &pci_addr->function) == 4) {
			found = 1;
			break;
		}
	}
	fclose(file);
	if (!found) {
		rte_errno = ENOENT;
		return -rte_errno;
	}
	return 0;
}

/**
 * Handle asynchronous removal event for the entire multiport device.
 *
 * @param sh
 *   Infiniband device shared context.
 */
static void
mlx5_dev_interrupt_device_fatal(struct mlx5_ibv_shared *sh)
{
	uint32_t i;

	for (i = 0; i < sh->max_port; ++i) {
		struct rte_eth_dev *dev;

		if (sh->port[i].ih_port_id >= RTE_MAX_ETHPORTS) {
			/*
			 * The port either does not exist or has no
			 * handler installed.
			 */
			continue;
		}
		dev = &rte_eth_devices[sh->port[i].ih_port_id];
		assert(dev);
		if (dev->data->dev_conf.intr_conf.rmv)
			_rte_eth_dev_callback_process
				(dev, RTE_ETH_EVENT_INTR_RMV, NULL);
	}
}

/**
 * Handle shared asynchronous events from the NIC (removal event
 * and link status change). Supports multiport IB devices.
 *
 * @param cb_arg
 *   Callback argument.
 */
void
mlx5_dev_interrupt_handler(void *cb_arg)
{
	struct mlx5_ibv_shared *sh = cb_arg;
	struct ibv_async_event event;

	/* Read all messages from the IB device and acknowledge them. */
	for (;;) {
		struct rte_eth_dev *dev;
		uint32_t tmp;

		if (mlx5_glue->get_async_event(sh->ctx, &event))
			break;
		/* Retrieve and check IB port index. */
		tmp = (uint32_t)event.element.port_num;
		if (!tmp && event.event_type == IBV_EVENT_DEVICE_FATAL) {
			/*
			 * The DEVICE_FATAL event is reported once for the
			 * entire device without specifying a port.
			 * We should notify all existing ports.
			 */
			mlx5_glue->ack_async_event(&event);
			mlx5_dev_interrupt_device_fatal(sh);
			continue;
		}
		assert(tmp && (tmp <= sh->max_port));
		if (!tmp) {
			/* Unsupported device level event. */
			mlx5_glue->ack_async_event(&event);
			DRV_LOG(DEBUG,
				"unsupported common event (type %d)",
				event.event_type);
			continue;
		}
		if (tmp > sh->max_port) {
			/* Invalid IB port index. */
			mlx5_glue->ack_async_event(&event);
			DRV_LOG(DEBUG,
				"cannot handle an event (type %d)"
				" due to invalid IB port index (%u)",
				event.event_type, tmp);
			continue;
		}
		if (sh->port[tmp - 1].ih_port_id >= RTE_MAX_ETHPORTS) {
			/* No handler installed. */
			mlx5_glue->ack_async_event(&event);
			DRV_LOG(DEBUG,
				"cannot handle an event (type %d)"
				" due to no handler installed for port %u",
				event.event_type, tmp);
			continue;
		}
		/* Retrieve Ethernet device descriptor. */
		tmp = sh->port[tmp - 1].ih_port_id;
		dev = &rte_eth_devices[tmp];
		assert(dev);
		if ((event.event_type == IBV_EVENT_PORT_ACTIVE ||
		     event.event_type == IBV_EVENT_PORT_ERR) &&
		    dev->data->dev_conf.intr_conf.lsc) {
			mlx5_glue->ack_async_event(&event);
			if (mlx5_link_update(dev, 0) == -EAGAIN) {
				usleep(0);
				continue;
			}
			_rte_eth_dev_callback_process
				(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
			continue;
		}
		DRV_LOG(DEBUG,
			"port %u cannot handle an unknown event (type %d)",
			dev->data->port_id, event.event_type);
		mlx5_glue->ack_async_event(&event);
	}
}

/**
 * Unregister the callback handler safely. The handler may be active
 * while we are trying to unregister it, in which case -EAGAIN is
 * returned by rte_intr_callback_unregister(). This routine checks
 * the return code and tries to unregister the handler again.
 *
 * @param handle
 *   Interrupt handle.
 * @param cb_fn
 *   Pointer to callback routine.
 * @param cb_arg
 *   Opaque callback parameter.
 */
void
mlx5_intr_callback_unregister(const struct rte_intr_handle *handle,
			      rte_intr_callback_fn cb_fn, void *cb_arg)
{
	/*
	 * Try to reduce timeout management overhead by not calling
	 * the timer related routines on the first iteration. If the
	 * unregistering succeeds on the first call there will be no
	 * timer calls at all.
	 */
	uint64_t twait = 0;
	uint64_t start = 0;

	do {
		int ret;

		ret = rte_intr_callback_unregister(handle, cb_fn, cb_arg);
		if (ret >= 0)
			return;
		if (ret != -EAGAIN) {
			DRV_LOG(INFO, "failed to unregister interrupt"
				" handler (error: %d)", ret);
			assert(false);
			return;
		}
		if (twait) {
			struct timespec onems;

			/* Wait one millisecond and try again. */
			onems.tv_sec = 0;
			onems.tv_nsec = NS_PER_S / MS_PER_S;
			nanosleep(&onems, 0);
			/* Check whether one second elapsed. */
			if ((rte_get_timer_cycles() - start) <= twait)
				continue;
		} else {
			/*
			 * We get the amount of timer ticks for one second.
			 * If this amount elapsed it means we spent one
			 * second in waiting. This branch is executed once
			 * on the first iteration.
			 */
1418 */ 1419 twait = rte_get_timer_hz(); 1420 assert(twait); 1421 } 1422 /* 1423 * Timeout elapsed, show message (once a second) and retry. 1424 * We have no other acceptable option here, if we ignore 1425 * the unregistering return code the handler will not 1426 * be unregistered, fd will be closed and we may get the 1427 * crush. Hanging and messaging in the loop seems not to be 1428 * the worst choice. 1429 */ 1430 DRV_LOG(INFO, "Retrying to unregister interrupt handler"); 1431 start = rte_get_timer_cycles(); 1432 } while (true); 1433 } 1434 1435 /** 1436 * Handle DEVX interrupts from the NIC. 1437 * This function is probably called from the DPDK host thread. 1438 * 1439 * @param cb_arg 1440 * Callback argument. 1441 */ 1442 void 1443 mlx5_dev_interrupt_handler_devx(void *cb_arg) 1444 { 1445 #ifndef HAVE_IBV_DEVX_ASYNC 1446 (void)cb_arg; 1447 return; 1448 #else 1449 struct mlx5_ibv_shared *sh = cb_arg; 1450 union { 1451 struct mlx5dv_devx_async_cmd_hdr cmd_resp; 1452 uint8_t buf[MLX5_ST_SZ_BYTES(query_flow_counter_out) + 1453 MLX5_ST_SZ_BYTES(traffic_counter) + 1454 sizeof(struct mlx5dv_devx_async_cmd_hdr)]; 1455 } out; 1456 uint8_t *buf = out.buf + sizeof(out.cmd_resp); 1457 1458 while (!mlx5_glue->devx_get_async_cmd_comp(sh->devx_comp, 1459 &out.cmd_resp, 1460 sizeof(out.buf))) 1461 mlx5_flow_async_pool_query_handle 1462 (sh, (uint64_t)out.cmd_resp.wr_id, 1463 mlx5_devx_get_out_command_status(buf)); 1464 #endif /* HAVE_IBV_DEVX_ASYNC */ 1465 } 1466 1467 /** 1468 * Uninstall shared asynchronous device events handler. 1469 * This function is implemented to support event sharing 1470 * between multiple ports of single IB device. 1471 * 1472 * @param dev 1473 * Pointer to Ethernet device. 1474 */ 1475 static void 1476 mlx5_dev_shared_handler_uninstall(struct rte_eth_dev *dev) 1477 { 1478 struct mlx5_priv *priv = dev->data->dev_private; 1479 struct mlx5_ibv_shared *sh = priv->sh; 1480 1481 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1482 return; 1483 pthread_mutex_lock(&sh->intr_mutex); 1484 assert(priv->ibv_port); 1485 assert(priv->ibv_port <= sh->max_port); 1486 assert(dev->data->port_id < RTE_MAX_ETHPORTS); 1487 if (sh->port[priv->ibv_port - 1].ih_port_id >= RTE_MAX_ETHPORTS) 1488 goto exit; 1489 assert(sh->port[priv->ibv_port - 1].ih_port_id == 1490 (uint32_t)dev->data->port_id); 1491 assert(sh->intr_cnt); 1492 sh->port[priv->ibv_port - 1].ih_port_id = RTE_MAX_ETHPORTS; 1493 if (!sh->intr_cnt || --sh->intr_cnt) 1494 goto exit; 1495 mlx5_intr_callback_unregister(&sh->intr_handle, 1496 mlx5_dev_interrupt_handler, sh); 1497 sh->intr_handle.fd = 0; 1498 sh->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN; 1499 if (sh->intr_handle_devx.fd) { 1500 rte_intr_callback_unregister(&sh->intr_handle_devx, 1501 mlx5_dev_interrupt_handler_devx, 1502 sh); 1503 sh->intr_handle_devx.fd = 0; 1504 sh->intr_handle_devx.type = RTE_INTR_HANDLE_UNKNOWN; 1505 } 1506 if (sh->devx_comp) { 1507 mlx5_glue->devx_destroy_cmd_comp(sh->devx_comp); 1508 sh->devx_comp = NULL; 1509 } 1510 exit: 1511 pthread_mutex_unlock(&sh->intr_mutex); 1512 } 1513 1514 /** 1515 * Install shared asynchronous device events handler. 1516 * This function is implemented to support event sharing 1517 * between multiple ports of single IB device. 1518 * 1519 * @param dev 1520 * Pointer to Ethernet device. 
1521 */ 1522 static void 1523 mlx5_dev_shared_handler_install(struct rte_eth_dev *dev) 1524 { 1525 struct mlx5_priv *priv = dev->data->dev_private; 1526 struct mlx5_ibv_shared *sh = priv->sh; 1527 int ret; 1528 int flags; 1529 1530 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1531 return; 1532 pthread_mutex_lock(&sh->intr_mutex); 1533 assert(priv->ibv_port); 1534 assert(priv->ibv_port <= sh->max_port); 1535 assert(dev->data->port_id < RTE_MAX_ETHPORTS); 1536 if (sh->port[priv->ibv_port - 1].ih_port_id < RTE_MAX_ETHPORTS) { 1537 /* The handler is already installed for this port. */ 1538 assert(sh->intr_cnt); 1539 goto exit; 1540 } 1541 sh->port[priv->ibv_port - 1].ih_port_id = (uint32_t)dev->data->port_id; 1542 if (sh->intr_cnt) { 1543 sh->intr_cnt++; 1544 goto exit; 1545 } 1546 /* No shared handler installed. */ 1547 assert(sh->ctx->async_fd > 0); 1548 flags = fcntl(sh->ctx->async_fd, F_GETFL); 1549 ret = fcntl(sh->ctx->async_fd, F_SETFL, flags | O_NONBLOCK); 1550 if (ret) { 1551 DRV_LOG(INFO, "failed to change file descriptor" 1552 " async event queue"); 1553 goto error; 1554 } 1555 sh->intr_handle.fd = sh->ctx->async_fd; 1556 sh->intr_handle.type = RTE_INTR_HANDLE_EXT; 1557 rte_intr_callback_register(&sh->intr_handle, 1558 mlx5_dev_interrupt_handler, sh); 1559 if (priv->config.devx) { 1560 #ifndef HAVE_IBV_DEVX_ASYNC 1561 goto error_unregister; 1562 #else 1563 sh->devx_comp = mlx5_glue->devx_create_cmd_comp(sh->ctx); 1564 if (sh->devx_comp) { 1565 flags = fcntl(sh->devx_comp->fd, F_GETFL); 1566 ret = fcntl(sh->devx_comp->fd, F_SETFL, 1567 flags | O_NONBLOCK); 1568 if (ret) { 1569 DRV_LOG(INFO, "failed to change file descriptor" 1570 " devx async event queue"); 1571 goto error_unregister; 1572 } 1573 sh->intr_handle_devx.fd = sh->devx_comp->fd; 1574 sh->intr_handle_devx.type = RTE_INTR_HANDLE_EXT; 1575 rte_intr_callback_register 1576 (&sh->intr_handle_devx, 1577 mlx5_dev_interrupt_handler_devx, sh); 1578 } else { 1579 DRV_LOG(INFO, "failed to create devx async command " 1580 "completion"); 1581 goto error_unregister; 1582 } 1583 #endif /* HAVE_IBV_DEVX_ASYNC */ 1584 } 1585 sh->intr_cnt++; 1586 goto exit; 1587 error_unregister: 1588 rte_intr_callback_unregister(&sh->intr_handle, 1589 mlx5_dev_interrupt_handler, sh); 1590 error: 1591 /* Indicate there will be no interrupts. */ 1592 dev->data->dev_conf.intr_conf.lsc = 0; 1593 dev->data->dev_conf.intr_conf.rmv = 0; 1594 sh->intr_handle.fd = 0; 1595 sh->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN; 1596 sh->port[priv->ibv_port - 1].ih_port_id = RTE_MAX_ETHPORTS; 1597 exit: 1598 pthread_mutex_unlock(&sh->intr_mutex); 1599 } 1600 1601 /** 1602 * Uninstall interrupt handler. 1603 * 1604 * @param dev 1605 * Pointer to Ethernet device. 1606 */ 1607 void 1608 mlx5_dev_interrupt_handler_uninstall(struct rte_eth_dev *dev) 1609 { 1610 mlx5_dev_shared_handler_uninstall(dev); 1611 } 1612 1613 /** 1614 * Install interrupt handler. 1615 * 1616 * @param dev 1617 * Pointer to Ethernet device. 1618 */ 1619 void 1620 mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev) 1621 { 1622 mlx5_dev_shared_handler_install(dev); 1623 } 1624 1625 /** 1626 * DPDK callback to bring the link DOWN. 1627 * 1628 * @param dev 1629 * Pointer to Ethernet device structure. 1630 * 1631 * @return 1632 * 0 on success, a negative errno value otherwise and rte_errno is set. 1633 */ 1634 int 1635 mlx5_set_link_down(struct rte_eth_dev *dev) 1636 { 1637 return mlx5_set_flags(dev, ~IFF_UP, ~IFF_UP); 1638 } 1639 1640 /** 1641 * DPDK callback to bring the link UP. 
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_set_link_up(struct rte_eth_dev *dev)
{
	return mlx5_set_flags(dev, ~IFF_UP, IFF_UP);
}

/**
 * Configure the Tx function to use.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   Pointer to selected Tx burst function.
 */
eth_tx_burst_t
mlx5_select_tx_function(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	eth_tx_burst_t tx_pkt_burst = mlx5_tx_burst;
	struct mlx5_dev_config *config = &priv->config;
	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
	int tso = !!(tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
				    DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
				    DEV_TX_OFFLOAD_GRE_TNL_TSO |
				    DEV_TX_OFFLOAD_IP_TNL_TSO |
				    DEV_TX_OFFLOAD_UDP_TNL_TSO));
	int swp = !!(tx_offloads & (DEV_TX_OFFLOAD_IP_TNL_TSO |
				    DEV_TX_OFFLOAD_UDP_TNL_TSO |
				    DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM));
	int vlan_insert = !!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT);

	assert(priv != NULL);
	/* Select appropriate Tx function. */
	if (vlan_insert || tso || swp)
		return tx_pkt_burst;
	if (config->mps == MLX5_MPW_ENHANCED) {
		if (mlx5_check_vec_tx_support(dev) > 0) {
			if (mlx5_check_raw_vec_tx_support(dev) > 0)
				tx_pkt_burst = mlx5_tx_burst_raw_vec;
			else
				tx_pkt_burst = mlx5_tx_burst_vec;
			DRV_LOG(DEBUG,
				"port %u selected enhanced MPW Tx vectorized"
				" function",
				dev->data->port_id);
		} else {
			tx_pkt_burst = mlx5_tx_burst_empw;
			DRV_LOG(DEBUG,
				"port %u selected enhanced MPW Tx function",
				dev->data->port_id);
		}
	} else if (config->mps && (config->txq_inline > 0)) {
		tx_pkt_burst = mlx5_tx_burst_mpw_inline;
		DRV_LOG(DEBUG, "port %u selected MPW inline Tx function",
			dev->data->port_id);
	} else if (config->mps) {
		tx_pkt_burst = mlx5_tx_burst_mpw;
		DRV_LOG(DEBUG, "port %u selected MPW Tx function",
			dev->data->port_id);
	}
	return tx_pkt_burst;
}

/**
 * Configure the Rx function to use.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   Pointer to selected Rx burst function.
 */
eth_rx_burst_t
mlx5_select_rx_function(struct rte_eth_dev *dev)
{
	eth_rx_burst_t rx_pkt_burst = mlx5_rx_burst;

	assert(dev != NULL);
	if (mlx5_check_vec_rx_support(dev) > 0) {
		rx_pkt_burst = mlx5_rx_burst_vec;
		DRV_LOG(DEBUG, "port %u selected Rx vectorized function",
			dev->data->port_id);
	} else if (mlx5_mprq_enabled(dev)) {
		rx_pkt_burst = mlx5_rx_burst_mprq;
	}
	return rx_pkt_burst;
}

/**
 * Check if mlx5 device was removed.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   1 when device is removed, otherwise 0.
 */
int
mlx5_is_removed(struct rte_eth_dev *dev)
{
	struct ibv_device_attr device_attr;
	struct mlx5_priv *priv = dev->data->dev_private;

	if (mlx5_glue->query_device(priv->sh->ctx, &device_attr) == EIO)
		return 1;
	return 0;
}

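/*
 * A sketch of the usual calling pattern for mlx5_dev_to_port_id() below,
 * mirroring what mlx5_dev_infos_get() does above: query the number of
 * matching ports first, then collect their IDs.
 *
 *	unsigned int n = mlx5_dev_to_port_id(dev->device, NULL, 0);
 *	uint16_t port_id[n];
 *
 *	n = RTE_MIN(mlx5_dev_to_port_id(dev->device, port_id, n), n);
 */
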
/**
 * Get port ID list of mlx5 instances sharing a common device.
 *
 * @param[in] dev
 *   Device to look for.
 * @param[out] port_list
 *   Result buffer for collected port IDs.
 * @param port_list_n
 *   Maximum number of entries in result buffer. If 0, @p port_list can be
 *   NULL.
 *
 * @return
 *   Number of matching instances regardless of the @p port_list_n
 *   parameter, 0 if none were found.
 */
unsigned int
mlx5_dev_to_port_id(const struct rte_device *dev, uint16_t *port_list,
		    unsigned int port_list_n)
{
	uint16_t id;
	unsigned int n = 0;

	RTE_ETH_FOREACH_DEV_OF(id, dev) {
		if (n < port_list_n)
			port_list[n] = id;
		n++;
	}
	return n;
}

/**
 * Get the E-Switch domain id this port belongs to.
 *
 * @param[in] port
 *   Device port id.
 * @param[out] es_domain_id
 *   E-Switch domain id.
 * @param[out] es_port_id
 *   The port id of the port in the E-Switch.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_port_to_eswitch_info(uint16_t port,
			  uint16_t *es_domain_id, uint16_t *es_port_id)
{
	struct rte_eth_dev *dev;
	struct mlx5_priv *priv;

	if (port >= RTE_MAX_ETHPORTS) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	if (!rte_eth_dev_is_valid_port(port)) {
		rte_errno = ENODEV;
		return -rte_errno;
	}
	dev = &rte_eth_devices[port];
	priv = dev->data->dev_private;
	if (!(priv->representor || priv->master)) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	if (es_domain_id)
		*es_domain_id = priv->domain_id;
	if (es_port_id)
		*es_port_id = priv->vport_id;
	return 0;
}

/**
 * Get switch information associated with network interface.
 *
 * @param ifindex
 *   Network interface index.
 * @param[out] info
 *   Switch information object, populated in case of success.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_sysfs_switch_info(unsigned int ifindex, struct mlx5_switch_info *info)
{
	char ifname[IF_NAMESIZE];
	char port_name[IF_NAMESIZE];
	FILE *file;
	struct mlx5_switch_info data = {
		.master = 0,
		.representor = 0,
		.name_type = MLX5_PHYS_PORT_NAME_TYPE_NOTSET,
		.port_name = 0,
		.switch_id = 0,
	};
	DIR *dir;
	bool port_switch_id_set = false;
	bool device_dir = false;
	char c;
	int ret;

	if (!if_indextoname(ifindex, ifname)) {
		rte_errno = errno;
		return -rte_errno;
	}

	MKSTR(phys_port_name, "/sys/class/net/%s/phys_port_name",
	      ifname);
	MKSTR(phys_switch_id, "/sys/class/net/%s/phys_switch_id",
	      ifname);
	MKSTR(pci_device, "/sys/class/net/%s/device",
	      ifname);

	file = fopen(phys_port_name, "rb");
	if (file != NULL) {
		ret = fscanf(file, "%s", port_name);
		fclose(file);
		if (ret == 1)
			mlx5_translate_port_name(port_name, &data);
	}
	file = fopen(phys_switch_id, "rb");
	if (file == NULL) {
		rte_errno = errno;
		return -rte_errno;
	}
	port_switch_id_set =
		fscanf(file, "%" SCNx64 "%c", &data.switch_id, &c) == 2 &&
		c == '\n';
	fclose(file);
	dir = opendir(pci_device);
	if (dir != NULL) {
		closedir(dir);
		device_dir = true;
	}
	if (port_switch_id_set) {
		/* We have some E-Switch configuration. */
		mlx5_sysfs_check_switch_info(device_dir, &data);
	}
	*info = data;
	assert(!(data.master && data.representor));
	if (data.master && data.representor) {
		DRV_LOG(ERR, "ifindex %u device is recognized as master"
			" and as representor", ifindex);
		rte_errno = ENODEV;
		return -rte_errno;
	}
	return 0;
}

/**
 * Analyze gathered port parameters via Netlink to recognize master
 * and representor devices for E-Switch configuration.
 *
 * @param[in] num_vf_set
 *   flag of presence of number of VFs port attribute.
 * @param[inout] switch_info
 *   Port information, including port name as a number and port name
 *   type if recognized
 *
 * @return
 *   master and representor flags are set in switch_info according to
 *   recognized parameters (if any).
 */
void
mlx5_nl_check_switch_info(bool num_vf_set,
			  struct mlx5_switch_info *switch_info)
{
	switch (switch_info->name_type) {
	case MLX5_PHYS_PORT_NAME_TYPE_UNKNOWN:
		/*
		 * Name is not recognized, assume the master,
		 * check the number of VFs key presence.
		 */
		switch_info->master = num_vf_set;
		break;
	case MLX5_PHYS_PORT_NAME_TYPE_NOTSET:
		/*
		 * Name is not set, this assumes the legacy naming
		 * schema for master, just check if there is a
		 * number of VFs key.
		 */
		switch_info->master = num_vf_set;
		break;
	case MLX5_PHYS_PORT_NAME_TYPE_UPLINK:
		/* New uplink naming schema recognized. */
		switch_info->master = 1;
		break;
	case MLX5_PHYS_PORT_NAME_TYPE_LEGACY:
		/* Legacy representors naming schema. */
		switch_info->representor = !num_vf_set;
		break;
	case MLX5_PHYS_PORT_NAME_TYPE_PFVF:
		/* New representors naming schema. */
		switch_info->representor = 1;
		break;
	}
}

/**
 * Analyze gathered port parameters via sysfs to recognize master
 * and representor devices for E-Switch configuration.
 *
 * @param[in] device_dir
 *   flag of presence of "device" directory under port device key.
 * @param[inout] switch_info
 *   Port information, including port name as a number and port name
 *   type if recognized
 *
 * @return
 *   master and representor flags are set in switch_info according to
 *   recognized parameters (if any).
 */
void
mlx5_sysfs_check_switch_info(bool device_dir,
			     struct mlx5_switch_info *switch_info)
{
	switch (switch_info->name_type) {
	case MLX5_PHYS_PORT_NAME_TYPE_UNKNOWN:
		/*
		 * Name is not recognized, assume the master,
		 * check the device directory presence.
		 */
		switch_info->master = device_dir;
		break;
	case MLX5_PHYS_PORT_NAME_TYPE_NOTSET:
		/*
		 * Name is not set, this assumes the legacy naming
		 * schema for master, just check if there is
		 * a device directory.
		 */
		switch_info->master = device_dir;
		break;
	case MLX5_PHYS_PORT_NAME_TYPE_UPLINK:
		/* New uplink naming schema recognized. */
		switch_info->master = 1;
		break;
	case MLX5_PHYS_PORT_NAME_TYPE_LEGACY:
		/* Legacy representors naming schema. */
		switch_info->representor = !device_dir;
		break;
	case MLX5_PHYS_PORT_NAME_TYPE_PFVF:
		/* New representors naming schema. */
		switch_info->representor = 1;
		break;
	}
}

/**
 * Extract port name, as a number, from sysfs or netlink information.
 *
 * @param[in] port_name_in
 *   String representing the port name.
 * @param[out] port_info_out
 *   Port information, including port name as a number and port name
 *   type if recognized.
 *
 * @return
 *   port_name field set according to recognized name format.
 */
void
mlx5_translate_port_name(const char *port_name_in,
			 struct mlx5_switch_info *port_info_out)
{
	char pf_c1, pf_c2, vf_c1, vf_c2;
	char *end;
	int sc_items;

	/*
	 * Check for port-name as a string of the form pf0vf0
	 * (support kernel ver >= 5.0 or OFED ver >= 4.6).
	 */
	sc_items = sscanf(port_name_in, "%c%c%d%c%c%d",
			  &pf_c1, &pf_c2, &port_info_out->pf_num,
			  &vf_c1, &vf_c2, &port_info_out->port_name);
	if (sc_items == 6 &&
	    pf_c1 == 'p' && pf_c2 == 'f' &&
	    vf_c1 == 'v' && vf_c2 == 'f') {
		port_info_out->name_type = MLX5_PHYS_PORT_NAME_TYPE_PFVF;
		return;
	}
	/*
	 * Check for port-name as a string of the form p0
	 * (support kernel ver >= 5.0, or OFED ver >= 4.6).
	 */
	sc_items = sscanf(port_name_in, "%c%d",
			  &pf_c1, &port_info_out->port_name);
	if (sc_items == 2 && pf_c1 == 'p') {
		port_info_out->name_type = MLX5_PHYS_PORT_NAME_TYPE_UPLINK;
		return;
	}
	/* Check for port-name as a number (support kernel ver < 5.0). */
	errno = 0;
	port_info_out->port_name = strtol(port_name_in, &end, 0);
	if (!errno &&
	    (size_t)(end - port_name_in) == strlen(port_name_in)) {
		port_info_out->name_type = MLX5_PHYS_PORT_NAME_TYPE_LEGACY;
		return;
	}
	port_info_out->name_type = MLX5_PHYS_PORT_NAME_TYPE_UNKNOWN;
	return;
}