/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <assert.h>
#include <inttypes.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#include <dirent.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <fcntl.h>
#include <stdalign.h>
#include <sys/un.h>
#include <time.h>

#include <rte_atomic.h>
#include <rte_ethdev_driver.h>
#include <rte_bus_pci.h>
#include <rte_mbuf.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_malloc.h>
#include <rte_string_fns.h>
#include <rte_rwlock.h>
#include <rte_cycles.h>

#include "mlx5.h"
#include "mlx5_glue.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"

/* Supported speed values found in /usr/include/linux/ethtool.h */
#ifndef HAVE_SUPPORTED_40000baseKR4_Full
#define SUPPORTED_40000baseKR4_Full (1 << 23)
#endif
#ifndef HAVE_SUPPORTED_40000baseCR4_Full
#define SUPPORTED_40000baseCR4_Full (1 << 24)
#endif
#ifndef HAVE_SUPPORTED_40000baseSR4_Full
#define SUPPORTED_40000baseSR4_Full (1 << 25)
#endif
#ifndef HAVE_SUPPORTED_40000baseLR4_Full
#define SUPPORTED_40000baseLR4_Full (1 << 26)
#endif
#ifndef HAVE_SUPPORTED_56000baseKR4_Full
#define SUPPORTED_56000baseKR4_Full (1 << 27)
#endif
#ifndef HAVE_SUPPORTED_56000baseCR4_Full
#define SUPPORTED_56000baseCR4_Full (1 << 28)
#endif
#ifndef HAVE_SUPPORTED_56000baseSR4_Full
#define SUPPORTED_56000baseSR4_Full (1 << 29)
#endif
#ifndef HAVE_SUPPORTED_56000baseLR4_Full
#define SUPPORTED_56000baseLR4_Full (1 << 30)
#endif

/* Add defines in case the running kernel is not the same as user headers. */
#ifndef ETHTOOL_GLINKSETTINGS
struct ethtool_link_settings {
	uint32_t cmd;
	uint32_t speed;
	uint8_t duplex;
	uint8_t port;
	uint8_t phy_address;
	uint8_t autoneg;
	uint8_t mdio_support;
	uint8_t eth_to_mdix;
	uint8_t eth_tp_mdix_ctrl;
	int8_t link_mode_masks_nwords;
	uint32_t reserved[8];
	uint32_t link_mode_masks[];
};

#define ETHTOOL_GLINKSETTINGS 0x0000004c
#define ETHTOOL_LINK_MODE_1000baseT_Full_BIT 5
#define ETHTOOL_LINK_MODE_Autoneg_BIT 6
#define ETHTOOL_LINK_MODE_1000baseKX_Full_BIT 17
#define ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT 18
#define ETHTOOL_LINK_MODE_10000baseKR_Full_BIT 19
#define ETHTOOL_LINK_MODE_10000baseR_FEC_BIT 20
#define ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT 21
#define ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT 22
#define ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT 23
#define ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT 24
#define ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT 25
#define ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT 26
#define ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT 27
#define ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT 28
#define ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT 29
#define ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT 30
#endif
#ifndef HAVE_ETHTOOL_LINK_MODE_25G
#define ETHTOOL_LINK_MODE_25000baseCR_Full_BIT 31
#define ETHTOOL_LINK_MODE_25000baseKR_Full_BIT 32
#define ETHTOOL_LINK_MODE_25000baseSR_Full_BIT 33
#endif
#ifndef HAVE_ETHTOOL_LINK_MODE_50G
#define ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT 34
#define ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT 35
#endif
#ifndef HAVE_ETHTOOL_LINK_MODE_100G
#define ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT 36
#define ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT 37
#define ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT 38
#define ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT 39
#endif
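
/*
 * Note on the fallback ETHTOOL_GLINKSETTINGS layout above: the kernel
 * appends three link mode bitmaps (supported, advertising and link
 * partner advertising) to the fixed header, each of them
 * link_mode_masks_nwords 32-bit words long. A reply buffer for N words
 * per bitmap can therefore be sized as sketched below (illustrative
 * only, this mirrors what mlx5_link_update_unlocked_gs() does further
 * down in this file):
 *
 *   struct ethtool_link_settings gcmd = { .cmd = ETHTOOL_GLINKSETTINGS };
 *   // ... first SIOCETHTOOL call fills gcmd.link_mode_masks_nwords ...
 *   size_t sz = offsetof(struct ethtool_link_settings, link_mode_masks) +
 *               sizeof(uint32_t) * nwords * 3; // supported/advert/lp advert
 */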

/**
 * Get master interface name from sysfs for the given IB device path.
 *
 * @param[in] ibdev_path
 *   Pointer to IB device path.
 * @param[out] ifname
 *   Interface name output buffer.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_get_master_ifname(const char *ibdev_path, char (*ifname)[IF_NAMESIZE])
{
	DIR *dir;
	struct dirent *dent;
	unsigned int dev_type = 0;
	unsigned int dev_port_prev = ~0u;
	char match[IF_NAMESIZE] = "";

	assert(ibdev_path);
	{
		MKSTR(path, "%s/device/net", ibdev_path);

		dir = opendir(path);
		if (dir == NULL) {
			rte_errno = errno;
			return -rte_errno;
		}
	}
	while ((dent = readdir(dir)) != NULL) {
		char *name = dent->d_name;
		FILE *file;
		unsigned int dev_port;
		int r;

		if ((name[0] == '.') &&
		    ((name[1] == '\0') ||
		     ((name[1] == '.') && (name[2] == '\0'))))
			continue;

		MKSTR(path, "%s/device/net/%s/%s",
		      ibdev_path, name,
		      (dev_type ? "dev_id" : "dev_port"));

		file = fopen(path, "rb");
		if (file == NULL) {
			if (errno != ENOENT)
				continue;
			/*
			 * Switch to dev_id when dev_port does not exist as
			 * is the case with Linux kernel versions < 3.15.
			 */
try_dev_id:
			match[0] = '\0';
			if (dev_type)
				break;
			dev_type = 1;
			dev_port_prev = ~0u;
			rewinddir(dir);
			continue;
		}
		r = fscanf(file, (dev_type ? "%x" : "%u"), &dev_port);
		fclose(file);
		if (r != 1)
			continue;
		/*
		 * Switch to dev_id when dev_port returns the same value for
		 * all ports. May happen when using a MOFED release older than
		 * 3.0 with a Linux kernel >= 3.15.
		 */
		if (dev_port == dev_port_prev)
			goto try_dev_id;
		dev_port_prev = dev_port;
		if (dev_port == 0)
			strlcpy(match, name, sizeof(match));
	}
	closedir(dir);
	if (match[0] == '\0') {
		rte_errno = ENOENT;
		return -rte_errno;
	}
	strncpy(*ifname, match, sizeof(*ifname));
	return 0;
}

/**
 * Get interface name from private structure.
 *
 * This is a port representor-aware version of mlx5_get_master_ifname().
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[out] ifname
 *   Interface name output buffer.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE])
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int ifindex;

	assert(priv);
	assert(priv->sh);
	ifindex = priv->nl_socket_rdma >= 0 ?
		  mlx5_nl_ifindex(priv->nl_socket_rdma,
				  priv->sh->ibdev_name,
				  priv->ibv_port) : 0;
	if (!ifindex) {
		if (!priv->representor)
			return mlx5_get_master_ifname(priv->sh->ibdev_path,
						      ifname);
		rte_errno = ENXIO;
		return -rte_errno;
	}
	if (if_indextoname(ifindex, &(*ifname)[0]))
		return 0;
	rte_errno = errno;
	return -rte_errno;
}

/**
 * Get the interface index from device name.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   Nonzero interface index on success, zero otherwise and rte_errno is set.
 */
unsigned int
mlx5_ifindex(const struct rte_eth_dev *dev)
{
	char ifname[IF_NAMESIZE];
	unsigned int ifindex;

	if (mlx5_get_ifname(dev, &ifname))
		return 0;
	ifindex = if_nametoindex(ifname);
	if (!ifindex)
		rte_errno = errno;
	return ifindex;
}

/**
 * Perform ifreq ioctl() on associated Ethernet device.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param req
 *   Request number to pass to ioctl().
 * @param[out] ifr
 *   Interface request structure output buffer.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr)
{
	int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
	int ret = 0;

	if (sock == -1) {
		rte_errno = errno;
		return -rte_errno;
	}
	ret = mlx5_get_ifname(dev, &ifr->ifr_name);
	if (ret)
		goto error;
	ret = ioctl(sock, req, ifr);
	if (ret == -1) {
		rte_errno = errno;
		goto error;
	}
	close(sock);
	return 0;
error:
	close(sock);
	return -rte_errno;
}

/**
 * Get device MTU.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[out] mtu
 *   MTU value output buffer.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu)
{
	struct ifreq request;
	int ret = mlx5_ifreq(dev, SIOCGIFMTU, &request);

	if (ret)
		return ret;
	*mtu = request.ifr_mtu;
	return 0;
}
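
/*
 * Usage sketch (illustrative only): mlx5_ifreq() is the building block
 * for all the ioctl-based helpers below. Reading the current interface
 * flags, for example, looks like this, which is essentially what
 * mlx5_set_flags() and the link update routines do:
 *
 *   struct ifreq request;
 *   int ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &request);
 *
 *   if (!ret && (request.ifr_flags & IFF_UP))
 *       ; // interface is administratively up
 */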

/**
 * Set device MTU.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param mtu
 *   MTU value to set.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct ifreq request = { .ifr_mtu = mtu, };

	return mlx5_ifreq(dev, SIOCSIFMTU, &request);
}

/**
 * Set device flags.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param keep
 *   Bitmask for flags that must remain untouched.
 * @param flags
 *   Bitmask for flags to modify.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep, unsigned int flags)
{
	struct ifreq request;
	int ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &request);

	if (ret)
		return ret;
	request.ifr_flags &= keep;
	request.ifr_flags |= flags & ~keep;
	return mlx5_ifreq(dev, SIOCSIFFLAGS, &request);
}

/**
 * DPDK callback for Ethernet device configuration.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_dev_configure(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int rxqs_n = dev->data->nb_rx_queues;
	unsigned int txqs_n = dev->data->nb_tx_queues;
	unsigned int i;
	unsigned int j;
	unsigned int reta_idx_n;
	const uint8_t use_app_rss_key =
		!!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
	int ret = 0;

	if (use_app_rss_key &&
	    (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len !=
	     MLX5_RSS_HASH_KEY_LEN)) {
		DRV_LOG(ERR, "port %u RSS key len must be %s Bytes long",
			dev->data->port_id, RTE_STR(MLX5_RSS_HASH_KEY_LEN));
		rte_errno = EINVAL;
		return -rte_errno;
	}
	priv->rss_conf.rss_key =
		rte_realloc(priv->rss_conf.rss_key,
			    MLX5_RSS_HASH_KEY_LEN, 0);
	if (!priv->rss_conf.rss_key) {
		DRV_LOG(ERR, "port %u cannot allocate RSS hash key memory (%u)",
			dev->data->port_id, rxqs_n);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	memcpy(priv->rss_conf.rss_key,
	       use_app_rss_key ?
	       dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key :
	       rss_hash_default_key,
	       MLX5_RSS_HASH_KEY_LEN);
	priv->rss_conf.rss_key_len = MLX5_RSS_HASH_KEY_LEN;
	priv->rss_conf.rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	priv->rxqs = (void *)dev->data->rx_queues;
	priv->txqs = (void *)dev->data->tx_queues;
	if (txqs_n != priv->txqs_n) {
		DRV_LOG(INFO, "port %u Tx queues number update: %u -> %u",
			dev->data->port_id, priv->txqs_n, txqs_n);
		priv->txqs_n = txqs_n;
	}
	if (rxqs_n > priv->config.ind_table_max_size) {
		DRV_LOG(ERR, "port %u cannot handle this many Rx queues (%u)",
			dev->data->port_id, rxqs_n);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	if (rxqs_n != priv->rxqs_n) {
		DRV_LOG(INFO, "port %u Rx queues number update: %u -> %u",
			dev->data->port_id, priv->rxqs_n, rxqs_n);
		priv->rxqs_n = rxqs_n;
		/*
		 * If the requested number of RX queues is not a power of two,
		 * use the maximum indirection table size for better balancing.
		 * The result is always rounded to the next power of two.
		 */
		reta_idx_n = (1 << log2above((rxqs_n & (rxqs_n - 1)) ?
					     priv->config.ind_table_max_size :
					     rxqs_n));
		ret = mlx5_rss_reta_index_resize(dev, reta_idx_n);
		if (ret)
			return ret;
		/*
		 * When the number of RX queues is not a power of two,
		 * the remaining table entries are padded with reused WQs
		 * and hashes are not spread uniformly.
		 */
		for (i = 0, j = 0; (i != reta_idx_n); ++i) {
			(*priv->reta_idx)[i] = j;
			if (++j == rxqs_n)
				j = 0;
		}
	}
	ret = mlx5_proc_priv_init(dev);
	if (ret)
		return ret;
	return 0;
}
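
/*
 * Worked example for the indirection table sizing above (illustrative
 * numbers): with rxqs_n = 4 (a power of two) reta_idx_n is 4 and the
 * table is simply 0,1,2,3. With rxqs_n = 6 and
 * config.ind_table_max_size = 512, reta_idx_n becomes 512 and the
 * entries are filled round-robin as 0,1,2,3,4,5,0,1,... so the last
 * entries reuse the first WQs and the hash spreading is slightly
 * uneven.
 */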

/**
 * Sets default tuning parameters.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[out] info
 *   Info structure output buffer.
 */
static void
mlx5_set_default_params(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	/* Minimum CPU utilization. */
	info->default_rxportconf.ring_size = 256;
	info->default_txportconf.ring_size = 256;
	info->default_rxportconf.burst_size = 64;
	info->default_txportconf.burst_size = 64;
	if (priv->link_speed_capa & ETH_LINK_SPEED_100G) {
		info->default_rxportconf.nb_queues = 16;
		info->default_txportconf.nb_queues = 16;
		if (dev->data->nb_rx_queues > 2 ||
		    dev->data->nb_tx_queues > 2) {
			/* Max Throughput. */
			info->default_rxportconf.ring_size = 2048;
			info->default_txportconf.ring_size = 2048;
		}
	} else {
		info->default_rxportconf.nb_queues = 8;
		info->default_txportconf.nb_queues = 8;
		if (dev->data->nb_rx_queues > 2 ||
		    dev->data->nb_tx_queues > 2) {
			/* Max Throughput. */
			info->default_rxportconf.ring_size = 4096;
			info->default_txportconf.ring_size = 4096;
		}
	}
}
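
/*
 * These defaults are only hints: an application may read them back
 * through rte_eth_dev_info_get() and use them when it has no better
 * tuning of its own, e.g. (illustrative sketch, port_id is assumed to
 * be a valid port):
 *
 *   struct rte_eth_dev_info dev_info;
 *
 *   rte_eth_dev_info_get(port_id, &dev_info);
 *   uint16_t nb_rxd = dev_info.default_rxportconf.ring_size;
 *   uint16_t burst = dev_info.default_rxportconf.burst_size;
 */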

/**
 * DPDK callback to get information about the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] info
 *   Info structure output buffer.
 */
void
mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	unsigned int max;
	char ifname[IF_NAMESIZE];

	/* FIXME: we should ask the device for these values. */
	info->min_rx_bufsize = 32;
	info->max_rx_pktlen = 65536;
	/*
	 * Since we need one CQ per QP, the limit is the minimum number
	 * between the two values.
	 */
	max = RTE_MIN(priv->sh->device_attr.orig_attr.max_cq,
		      priv->sh->device_attr.orig_attr.max_qp);
	/* If max >= 65535 then max = 0, max_rx_queues is uint16_t. */
	if (max >= 65535)
		max = 65535;
	info->max_rx_queues = max;
	info->max_tx_queues = max;
	info->max_mac_addrs = MLX5_MAX_UC_MAC_ADDRESSES;
	info->rx_queue_offload_capa = mlx5_get_rx_queue_offloads(dev);
	info->rx_offload_capa = (mlx5_get_rx_port_offloads() |
				 info->rx_queue_offload_capa);
	info->tx_offload_capa = mlx5_get_tx_port_offloads(dev);
	if (mlx5_get_ifname(dev, &ifname) == 0)
		info->if_index = if_nametoindex(ifname);
	info->reta_size = priv->reta_idx_n ?
		priv->reta_idx_n : config->ind_table_max_size;
	info->hash_key_size = MLX5_RSS_HASH_KEY_LEN;
	info->speed_capa = priv->link_speed_capa;
	info->flow_type_rss_offloads = ~MLX5_RSS_HF_MASK;
	mlx5_set_default_params(dev, info);
	info->switch_info.name = dev->data->name;
	info->switch_info.domain_id = priv->domain_id;
	info->switch_info.port_id = priv->representor_id;
	if (priv->representor) {
		unsigned int i = mlx5_dev_to_port_id(dev->device, NULL, 0);
		uint16_t port_id[i];

		i = RTE_MIN(mlx5_dev_to_port_id(dev->device, port_id, i), i);
		while (i--) {
			struct mlx5_priv *opriv =
				rte_eth_devices[port_id[i]].data->dev_private;

			if (!opriv ||
			    opriv->representor ||
			    opriv->domain_id != priv->domain_id)
				continue;
			/*
			 * Override switch name with that of the master
			 * device.
			 */
			info->switch_info.name = opriv->dev_data->name;
			break;
		}
	}
}

/**
 * Get device current raw clock counter.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] clock
 *   Current raw clock counter of the device.
 *
 * @return
 *   0 if the clock has correctly been read,
 *   the value of errno in case of error.
 */
int
mlx5_read_clock(struct rte_eth_dev *dev, uint64_t *clock)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_context *ctx = priv->sh->ctx;
	struct ibv_values_ex values;
	int err = 0;

	values.comp_mask = IBV_VALUES_MASK_RAW_CLOCK;
	err = mlx5_glue->query_rt_values_ex(ctx, &values);
	if (err != 0) {
		DRV_LOG(WARNING, "Could not query the clock!");
		return err;
	}
	*clock = values.raw_clock.tv_nsec;
	return 0;
}

/**
 * Get firmware version of a device.
 *
 * @param dev
 *   Ethernet device port.
 * @param fw_ver
 *   String output allocated by caller.
 * @param fw_size
 *   Size of the output string, including terminating null byte.
 *
 * @return
 *   0 on success, or the size of the non-truncated string if too big.
 */
int mlx5_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_device_attr *attr = &priv->sh->device_attr.orig_attr;
	size_t size = strnlen(attr->fw_ver, sizeof(attr->fw_ver)) + 1;

	if (fw_size < size)
		return size;
	if (fw_ver != NULL)
		strlcpy(fw_ver, attr->fw_ver, fw_size);
	return 0;
}
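
/*
 * The return contract of mlx5_fw_version_get() follows the usual
 * "query size first" pattern; a caller that does not know the needed
 * buffer size could handle it like this (illustrative sketch):
 *
 *   char fw[64];
 *   int len = mlx5_fw_version_get(dev, fw, sizeof(fw));
 *
 *   if (len > 0)
 *       ; // buffer too small, len bytes (including '\0') are required
 */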

/**
 * Get supported packet types.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   A pointer to the supported Packet types array.
 */
const uint32_t *
mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/* refers to rxq_cq_to_pkt_type() */
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_NONFRAG,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L4_NONFRAG,
		RTE_PTYPE_INNER_L4_FRAG,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == mlx5_rx_burst ||
	    dev->rx_pkt_burst == mlx5_rx_burst_mprq ||
	    dev->rx_pkt_burst == mlx5_rx_burst_vec)
		return ptypes;
	return NULL;
}

/**
 * Retrieve the master device for representor in the same switch domain.
 *
 * @param dev
 *   Pointer to representor Ethernet device structure.
 *
 * @return
 *   Master device structure on success, NULL otherwise.
 */
static struct rte_eth_dev *
mlx5_find_master_dev(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv;
	uint16_t port_id;
	uint16_t domain_id;

	priv = dev->data->dev_private;
	domain_id = priv->domain_id;
	assert(priv->representor);
	RTE_ETH_FOREACH_DEV_OF(port_id, dev->device) {
		priv = rte_eth_devices[port_id].data->dev_private;
		if (priv &&
		    priv->master &&
		    priv->domain_id == domain_id)
			return &rte_eth_devices[port_id];
	}
	return NULL;
}

/**
 * Retrieve physical link information (unlocked version using legacy ioctl).
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] link
 *   Storage for current link status.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev,
			       struct rte_eth_link *link)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ethtool_cmd edata = {
		.cmd = ETHTOOL_GSET /* Deprecated since Linux v4.5. */
	};
	struct ifreq ifr;
	struct rte_eth_link dev_link;
	int link_speed = 0;
	int ret;

	ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr);
	if (ret) {
		DRV_LOG(WARNING, "port %u ioctl(SIOCGIFFLAGS) failed: %s",
			dev->data->port_id, strerror(rte_errno));
		return ret;
	}
	dev_link = (struct rte_eth_link) {
		.link_status = ((ifr.ifr_flags & IFF_UP) &&
				(ifr.ifr_flags & IFF_RUNNING)),
	};
	ifr = (struct ifreq) {
		.ifr_data = (void *)&edata,
	};
	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
	if (ret) {
		if (ret == -ENOTSUP && priv->representor) {
			struct rte_eth_dev *master;

			/*
			 * For representors we can try to inherit link
			 * settings from the master device. Actually
			 * link settings do not make a lot of sense
			 * for representors due to missing physical
			 * link. The old kernel drivers supported
			 * emulated settings query for representors,
			 * the new ones do not, so we have to add
			 * this code for compatibility.
			 */
			master = mlx5_find_master_dev(dev);
			if (master) {
				ifr = (struct ifreq) {
					.ifr_data = (void *)&edata,
				};
				ret = mlx5_ifreq(master, SIOCETHTOOL, &ifr);
			}
		}
		if (ret) {
			DRV_LOG(WARNING,
				"port %u ioctl(SIOCETHTOOL,"
				" ETHTOOL_GSET) failed: %s",
				dev->data->port_id, strerror(rte_errno));
			return ret;
		}
	}
	link_speed = ethtool_cmd_speed(&edata);
	if (link_speed == -1)
		dev_link.link_speed = ETH_SPEED_NUM_NONE;
	else
		dev_link.link_speed = link_speed;
	priv->link_speed_capa = 0;
	if (edata.supported & SUPPORTED_Autoneg)
		priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG;
	if (edata.supported & (SUPPORTED_1000baseT_Full |
			       SUPPORTED_1000baseKX_Full))
		priv->link_speed_capa |= ETH_LINK_SPEED_1G;
	if (edata.supported & SUPPORTED_10000baseKR_Full)
		priv->link_speed_capa |= ETH_LINK_SPEED_10G;
	if (edata.supported & (SUPPORTED_40000baseKR4_Full |
			       SUPPORTED_40000baseCR4_Full |
			       SUPPORTED_40000baseSR4_Full |
			       SUPPORTED_40000baseLR4_Full))
		priv->link_speed_capa |= ETH_LINK_SPEED_40G;
	dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
				ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
	dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
				  ETH_LINK_SPEED_FIXED);
	if (((dev_link.link_speed && !dev_link.link_status) ||
	     (!dev_link.link_speed && dev_link.link_status))) {
		rte_errno = EAGAIN;
		return -rte_errno;
	}
	*link = dev_link;
	return 0;
}
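
/*
 * Worked example (illustrative): if the kernel reports
 * edata.supported = SUPPORTED_Autoneg | SUPPORTED_40000baseCR4_Full,
 * the code above sets
 * priv->link_speed_capa = ETH_LINK_SPEED_AUTONEG | ETH_LINK_SPEED_40G.
 * The final check returns -EAGAIN when speed and status disagree
 * (e.g. a nonzero speed while the link is reported down), which lets
 * mlx5_link_update() below poll again until the two are consistent.
 */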

/**
 * Retrieve physical link information (unlocked version using new ioctl).
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] link
 *   Storage for current link status.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev,
			     struct rte_eth_link *link)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ethtool_link_settings gcmd = { .cmd = ETHTOOL_GLINKSETTINGS };
	struct ifreq ifr;
	struct rte_eth_link dev_link;
	struct rte_eth_dev *master = NULL;
	uint64_t sc;
	int ret;

	ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr);
	if (ret) {
		DRV_LOG(WARNING, "port %u ioctl(SIOCGIFFLAGS) failed: %s",
			dev->data->port_id, strerror(rte_errno));
		return ret;
	}
	dev_link = (struct rte_eth_link) {
		.link_status = ((ifr.ifr_flags & IFF_UP) &&
				(ifr.ifr_flags & IFF_RUNNING)),
	};
	ifr = (struct ifreq) {
		.ifr_data = (void *)&gcmd,
	};
	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
	if (ret) {
		if (ret == -ENOTSUP && priv->representor) {
			/*
			 * For representors we can try to inherit link
			 * settings from the master device. Actually
			 * link settings do not make a lot of sense
			 * for representors due to missing physical
			 * link. The old kernel drivers supported
			 * emulated settings query for representors,
			 * the new ones do not, so we have to add
			 * this code for compatibility.
			 */
			master = mlx5_find_master_dev(dev);
			if (master) {
				ifr = (struct ifreq) {
					.ifr_data = (void *)&gcmd,
				};
				ret = mlx5_ifreq(master, SIOCETHTOOL, &ifr);
			}
		}
		if (ret) {
			DRV_LOG(DEBUG,
				"port %u ioctl(SIOCETHTOOL,"
				" ETHTOOL_GLINKSETTINGS) failed: %s",
				dev->data->port_id, strerror(rte_errno));
			return ret;
		}
	}
	gcmd.link_mode_masks_nwords = -gcmd.link_mode_masks_nwords;

	alignas(struct ethtool_link_settings)
	uint8_t data[offsetof(struct ethtool_link_settings, link_mode_masks) +
		     sizeof(uint32_t) * gcmd.link_mode_masks_nwords * 3];
	struct ethtool_link_settings *ecmd = (void *)data;

	*ecmd = gcmd;
	ifr.ifr_data = (void *)ecmd;
	ret = mlx5_ifreq(master ? master : dev, SIOCETHTOOL, &ifr);
	if (ret) {
		DRV_LOG(DEBUG,
			"port %u ioctl(SIOCETHTOOL,"
			" ETHTOOL_GLINKSETTINGS) failed: %s",
			dev->data->port_id, strerror(rte_errno));
		return ret;
	}
	dev_link.link_speed = ecmd->speed;
	sc = ecmd->link_mode_masks[0] |
		((uint64_t)ecmd->link_mode_masks[1] << 32);
	priv->link_speed_capa = 0;
	if (sc & MLX5_BITSHIFT(ETHTOOL_LINK_MODE_Autoneg_BIT))
		priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseT_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_1G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_10G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_20G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_40G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_56G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_25G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_50G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_100G;
	dev_link.link_duplex = ((ecmd->duplex == DUPLEX_HALF) ?
				ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
	dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
				  ETH_LINK_SPEED_FIXED);
	if (((dev_link.link_speed && !dev_link.link_status) ||
	     (!dev_link.link_speed && dev_link.link_status))) {
		rte_errno = EAGAIN;
		return -rte_errno;
	}
	*link = dev_link;
	return 0;
}
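
/*
 * The ETHTOOL_GLINKSETTINGS handshake used above works in two steps
 * (illustrative walk-through): the first SIOCETHTOOL call is issued
 * with link_mode_masks_nwords = 0 and the kernel answers with a
 * negative value, e.g. -3, advertising how many 32-bit words each
 * bitmap needs. The driver negates it (3), allocates room for
 * 3 words x 3 bitmaps behind the header and repeats the ioctl to get
 * the actual supported/advertising/peer masks.
 */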

/**
 * DPDK callback to retrieve physical link information.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param wait_to_complete
 *   Wait for request completion.
 *
 * @return
 *   0 if link status was not updated, positive if it was, a negative errno
 *   value otherwise and rte_errno is set.
 */
int
mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	int ret;
	struct rte_eth_link dev_link;
	time_t start_time = time(NULL);

	do {
		ret = mlx5_link_update_unlocked_gs(dev, &dev_link);
		if (ret)
			ret = mlx5_link_update_unlocked_gset(dev, &dev_link);
		if (ret == 0)
			break;
		/* Handle wait to complete situation. */
		if (wait_to_complete && ret == -EAGAIN) {
			if (abs((int)difftime(time(NULL), start_time)) <
			    MLX5_LINK_STATUS_TIMEOUT) {
				usleep(0);
				continue;
			} else {
				rte_errno = EBUSY;
				return -rte_errno;
			}
		} else if (ret < 0) {
			return ret;
		}
	} while (wait_to_complete);
	ret = !!memcmp(&dev->data->dev_link, &dev_link,
		       sizeof(struct rte_eth_link));
	dev->data->dev_link = dev_link;
	return ret;
}

/**
 * DPDK callback to change the MTU.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param mtu
 *   New MTU.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint16_t kern_mtu = 0;
	int ret;

	ret = mlx5_get_mtu(dev, &kern_mtu);
	if (ret)
		return ret;
	/* Set kernel interface MTU first. */
	ret = mlx5_set_mtu(dev, mtu);
	if (ret)
		return ret;
	ret = mlx5_get_mtu(dev, &kern_mtu);
	if (ret)
		return ret;
	if (kern_mtu == mtu) {
		priv->mtu = mtu;
		DRV_LOG(DEBUG, "port %u adapter MTU set to %u",
			dev->data->port_id, mtu);
		return 0;
	}
	rte_errno = EAGAIN;
	return -rte_errno;
}
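
/*
 * Note that mlx5_dev_set_mtu() re-reads the kernel MTU after setting
 * it: if the value read back does not match the request (the kernel
 * refused or adjusted it), -EAGAIN is returned and priv->mtu is left
 * unchanged. Through the ethdev API this surfaces as a failing
 * rte_eth_dev_set_mtu() call (illustrative sketch):
 *
 *   if (rte_eth_dev_set_mtu(port_id, 9000) != 0)
 *       ; // MTU not applied, keep using the previous value
 */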

/**
 * DPDK callback to get flow control status.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] fc_conf
 *   Flow control output buffer.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct ifreq ifr;
	struct ethtool_pauseparam ethpause = {
		.cmd = ETHTOOL_GPAUSEPARAM
	};
	int ret;

	ifr.ifr_data = (void *)&ethpause;
	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
	if (ret) {
		DRV_LOG(WARNING,
			"port %u ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM) failed:"
			" %s",
			dev->data->port_id, strerror(rte_errno));
		return ret;
	}
	fc_conf->autoneg = ethpause.autoneg;
	if (ethpause.rx_pause && ethpause.tx_pause)
		fc_conf->mode = RTE_FC_FULL;
	else if (ethpause.rx_pause)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (ethpause.tx_pause)
		fc_conf->mode = RTE_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_FC_NONE;
	return 0;
}

/**
 * DPDK callback to modify flow control parameters.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[in] fc_conf
 *   Flow control parameters.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct ifreq ifr;
	struct ethtool_pauseparam ethpause = {
		.cmd = ETHTOOL_SPAUSEPARAM
	};
	int ret;

	ifr.ifr_data = (void *)&ethpause;
	ethpause.autoneg = fc_conf->autoneg;
	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
	    (fc_conf->mode & RTE_FC_RX_PAUSE))
		ethpause.rx_pause = 1;
	else
		ethpause.rx_pause = 0;

	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
	    (fc_conf->mode & RTE_FC_TX_PAUSE))
		ethpause.tx_pause = 1;
	else
		ethpause.tx_pause = 0;
	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
	if (ret) {
		DRV_LOG(WARNING,
			"port %u ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)"
			" failed: %s",
			dev->data->port_id, strerror(rte_errno));
		return ret;
	}
	return 0;
}

/**
 * Get PCI information from struct ibv_device.
 *
 * @param device
 *   Pointer to the IB device structure.
 * @param[out] pci_addr
 *   PCI bus address output buffer.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_ibv_device_to_pci_addr(const struct ibv_device *device,
			    struct rte_pci_addr *pci_addr)
{
	FILE *file;
	char line[32];
	MKSTR(path, "%s/device/uevent", device->ibdev_path);

	file = fopen(path, "rb");
	if (file == NULL) {
		rte_errno = errno;
		return -rte_errno;
	}
	while (fgets(line, sizeof(line), file) == line) {
		size_t len = strlen(line);
		int ret;

		/* Truncate long lines. */
		if (len == (sizeof(line) - 1))
			while (line[(len - 1)] != '\n') {
				ret = fgetc(file);
				if (ret == EOF)
					break;
				line[(len - 1)] = ret;
			}
		/* Extract information. */
		if (sscanf(line,
			   "PCI_SLOT_NAME="
			   "%" SCNx32 ":%" SCNx8 ":%" SCNx8 ".%" SCNx8 "\n",
			   &pci_addr->domain,
			   &pci_addr->bus,
			   &pci_addr->devid,
			   &pci_addr->function) == 4) {
			ret = 0;
			break;
		}
	}
	fclose(file);
	return 0;
}
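
/*
 * The uevent file parsed above contains KEY=value lines; the line of
 * interest looks like "PCI_SLOT_NAME=0000:83:00.0" (illustrative
 * address), which the sscanf() format splits into domain 0x0000,
 * bus 0x83, devid 0x00 and function 0x0.
 */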

/**
 * Handle asynchronous removal event for entire multiport device.
 *
 * @param sh
 *   Infiniband device shared context.
 */
static void
mlx5_dev_interrupt_device_fatal(struct mlx5_ibv_shared *sh)
{
	uint32_t i;

	for (i = 0; i < sh->max_port; ++i) {
		struct rte_eth_dev *dev;

		if (sh->port[i].ih_port_id >= RTE_MAX_ETHPORTS) {
			/*
			 * Either the port does not exist or no
			 * handler is installed for this port.
			 */
			continue;
		}
		dev = &rte_eth_devices[sh->port[i].ih_port_id];
		assert(dev);
		if (dev->data->dev_conf.intr_conf.rmv)
			_rte_eth_dev_callback_process
				(dev, RTE_ETH_EVENT_INTR_RMV, NULL);
	}
}

/**
 * Handle shared asynchronous events of the NIC (removal event
 * and link status change). Supports multiport IB device.
 *
 * @param cb_arg
 *   Callback argument.
 */
void
mlx5_dev_interrupt_handler(void *cb_arg)
{
	struct mlx5_ibv_shared *sh = cb_arg;
	struct ibv_async_event event;

	/* Read all messages from the IB device and acknowledge them. */
	for (;;) {
		struct rte_eth_dev *dev;
		uint32_t tmp;

		if (mlx5_glue->get_async_event(sh->ctx, &event))
			break;
		/* Retrieve and check IB port index. */
		tmp = (uint32_t)event.element.port_num;
		if (!tmp && event.event_type == IBV_EVENT_DEVICE_FATAL) {
			/*
			 * The DEVICE_FATAL event is called once for
			 * entire device without port specifying.
			 * We should notify all existing ports.
			 */
			mlx5_glue->ack_async_event(&event);
			mlx5_dev_interrupt_device_fatal(sh);
			continue;
		}
		assert(tmp && (tmp <= sh->max_port));
		if (!tmp) {
			/* Unsupported device level event. */
			mlx5_glue->ack_async_event(&event);
			DRV_LOG(DEBUG,
				"unsupported common event (type %d)",
				event.event_type);
			continue;
		}
		if (tmp > sh->max_port) {
			/* Invalid IB port index. */
			mlx5_glue->ack_async_event(&event);
			DRV_LOG(DEBUG,
				"cannot handle an event (type %d) "
				"due to invalid IB port index (%u)",
				event.event_type, tmp);
			continue;
		}
		if (sh->port[tmp - 1].ih_port_id >= RTE_MAX_ETHPORTS) {
			/* No handler installed. */
			mlx5_glue->ack_async_event(&event);
			DRV_LOG(DEBUG,
				"cannot handle an event (type %d) "
				"due to no handler installed for port %u",
				event.event_type, tmp);
			continue;
		}
		/* Retrieve ethernet device descriptor. */
		tmp = sh->port[tmp - 1].ih_port_id;
		dev = &rte_eth_devices[tmp];
		assert(dev);
		if ((event.event_type == IBV_EVENT_PORT_ACTIVE ||
		     event.event_type == IBV_EVENT_PORT_ERR) &&
		    dev->data->dev_conf.intr_conf.lsc) {
			mlx5_glue->ack_async_event(&event);
			if (mlx5_link_update(dev, 0) == -EAGAIN) {
				usleep(0);
				continue;
			}
			_rte_eth_dev_callback_process
				(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
			continue;
		}
		DRV_LOG(DEBUG,
			"port %u cannot handle an unknown event (type %d)",
			dev->data->port_id, event.event_type);
		mlx5_glue->ack_async_event(&event);
	}
}

/**
 * Unregister callback handler safely. The handler may be active
 * while we are trying to unregister it, in this case code -EAGAIN
 * is returned by rte_intr_callback_unregister(). This routine checks
 * the return code and tries to unregister handler again.
 *
 * @param handle
 *   interrupt handle
 * @param cb_fn
 *   pointer to callback routine
 * @param cb_arg
 *   opaque callback parameter
 */
void
mlx5_intr_callback_unregister(const struct rte_intr_handle *handle,
			      rte_intr_callback_fn cb_fn, void *cb_arg)
{
	/*
	 * Try to reduce timeout management overhead by not calling
	 * the timer related routines on the first iteration. If the
	 * unregistering succeeds on first call there will be no
	 * timer calls at all.
	 */
	uint64_t twait = 0;
	uint64_t start = 0;

	do {
		int ret;

		ret = rte_intr_callback_unregister(handle, cb_fn, cb_arg);
		if (ret >= 0)
			return;
		if (ret != -EAGAIN) {
			DRV_LOG(INFO, "failed to unregister interrupt"
				      " handler (error: %d)", ret);
			assert(false);
			return;
		}
		if (twait) {
			struct timespec onems;

			/* Wait one millisecond and try again. */
			onems.tv_sec = 0;
			onems.tv_nsec = NS_PER_S / MS_PER_S;
			nanosleep(&onems, 0);
			/* Check whether one second elapsed. */
			if ((rte_get_timer_cycles() - start) <= twait)
				continue;
		} else {
			/*
			 * We get the amount of timer ticks for one second.
			 * If this amount elapsed it means we spent one
			 * second in waiting. This branch is executed once
			 * on first iteration.
			 */
			twait = rte_get_timer_hz();
			assert(twait);
		}
		/*
		 * Timeout elapsed, show message (once a second) and retry.
		 * We have no other acceptable option here, if we ignore
		 * the unregistering return code the handler will not
		 * be unregistered, the fd will be closed and we may get
		 * a crash. Retrying with a message in the loop seems to
		 * be the least bad choice.
		 */
		DRV_LOG(INFO, "Retrying to unregister interrupt handler");
		start = rte_get_timer_cycles();
	} while (true);
}

/**
 * Uninstall shared asynchronous device events handler.
 * This function is implemented to support event sharing
 * between multiple ports of single IB device.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_dev_shared_handler_uninstall(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;
	pthread_mutex_lock(&sh->intr_mutex);
	assert(priv->ibv_port);
	assert(priv->ibv_port <= sh->max_port);
	assert(dev->data->port_id < RTE_MAX_ETHPORTS);
	if (sh->port[priv->ibv_port - 1].ih_port_id >= RTE_MAX_ETHPORTS)
		goto exit;
	assert(sh->port[priv->ibv_port - 1].ih_port_id ==
					(uint32_t)dev->data->port_id);
	assert(sh->intr_cnt);
	sh->port[priv->ibv_port - 1].ih_port_id = RTE_MAX_ETHPORTS;
	if (!sh->intr_cnt || --sh->intr_cnt)
		goto exit;
	mlx5_intr_callback_unregister(&sh->intr_handle,
				      mlx5_dev_interrupt_handler, sh);
	sh->intr_handle.fd = 0;
	sh->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
exit:
	pthread_mutex_unlock(&sh->intr_mutex);
}

/**
 * Install shared asynchronous device events handler.
 * This function is implemented to support event sharing
 * between multiple ports of single IB device.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_dev_shared_handler_install(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	int ret;
	int flags;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;
	pthread_mutex_lock(&sh->intr_mutex);
	assert(priv->ibv_port);
	assert(priv->ibv_port <= sh->max_port);
	assert(dev->data->port_id < RTE_MAX_ETHPORTS);
	if (sh->port[priv->ibv_port - 1].ih_port_id < RTE_MAX_ETHPORTS) {
		/* The handler is already installed for this port. */
		assert(sh->intr_cnt);
		goto exit;
	}
	sh->port[priv->ibv_port - 1].ih_port_id = (uint32_t)dev->data->port_id;
	if (sh->intr_cnt) {
		sh->intr_cnt++;
		goto exit;
	}
	/* No shared handler installed. */
	assert(sh->ctx->async_fd > 0);
	flags = fcntl(sh->ctx->async_fd, F_GETFL);
	ret = fcntl(sh->ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
	if (ret) {
		DRV_LOG(INFO, "failed to change the async event queue"
			      " file descriptor to non-blocking mode");
		/* Indicate there will be no interrupts. */
		dev->data->dev_conf.intr_conf.lsc = 0;
		dev->data->dev_conf.intr_conf.rmv = 0;
		sh->port[priv->ibv_port - 1].ih_port_id = RTE_MAX_ETHPORTS;
		goto exit;
	}
	sh->intr_handle.fd = sh->ctx->async_fd;
	sh->intr_handle.type = RTE_INTR_HANDLE_EXT;
	rte_intr_callback_register(&sh->intr_handle,
				   mlx5_dev_interrupt_handler, sh);
	sh->intr_cnt++;
exit:
	pthread_mutex_unlock(&sh->intr_mutex);
}
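
/*
 * The install/uninstall pair above reference-counts the shared handler:
 * the first port of an IB device switches the async fd to non-blocking
 * mode and registers mlx5_dev_interrupt_handler(), every further port
 * only bumps sh->intr_cnt, and the callback is actually unregistered
 * when the last port goes away. For a dual-port device this means two
 * install calls but a single rte_intr_callback_register().
 */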

/**
 * Uninstall interrupt handler.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
void
mlx5_dev_interrupt_handler_uninstall(struct rte_eth_dev *dev)
{
	mlx5_dev_shared_handler_uninstall(dev);
}

/**
 * Install interrupt handler.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
void
mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev)
{
	mlx5_dev_shared_handler_install(dev);
}

/**
 * DPDK callback to bring the link DOWN.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_set_link_down(struct rte_eth_dev *dev)
{
	return mlx5_set_flags(dev, ~IFF_UP, ~IFF_UP);
}

/**
 * DPDK callback to bring the link UP.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_set_link_up(struct rte_eth_dev *dev)
{
	return mlx5_set_flags(dev, ~IFF_UP, IFF_UP);
}
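
/*
 * mlx5_set_flags() semantics, shown on the calls above (worked
 * example): "keep" selects the bits to preserve and "flags" the new
 * value for the remaining bits. With keep = ~IFF_UP and flags = IFF_UP
 * every flag except IFF_UP is kept as-is and IFF_UP is set, which is
 * how mlx5_set_link_up() brings the interface up; passing ~IFF_UP for
 * both arguments clears IFF_UP instead.
 */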

/**
 * Configure the TX function to use.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   Pointer to selected Tx burst function.
 */
eth_tx_burst_t
mlx5_select_tx_function(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	eth_tx_burst_t tx_pkt_burst = mlx5_tx_burst;
	struct mlx5_dev_config *config = &priv->config;
	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
	int tso = !!(tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
				    DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
				    DEV_TX_OFFLOAD_GRE_TNL_TSO |
				    DEV_TX_OFFLOAD_IP_TNL_TSO |
				    DEV_TX_OFFLOAD_UDP_TNL_TSO));
	int swp = !!(tx_offloads & (DEV_TX_OFFLOAD_IP_TNL_TSO |
				    DEV_TX_OFFLOAD_UDP_TNL_TSO |
				    DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM));
	int vlan_insert = !!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT);

	assert(priv != NULL);
	/* Select appropriate TX function. */
	if (vlan_insert || tso || swp)
		return tx_pkt_burst;
	if (config->mps == MLX5_MPW_ENHANCED) {
		if (mlx5_check_vec_tx_support(dev) > 0) {
			if (mlx5_check_raw_vec_tx_support(dev) > 0)
				tx_pkt_burst = mlx5_tx_burst_raw_vec;
			else
				tx_pkt_burst = mlx5_tx_burst_vec;
			DRV_LOG(DEBUG,
				"port %u selected enhanced MPW Tx vectorized"
				" function",
				dev->data->port_id);
		} else {
			tx_pkt_burst = mlx5_tx_burst_empw;
			DRV_LOG(DEBUG,
				"port %u selected enhanced MPW Tx function",
				dev->data->port_id);
		}
	} else if (config->mps && (config->txq_inline > 0)) {
		tx_pkt_burst = mlx5_tx_burst_mpw_inline;
		DRV_LOG(DEBUG, "port %u selected MPW inline Tx function",
			dev->data->port_id);
	} else if (config->mps) {
		tx_pkt_burst = mlx5_tx_burst_mpw;
		DRV_LOG(DEBUG, "port %u selected MPW Tx function",
			dev->data->port_id);
	}
	return tx_pkt_burst;
}

/**
 * Configure the RX function to use.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   Pointer to selected Rx burst function.
 */
eth_rx_burst_t
mlx5_select_rx_function(struct rte_eth_dev *dev)
{
	eth_rx_burst_t rx_pkt_burst = mlx5_rx_burst;

	assert(dev != NULL);
	if (mlx5_check_vec_rx_support(dev) > 0) {
		rx_pkt_burst = mlx5_rx_burst_vec;
		DRV_LOG(DEBUG, "port %u selected Rx vectorized function",
			dev->data->port_id);
	} else if (mlx5_mprq_enabled(dev)) {
		rx_pkt_burst = mlx5_rx_burst_mprq;
	}
	return rx_pkt_burst;
}

/**
 * Check if mlx5 device was removed.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   1 when device is removed, otherwise 0.
 */
int
mlx5_is_removed(struct rte_eth_dev *dev)
{
	struct ibv_device_attr device_attr;
	struct mlx5_priv *priv = dev->data->dev_private;

	if (mlx5_glue->query_device(priv->sh->ctx, &device_attr) == EIO)
		return 1;
	return 0;
}

/**
 * Get port ID list of mlx5 instances sharing a common device.
 *
 * @param[in] dev
 *   Device to look for.
 * @param[out] port_list
 *   Result buffer for collected port IDs.
 * @param port_list_n
 *   Maximum number of entries in result buffer. If 0, @p port_list can be
 *   NULL.
 *
 * @return
 *   Number of matching instances regardless of the @p port_list_n
 *   parameter, 0 if none were found.
 */
unsigned int
mlx5_dev_to_port_id(const struct rte_device *dev, uint16_t *port_list,
		    unsigned int port_list_n)
{
	uint16_t id;
	unsigned int n = 0;

	RTE_ETH_FOREACH_DEV_OF(id, dev) {
		if (n < port_list_n)
			port_list[n] = id;
		n++;
	}
	return n;
}
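
/*
 * mlx5_dev_to_port_id() is meant to be called twice, the way
 * mlx5_dev_infos_get() uses it earlier in this file: once with a
 * zero-sized buffer to learn how many ports share the rte_device, then
 * again with a buffer of that size (illustrative sketch):
 *
 *   unsigned int n = mlx5_dev_to_port_id(dev->device, NULL, 0);
 *   uint16_t port_id[n];
 *
 *   n = RTE_MIN(mlx5_dev_to_port_id(dev->device, port_id, n), n);
 */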

/**
 * Get the E-Switch domain id this port belongs to.
 *
 * @param[in] port
 *   Device port id.
 * @param[out] es_domain_id
 *   E-Switch domain id.
 * @param[out] es_port_id
 *   The port id of the port in the E-Switch.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_port_to_eswitch_info(uint16_t port,
			  uint16_t *es_domain_id, uint16_t *es_port_id)
{
	struct rte_eth_dev *dev;
	struct mlx5_priv *priv;

	if (port >= RTE_MAX_ETHPORTS) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	if (!rte_eth_dev_is_valid_port(port)) {
		rte_errno = ENODEV;
		return -rte_errno;
	}
	dev = &rte_eth_devices[port];
	priv = dev->data->dev_private;
	if (!(priv->representor || priv->master)) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	if (es_domain_id)
		*es_domain_id = priv->domain_id;
	if (es_port_id)
		*es_port_id = priv->vport_id;
	return 0;
}

/**
 * Get switch information associated with network interface.
 *
 * @param ifindex
 *   Network interface index.
 * @param[out] info
 *   Switch information object, populated in case of success.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_sysfs_switch_info(unsigned int ifindex, struct mlx5_switch_info *info)
{
	char ifname[IF_NAMESIZE];
	char port_name[IF_NAMESIZE];
	FILE *file;
	struct mlx5_switch_info data = {
		.master = 0,
		.representor = 0,
		.name_type = MLX5_PHYS_PORT_NAME_TYPE_NOTSET,
		.port_name = 0,
		.switch_id = 0,
	};
	DIR *dir;
	bool port_switch_id_set = false;
	bool device_dir = false;
	char c;
	int ret;

	if (!if_indextoname(ifindex, ifname)) {
		rte_errno = errno;
		return -rte_errno;
	}

	MKSTR(phys_port_name, "/sys/class/net/%s/phys_port_name",
	      ifname);
	MKSTR(phys_switch_id, "/sys/class/net/%s/phys_switch_id",
	      ifname);
	MKSTR(pci_device, "/sys/class/net/%s/device",
	      ifname);

	file = fopen(phys_port_name, "rb");
	if (file != NULL) {
		ret = fscanf(file, "%s", port_name);
		fclose(file);
		if (ret == 1)
			mlx5_translate_port_name(port_name, &data);
	}
	file = fopen(phys_switch_id, "rb");
	if (file == NULL) {
		rte_errno = errno;
		return -rte_errno;
	}
	port_switch_id_set =
		fscanf(file, "%" SCNx64 "%c", &data.switch_id, &c) == 2 &&
		c == '\n';
	fclose(file);
	dir = opendir(pci_device);
	if (dir != NULL) {
		closedir(dir);
		device_dir = true;
	}
	if (port_switch_id_set) {
		/* We have some E-Switch configuration. */
		mlx5_sysfs_check_switch_info(device_dir, &data);
	}
	*info = data;
	assert(!(data.master && data.representor));
	if (data.master && data.representor) {
		DRV_LOG(ERR, "ifindex %u device is recognized as master"
			     " and as representor", ifindex);
		rte_errno = ENODEV;
		return -rte_errno;
	}
	return 0;
}
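
/*
 * Illustrative sysfs contents for a representor netdev (values are
 * examples only): /sys/class/net/<ifname>/phys_port_name may read
 * "pf0vf3" and /sys/class/net/<ifname>/phys_switch_id a hexadecimal
 * switch id such as "248a070300a1d4c0". With such input the routine
 * above fills data.name_type = MLX5_PHYS_PORT_NAME_TYPE_PFVF,
 * data.port_name = 3 and data.switch_id accordingly before
 * mlx5_sysfs_check_switch_info() derives the representor flag.
 */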

/**
 * Analyze gathered port parameters via Netlink to recognize master
 * and representor devices for E-Switch configuration.
 *
 * @param[in] num_vf_set
 *   Flag of presence of number of VFs port attribute.
 * @param[inout] switch_info
 *   Port information, including port name as a number and port name
 *   type if recognized.
 *
 * @return
 *   master and representor flags are set in switch_info according to
 *   recognized parameters (if any).
 */
void
mlx5_nl_check_switch_info(bool num_vf_set,
			  struct mlx5_switch_info *switch_info)
{
	switch (switch_info->name_type) {
	case MLX5_PHYS_PORT_NAME_TYPE_UNKNOWN:
		/*
		 * Name is not recognized, assume the master,
		 * check the number of VFs key presence.
		 */
		switch_info->master = num_vf_set;
		break;
	case MLX5_PHYS_PORT_NAME_TYPE_NOTSET:
		/*
		 * Name is not set, this assumes the legacy naming
		 * schema for master, just check if there is a
		 * number of VFs key.
		 */
		switch_info->master = num_vf_set;
		break;
	case MLX5_PHYS_PORT_NAME_TYPE_UPLINK:
		/* New uplink naming schema recognized. */
		switch_info->master = 1;
		break;
	case MLX5_PHYS_PORT_NAME_TYPE_LEGACY:
		/* Legacy representors naming schema. */
		switch_info->representor = !num_vf_set;
		break;
	case MLX5_PHYS_PORT_NAME_TYPE_PFVF:
		/* New representors naming schema. */
		switch_info->representor = 1;
		break;
	}
}

/**
 * Analyze gathered port parameters via sysfs to recognize master
 * and representor devices for E-Switch configuration.
 *
 * @param[in] device_dir
 *   Flag of presence of "device" directory under port device key.
 * @param[inout] switch_info
 *   Port information, including port name as a number and port name
 *   type if recognized.
 *
 * @return
 *   master and representor flags are set in switch_info according to
 *   recognized parameters (if any).
 */
void
mlx5_sysfs_check_switch_info(bool device_dir,
			     struct mlx5_switch_info *switch_info)
{
	switch (switch_info->name_type) {
	case MLX5_PHYS_PORT_NAME_TYPE_UNKNOWN:
		/*
		 * Name is not recognized, assume the master,
		 * check the device directory presence.
		 */
		switch_info->master = device_dir;
		break;
	case MLX5_PHYS_PORT_NAME_TYPE_NOTSET:
		/*
		 * Name is not set, this assumes the legacy naming
		 * schema for master, just check if there is
		 * a device directory.
		 */
		switch_info->master = device_dir;
		break;
	case MLX5_PHYS_PORT_NAME_TYPE_UPLINK:
		/* New uplink naming schema recognized. */
		switch_info->master = 1;
		break;
	case MLX5_PHYS_PORT_NAME_TYPE_LEGACY:
		/* Legacy representors naming schema. */
		switch_info->representor = !device_dir;
		break;
	case MLX5_PHYS_PORT_NAME_TYPE_PFVF:
		/* New representors naming schema. */
		switch_info->representor = 1;
		break;
	}
}
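
/*
 * Summary of the recognition rules implemented by the two helpers
 * above (sysfs variant in parentheses): an UPLINK name ("p0") or a
 * PFVF name ("pf0vf1") is decisive on its own, marking the port as
 * master or representor respectively. A LEGACY numeric name marks a
 * representor only when the number-of-VFs attribute (the "device"
 * directory) is absent, while NOTSET/UNKNOWN names fall back to
 * treating the port as master when that same attribute (directory) is
 * present.
 */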

/**
 * Extract port name, as a number, from sysfs or netlink information.
 *
 * @param[in] port_name_in
 *   String representing the port name.
 * @param[out] port_info_out
 *   Port information, including port name as a number and port name
 *   type if recognized.
 *
 * @return
 *   port_name field set according to recognized name format.
 */
void
mlx5_translate_port_name(const char *port_name_in,
			 struct mlx5_switch_info *port_info_out)
{
	char pf_c1, pf_c2, vf_c1, vf_c2;
	char *end;
	int sc_items;

	/*
	 * Check for port-name as a string of the form pf0vf0
	 * (support kernel ver >= 5.0 or OFED ver >= 4.6).
	 */
	sc_items = sscanf(port_name_in, "%c%c%d%c%c%d",
			  &pf_c1, &pf_c2, &port_info_out->pf_num,
			  &vf_c1, &vf_c2, &port_info_out->port_name);
	if (sc_items == 6 &&
	    pf_c1 == 'p' && pf_c2 == 'f' &&
	    vf_c1 == 'v' && vf_c2 == 'f') {
		port_info_out->name_type = MLX5_PHYS_PORT_NAME_TYPE_PFVF;
		return;
	}
	/*
	 * Check for port-name as a string of the form p0
	 * (support kernel ver >= 5.0, or OFED ver >= 4.6).
	 */
	sc_items = sscanf(port_name_in, "%c%d",
			  &pf_c1, &port_info_out->port_name);
	if (sc_items == 2 && pf_c1 == 'p') {
		port_info_out->name_type = MLX5_PHYS_PORT_NAME_TYPE_UPLINK;
		return;
	}
	/* Check for port-name as a number (support kernel ver < 5.0). */
	errno = 0;
	port_info_out->port_name = strtol(port_name_in, &end, 0);
	if (!errno &&
	    (size_t)(end - port_name_in) == strlen(port_name_in)) {
		port_info_out->name_type = MLX5_PHYS_PORT_NAME_TYPE_LEGACY;
		return;
	}
	port_info_out->name_type = MLX5_PHYS_PORT_NAME_TYPE_UNKNOWN;
	return;
}
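
/*
 * Worked examples for mlx5_translate_port_name() (illustrative
 * inputs): "pf0vf2" yields name_type PFVF with pf_num 0 and
 * port_name 2, "p1" yields UPLINK with port_name 1, a bare "2" yields
 * LEGACY with port_name 2, and anything else, e.g. "eth0", is left as
 * UNKNOWN.
 */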