/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <inttypes.h>
#include <unistd.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#include <dirent.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <fcntl.h>
#include <stdalign.h>
#include <sys/un.h>
#include <time.h>

#include <ethdev_driver.h>
#include <bus_pci_driver.h>
#include <rte_mbuf.h>
#include <rte_common.h>
#include <rte_eal_paging.h>
#include <rte_interrupts.h>
#include <rte_malloc.h>
#include <rte_string_fns.h>
#include <rte_rwlock.h>
#include <rte_cycles.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_common.h>
#include <mlx5_malloc.h>
#include <mlx5_nl.h>

#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"

/* Supported speed values found in /usr/include/linux/ethtool.h */
#ifndef HAVE_SUPPORTED_40000baseKR4_Full
#define SUPPORTED_40000baseKR4_Full (1 << 23)
#endif
#ifndef HAVE_SUPPORTED_40000baseCR4_Full
#define SUPPORTED_40000baseCR4_Full (1 << 24)
#endif
#ifndef HAVE_SUPPORTED_40000baseSR4_Full
#define SUPPORTED_40000baseSR4_Full (1 << 25)
#endif
#ifndef HAVE_SUPPORTED_40000baseLR4_Full
#define SUPPORTED_40000baseLR4_Full (1 << 26)
#endif
#ifndef HAVE_SUPPORTED_56000baseKR4_Full
#define SUPPORTED_56000baseKR4_Full (1 << 27)
#endif
#ifndef HAVE_SUPPORTED_56000baseCR4_Full
#define SUPPORTED_56000baseCR4_Full (1 << 28)
#endif
#ifndef HAVE_SUPPORTED_56000baseSR4_Full
#define SUPPORTED_56000baseSR4_Full (1 << 29)
#endif
#ifndef HAVE_SUPPORTED_56000baseLR4_Full
#define SUPPORTED_56000baseLR4_Full (1 << 30)
#endif
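
/*
 * The legacy SUPPORTED_* speed bits above back the deprecated ETHTOOL_GSET
 * fallback path (mlx5_link_update_unlocked_gset()) below.
 */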

/* Add defines in case the running kernel is not the same as user headers. */
#ifndef ETHTOOL_GLINKSETTINGS
struct ethtool_link_settings {
	uint32_t cmd;
	uint32_t speed;
	uint8_t duplex;
	uint8_t port;
	uint8_t phy_address;
	uint8_t autoneg;
	uint8_t mdio_support;
	uint8_t eth_tp_mdix;
	uint8_t eth_tp_mdix_ctrl;
	int8_t link_mode_masks_nwords;
	uint32_t reserved[8];
	uint32_t link_mode_masks[];
};

/* The kernel values can be found in /include/uapi/linux/ethtool.h */
#define ETHTOOL_GLINKSETTINGS 0x0000004c
#define ETHTOOL_LINK_MODE_1000baseT_Full_BIT 5
#define ETHTOOL_LINK_MODE_Autoneg_BIT 6
#define ETHTOOL_LINK_MODE_1000baseKX_Full_BIT 17
#define ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT 18
#define ETHTOOL_LINK_MODE_10000baseKR_Full_BIT 19
#define ETHTOOL_LINK_MODE_10000baseR_FEC_BIT 20
#define ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT 21
#define ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT 22
#define ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT 23
#define ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT 24
#define ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT 25
#define ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT 26
#define ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT 27
#define ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT 28
#define ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT 29
#define ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT 30
#endif
#ifndef HAVE_ETHTOOL_LINK_MODE_25G
#define ETHTOOL_LINK_MODE_25000baseCR_Full_BIT 31
#define ETHTOOL_LINK_MODE_25000baseKR_Full_BIT 32
#define ETHTOOL_LINK_MODE_25000baseSR_Full_BIT 33
#endif
#ifndef HAVE_ETHTOOL_LINK_MODE_50G
#define ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT 34
#define ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT 35
#endif
#ifndef HAVE_ETHTOOL_LINK_MODE_100G
#define ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT 36
#define ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT 37
#define ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT 38
#define ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT 39
#endif
#ifndef HAVE_ETHTOOL_LINK_MODE_200G
#define ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT 62
#define ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT 63
#define ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT 0 /* 64 - 64 */
#define ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT 1 /* 65 - 64 */
#define ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT 2 /* 66 - 64 */
#endif
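
/*
 * Link mode bits 64 and above are defined here relative to the second
 * 64 bits of the mask (hence the small values); they are matched against
 * link_mode_masks words 2 and 3 in mlx5_link_update_unlocked_gs().
 */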

/* Get interface index from SubFunction device name. */
int
mlx5_auxiliary_get_ifindex(const char *sf_name)
{
	char if_name[IF_NAMESIZE] = { 0 };

	if (mlx5_auxiliary_get_child_name(sf_name, "/net",
					  if_name, sizeof(if_name)) != 0)
		return -rte_errno;
	return if_nametoindex(if_name);
}

/**
 * Get interface name from private structure.
 *
 * This is a port representor-aware version of mlx5_get_ifname_sysfs().
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[out] ifname
 *   Interface name output buffer.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[MLX5_NAMESIZE])
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int ifindex;

	MLX5_ASSERT(priv);
	MLX5_ASSERT(priv->sh);
	if (priv->master && priv->sh->bond.ifindex > 0) {
		memcpy(ifname, priv->sh->bond.ifname, MLX5_NAMESIZE);
		return 0;
	}
	ifindex = mlx5_ifindex(dev);
	if (!ifindex) {
		if (!priv->representor)
			return mlx5_get_ifname_sysfs(priv->sh->ibdev_path,
						     *ifname);
		rte_errno = ENXIO;
		return -rte_errno;
	}
	if (if_indextoname(ifindex, &(*ifname)[0]))
		return 0;
	rte_errno = errno;
	return -rte_errno;
}

/**
 * Perform ifreq ioctl() on associated netdev ifname.
 *
 * @param[in] ifname
 *   Pointer to netdev name.
 * @param req
 *   Request number to pass to ioctl().
 * @param[out] ifr
 *   Interface request structure output buffer.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_ifreq_by_ifname(const char *ifname, int req, struct ifreq *ifr)
{
	int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
	int ret = 0;

	if (sock == -1) {
		rte_errno = errno;
		return -rte_errno;
	}
	rte_strscpy(ifr->ifr_name, ifname, sizeof(ifr->ifr_name));
	ret = ioctl(sock, req, ifr);
	if (ret == -1) {
		rte_errno = errno;
		goto error;
	}
	close(sock);
	return 0;
error:
	close(sock);
	return -rte_errno;
}

/**
 * Perform ifreq ioctl() on associated Ethernet device.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param req
 *   Request number to pass to ioctl().
 * @param[out] ifr
 *   Interface request structure output buffer.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr)
{
	char ifname[sizeof(ifr->ifr_name)];
	int ret;

	ret = mlx5_get_ifname(dev, &ifname);
	if (ret)
		return -rte_errno;
	return mlx5_ifreq_by_ifname(ifname, req, ifr);
}

/**
 * Get device MTU.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[out] mtu
 *   MTU value output buffer.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu)
{
	struct ifreq request;
	int ret = mlx5_ifreq(dev, SIOCGIFMTU, &request);

	if (ret)
		return ret;
	*mtu = request.ifr_mtu;
	return 0;
}

/**
 * Set device MTU.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param mtu
 *   MTU value to set.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct ifreq request = { .ifr_mtu = mtu, };

	return mlx5_ifreq(dev, SIOCSIFMTU, &request);
}

/**
 * Set device flags.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param keep
 *   Bitmask for flags that must remain untouched.
 * @param flags
 *   Bitmask for flags to modify.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep, unsigned int flags)
{
	struct ifreq request;
	int ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &request);

	if (ret)
		return ret;
	request.ifr_flags &= keep;
	request.ifr_flags |= flags & ~keep;
	return mlx5_ifreq(dev, SIOCSIFFLAGS, &request);
}

/**
 * Get device current raw clock counter.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] clock
 *   Current raw clock counter of the device.
 *
 * @return
 *   0 if the clock has been read correctly, the value of errno otherwise.
 */
int
mlx5_read_clock(struct rte_eth_dev *dev, uint64_t *clock)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_context *ctx = priv->sh->cdev->ctx;
	struct ibv_values_ex values;
	int err = 0;

	values.comp_mask = IBV_VALUES_MASK_RAW_CLOCK;
	err = mlx5_glue->query_rt_values_ex(ctx, &values);
	if (err != 0) {
		DRV_LOG(WARNING, "Could not query the clock!");
		return err;
	}
	*clock = values.raw_clock.tv_nsec;
	return 0;
}

/**
 * Retrieve the master device for representor in the same switch domain.
 *
 * @param dev
 *   Pointer to representor Ethernet device structure.
 *
 * @return
 *   Master device structure on success, NULL otherwise.
 */
static struct rte_eth_dev *
mlx5_find_master_dev(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv;
	uint16_t port_id;
	uint16_t domain_id;

	priv = dev->data->dev_private;
	domain_id = priv->domain_id;
	MLX5_ASSERT(priv->representor);
	MLX5_ETH_FOREACH_DEV(port_id, dev->device) {
		struct mlx5_priv *opriv =
			rte_eth_devices[port_id].data->dev_private;
		if (opriv &&
		    opriv->master &&
		    opriv->domain_id == domain_id &&
		    opriv->sh == priv->sh)
			return &rte_eth_devices[port_id];
	}
	return NULL;
}

/**
 * DPDK callback to retrieve physical link information.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] link
 *   Storage for current link status.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev,
			       struct rte_eth_link *link)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ethtool_cmd edata = {
		.cmd = ETHTOOL_GSET /* Deprecated since Linux v4.5. */
	};
	struct ifreq ifr;
	struct rte_eth_link dev_link;
	int link_speed = 0;
	int ret;

	ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr);
	if (ret) {
		DRV_LOG(WARNING, "port %u ioctl(SIOCGIFFLAGS) failed: %s",
			dev->data->port_id, strerror(rte_errno));
		return ret;
	}
	dev_link = (struct rte_eth_link) {
		.link_status = ((ifr.ifr_flags & IFF_UP) &&
				(ifr.ifr_flags & IFF_RUNNING)),
	};
	ifr = (struct ifreq) {
		.ifr_data = (void *)&edata,
	};
	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
	if (ret) {
		if (ret == -ENOTSUP && priv->representor) {
			struct rte_eth_dev *master;

			/*
			 * For representors we can try to inherit link
			 * settings from the master device. Actually
			 * link settings do not make a lot of sense
			 * for representors due to the missing physical
			 * link. The old kernel drivers supported
			 * emulated settings query for representors,
			 * the new ones do not, so we have to keep
			 * this code for compatibility.
			 */
			master = mlx5_find_master_dev(dev);
			if (master) {
				ifr = (struct ifreq) {
					.ifr_data = (void *)&edata,
				};
				ret = mlx5_ifreq(master, SIOCETHTOOL, &ifr);
			}
		}
		if (ret) {
			DRV_LOG(WARNING,
				"port %u ioctl(SIOCETHTOOL,"
				" ETHTOOL_GSET) failed: %s",
				dev->data->port_id, strerror(rte_errno));
			return ret;
		}
	}
	link_speed = ethtool_cmd_speed(&edata);
	if (link_speed == -1)
		dev_link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
	else
		dev_link.link_speed = link_speed;
	priv->link_speed_capa = 0;
	if (edata.supported & (SUPPORTED_1000baseT_Full |
			       SUPPORTED_1000baseKX_Full))
		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_1G;
	if (edata.supported & SUPPORTED_10000baseKR_Full)
		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_10G;
	if (edata.supported & (SUPPORTED_40000baseKR4_Full |
			       SUPPORTED_40000baseCR4_Full |
			       SUPPORTED_40000baseSR4_Full |
			       SUPPORTED_40000baseLR4_Full))
		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_40G;
	dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
				RTE_ETH_LINK_HALF_DUPLEX : RTE_ETH_LINK_FULL_DUPLEX);
	dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
				  RTE_ETH_LINK_SPEED_FIXED);
	*link = dev_link;
	return 0;
}

/**
 * Retrieve physical link information (unlocked version using new ioctl).
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] link
 *   Storage for current link status.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev,
			     struct rte_eth_link *link)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ethtool_link_settings gcmd = { .cmd = ETHTOOL_GLINKSETTINGS };
	struct ifreq ifr;
	struct rte_eth_link dev_link;
	struct rte_eth_dev *master = NULL;
	uint64_t sc;
	int ret;

	ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr);
	if (ret) {
		DRV_LOG(WARNING, "port %u ioctl(SIOCGIFFLAGS) failed: %s",
			dev->data->port_id, strerror(rte_errno));
		return ret;
	}
	dev_link = (struct rte_eth_link) {
		.link_status = ((ifr.ifr_flags & IFF_UP) &&
				(ifr.ifr_flags & IFF_RUNNING)),
	};
	ifr = (struct ifreq) {
		.ifr_data = (void *)&gcmd,
	};
	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
	if (ret) {
		if (ret == -ENOTSUP && priv->representor) {
			/*
			 * For representors we can try to inherit link
			 * settings from the master device. Actually
			 * link settings do not make a lot of sense
			 * for representors due to the missing physical
			 * link. The old kernel drivers supported
			 * emulated settings query for representors,
			 * the new ones do not, so we have to keep
			 * this code for compatibility.
			 */
			master = mlx5_find_master_dev(dev);
			if (master) {
				ifr = (struct ifreq) {
					.ifr_data = (void *)&gcmd,
				};
				ret = mlx5_ifreq(master, SIOCETHTOOL, &ifr);
			}
		}
		if (ret) {
			DRV_LOG(DEBUG,
				"port %u ioctl(SIOCETHTOOL,"
				" ETHTOOL_GLINKSETTINGS) failed: %s",
				dev->data->port_id, strerror(rte_errno));
			return ret;
		}
	}
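	/*
	 * ETHTOOL_GLINKSETTINGS handshake: the first request was sent with
	 * link_mode_masks_nwords == 0 and the kernel answered with the
	 * required number of 32-bit words per bitmap as a negative value.
	 * Negate it and repeat the request with a buffer large enough for
	 * the three bitmaps (supported, advertising, lp_advertising).
	 */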
	gcmd.link_mode_masks_nwords = -gcmd.link_mode_masks_nwords;

	alignas(struct ethtool_link_settings)
	uint8_t data[offsetof(struct ethtool_link_settings, link_mode_masks) +
		     sizeof(uint32_t) * gcmd.link_mode_masks_nwords * 3];
	struct ethtool_link_settings *ecmd = (void *)data;

	*ecmd = gcmd;
	ifr.ifr_data = (void *)ecmd;
	ret = mlx5_ifreq(master ? master : dev, SIOCETHTOOL, &ifr);
	if (ret) {
		DRV_LOG(DEBUG,
			"port %u ioctl(SIOCETHTOOL,"
			" ETHTOOL_GLINKSETTINGS) failed: %s",
			dev->data->port_id, strerror(rte_errno));
		return ret;
	}
	dev_link.link_speed = (ecmd->speed == UINT32_MAX) ?
			      RTE_ETH_SPEED_NUM_UNKNOWN : ecmd->speed;
	sc = ecmd->link_mode_masks[0] |
	     ((uint64_t)ecmd->link_mode_masks[1] << 32);
	priv->link_speed_capa = 0;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseT_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT)))
		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_1G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT)))
		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_10G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT)))
		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_20G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT)))
		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_40G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT)))
		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_56G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT)))
		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_25G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT)))
		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_50G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT)))
		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_100G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT)))
		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_200G;
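
	/* Link mode bits 64 and above are reported in mask words 2 and 3. */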
	sc = ecmd->link_mode_masks[2] |
	     ((uint64_t)ecmd->link_mode_masks[3] << 32);
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT) |
		  MLX5_BITSHIFT
			(ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT)))
		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_200G;
	dev_link.link_duplex = ((ecmd->duplex == DUPLEX_HALF) ?
				RTE_ETH_LINK_HALF_DUPLEX : RTE_ETH_LINK_FULL_DUPLEX);
	dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
				  RTE_ETH_LINK_SPEED_FIXED);
	*link = dev_link;
	return 0;
}

/**
 * DPDK callback to retrieve physical link information.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param wait_to_complete
 *   Wait for request completion.
 *
 * @return
 *   0 if link status was not updated, positive if it was, a negative errno
 *   value otherwise and rte_errno is set.
 */
int
mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	int ret;
	struct rte_eth_link dev_link;
	time_t start_time = time(NULL);
	int retry = MLX5_GET_LINK_STATUS_RETRY_COUNT;

	do {
		ret = mlx5_link_update_unlocked_gs(dev, &dev_link);
		if (ret == -ENOTSUP)
			ret = mlx5_link_update_unlocked_gset(dev, &dev_link);
		if (ret == 0)
			break;
		/* Handle wait to complete situation. */
		if ((wait_to_complete || retry) && ret == -EAGAIN) {
			if (abs((int)difftime(time(NULL), start_time)) <
			    MLX5_LINK_STATUS_TIMEOUT) {
				usleep(0);
				continue;
			} else {
				rte_errno = EBUSY;
				return -rte_errno;
			}
		} else if (ret < 0) {
			return ret;
		}
	} while (wait_to_complete || retry-- > 0);
	ret = !!memcmp(&dev->data->dev_link, &dev_link,
		       sizeof(struct rte_eth_link));
	dev->data->dev_link = dev_link;
	return ret;
}

/**
 * DPDK callback to get flow control status.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] fc_conf
 *   Flow control output buffer.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct ifreq ifr;
	struct ethtool_pauseparam ethpause = {
		.cmd = ETHTOOL_GPAUSEPARAM
	};
	int ret;

	ifr.ifr_data = (void *)&ethpause;
	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
	if (ret) {
		DRV_LOG(WARNING,
			"port %u ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM) failed:"
			" %s",
			dev->data->port_id, strerror(rte_errno));
		return ret;
	}
	fc_conf->autoneg = ethpause.autoneg;
	if (ethpause.rx_pause && ethpause.tx_pause)
		fc_conf->mode = RTE_ETH_FC_FULL;
	else if (ethpause.rx_pause)
		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
	else if (ethpause.tx_pause)
		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_ETH_FC_NONE;
	return 0;
}

/**
 * DPDK callback to modify flow control parameters.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[in] fc_conf
 *   Flow control parameters.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct ifreq ifr;
	struct ethtool_pauseparam ethpause = {
		.cmd = ETHTOOL_SPAUSEPARAM
	};
	int ret;

	ifr.ifr_data = (void *)&ethpause;
	ethpause.autoneg = fc_conf->autoneg;
	if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
	    (fc_conf->mode & RTE_ETH_FC_RX_PAUSE))
		ethpause.rx_pause = 1;
	else
		ethpause.rx_pause = 0;

	if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
	    (fc_conf->mode & RTE_ETH_FC_TX_PAUSE))
		ethpause.tx_pause = 1;
	else
		ethpause.tx_pause = 0;
	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
	if (ret) {
		DRV_LOG(WARNING,
			"port %u ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)"
			" failed: %s",
			dev->data->port_id, strerror(rte_errno));
		return ret;
	}
	return 0;
}

/**
 * Handle asynchronous removal event for the entire multiport device.
 *
 * @param sh
 *   Infiniband device shared context.
 */
static void
mlx5_dev_interrupt_device_fatal(struct mlx5_dev_ctx_shared *sh)
{
	uint32_t i;

	for (i = 0; i < sh->max_port; ++i) {
		struct rte_eth_dev *dev;
		struct mlx5_priv *priv;

		if (sh->port[i].ih_port_id >= RTE_MAX_ETHPORTS) {
			/*
			 * The port does not exist or no handler
			 * is installed for this port.
			 */
			continue;
		}
		dev = &rte_eth_devices[sh->port[i].ih_port_id];
		MLX5_ASSERT(dev);
		priv = dev->data->dev_private;
		MLX5_ASSERT(priv);
		if (!priv->rmv_notified && dev->data->dev_conf.intr_conf.rmv) {
			/* Notify driver about removal only once. */
			priv->rmv_notified = 1;
			rte_eth_dev_callback_process
				(dev, RTE_ETH_EVENT_INTR_RMV, NULL);
		}
	}
}

static void
mlx5_dev_interrupt_nl_cb(struct nlmsghdr *hdr, void *cb_arg)
{
	struct mlx5_dev_ctx_shared *sh = cb_arg;
	uint32_t i;
	uint32_t if_index;

	if (mlx5_nl_parse_link_status_update(hdr, &if_index) < 0)
		return;
	for (i = 0; i < sh->max_port; i++) {
		struct mlx5_dev_shared_port *port = &sh->port[i];
		struct rte_eth_dev *dev;
		struct mlx5_priv *priv;

		if (port->nl_ih_port_id >= RTE_MAX_ETHPORTS)
			continue;
		dev = &rte_eth_devices[port->nl_ih_port_id];
		/* Probing may initiate an LSC before configuration is done. */
		if (dev->data->dev_configured &&
		    !dev->data->dev_conf.intr_conf.lsc)
			break;
		priv = dev->data->dev_private;
		if (priv->if_index == if_index) {
			/* Block logical LSC events. */
			uint16_t prev_status = dev->data->dev_link.link_status;

			if (mlx5_link_update(dev, 0) < 0)
				DRV_LOG(ERR, "Failed to update link status: %s",
					rte_strerror(rte_errno));
			else if (prev_status != dev->data->dev_link.link_status)
				rte_eth_dev_callback_process
					(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
			break;
		}
	}
}

void
mlx5_dev_interrupt_handler_nl(void *arg)
{
	struct mlx5_dev_ctx_shared *sh = arg;
	int nlsk_fd = rte_intr_fd_get(sh->intr_handle_nl);

	if (nlsk_fd < 0)
		return;
	if (mlx5_nl_read_events(nlsk_fd, mlx5_dev_interrupt_nl_cb, sh) < 0)
		DRV_LOG(ERR, "Failed to process Netlink events: %s",
			rte_strerror(rte_errno));
}

/**
 * Handle shared asynchronous events from the NIC (removal event
 * and link status change). Supports multiport IB device.
 *
 * @param cb_arg
 *   Callback argument.
 */
void
mlx5_dev_interrupt_handler(void *cb_arg)
{
	struct mlx5_dev_ctx_shared *sh = cb_arg;
	struct ibv_async_event event;

	/* Read all messages from the IB device and acknowledge them. */
	for (;;) {
		struct rte_eth_dev *dev;
		uint32_t tmp;

		if (mlx5_glue->get_async_event(sh->cdev->ctx, &event)) {
			if (errno == EIO) {
				DRV_LOG(DEBUG,
					"IBV async event queue closed on: %s",
					sh->ibdev_name);
				mlx5_dev_interrupt_device_fatal(sh);
			}
			break;
		}
		if (event.event_type == IBV_EVENT_DEVICE_FATAL) {
			/*
			 * The DEVICE_FATAL event can be called by kernel
			 * twice - from mlx5 and uverbs layers, and port
			 * index is not applicable. We should notify all
			 * existing ports.
			 */
			mlx5_dev_interrupt_device_fatal(sh);
			mlx5_glue->ack_async_event(&event);
			continue;
		}
		/* Retrieve and check IB port index. */
		tmp = (uint32_t)event.element.port_num;
		MLX5_ASSERT(tmp <= sh->max_port);
		if (!tmp) {
			/* Unsupported device level event. */
			mlx5_glue->ack_async_event(&event);
			DRV_LOG(DEBUG,
				"unsupported common event (type %d)",
				event.event_type);
			continue;
		}
		if (tmp > sh->max_port) {
			/* Invalid IB port index. */
			mlx5_glue->ack_async_event(&event);
			DRV_LOG(DEBUG,
				"cannot handle an event (type %d)"
				" due to invalid IB port index (%u)",
				event.event_type, tmp);
			continue;
		}
		if (sh->port[tmp - 1].ih_port_id >= RTE_MAX_ETHPORTS) {
			/* No handler installed. */
			mlx5_glue->ack_async_event(&event);
			DRV_LOG(DEBUG,
				"cannot handle an event (type %d)"
				" due to no handler installed for port %u",
				event.event_type, tmp);
			continue;
		}
		/* Retrieve Ethernet device descriptor. */
		tmp = sh->port[tmp - 1].ih_port_id;
		dev = &rte_eth_devices[tmp];
		MLX5_ASSERT(dev);
		DRV_LOG(DEBUG,
			"port %u cannot handle an unknown event (type %d)",
			dev->data->port_id, event.event_type);
		mlx5_glue->ack_async_event(&event);
	}
}

/**
 * Handle DEVX interrupts from the NIC.
 * This function is probably called from the DPDK host thread.
 *
 * @param cb_arg
 *   Callback argument.
 */
void
mlx5_dev_interrupt_handler_devx(void *cb_arg)
{
#ifndef HAVE_IBV_DEVX_ASYNC
	(void)cb_arg;
	return;
#else
	struct mlx5_dev_ctx_shared *sh = cb_arg;
	union {
		struct mlx5dv_devx_async_cmd_hdr cmd_resp;
		uint8_t buf[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
			    MLX5_ST_SZ_BYTES(traffic_counter) +
			    sizeof(struct mlx5dv_devx_async_cmd_hdr)];
	} out;
	uint8_t *buf = out.buf + sizeof(out.cmd_resp);

	while (!mlx5_glue->devx_get_async_cmd_comp(sh->devx_comp,
						   &out.cmd_resp,
						   sizeof(out.buf)))
		mlx5_flow_async_pool_query_handle
			(sh, (uint64_t)out.cmd_resp.wr_id,
			 mlx5_devx_get_out_command_status(buf));
#endif /* HAVE_IBV_DEVX_ASYNC */
}

/**
 * DPDK callback to bring the link DOWN.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_set_link_down(struct rte_eth_dev *dev)
{
	return mlx5_set_flags(dev, ~IFF_UP, ~IFF_UP);
}

/**
 * DPDK callback to bring the link UP.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_set_link_up(struct rte_eth_dev *dev)
{
	return mlx5_set_flags(dev, ~IFF_UP, IFF_UP);
}

/**
 * Check if mlx5 device was removed.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   1 when device is removed, otherwise 0.
 */
int
mlx5_is_removed(struct rte_eth_dev *dev)
{
	struct ibv_device_attr device_attr;
	struct mlx5_priv *priv = dev->data->dev_private;

	if (mlx5_glue->query_device(priv->sh->cdev->ctx, &device_attr) == EIO)
		return 1;
	return 0;
}
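
/*
 * The helpers below classify ports from the sysfs "phys_port_name" value;
 * the kernel typically reports e.g. "p0" for an uplink, "pf0vf1"/"pf0sf2"
 * for VF/SF representors, "pf0hpf" for a host PF representor and a bare
 * number for the legacy naming schema.
 */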

/**
 * Analyze gathered port parameters via sysfs to recognize master
 * and representor devices for E-Switch configuration.
 *
 * @param[in] device_dir
 *   Flag of presence of the "device" directory under the port device key.
 * @param[inout] switch_info
 *   Port information, including port name as a number and port name
 *   type if recognized.
 *
 * @return
 *   master and representor flags are set in switch_info according to
 *   recognized parameters (if any).
 */
static void
mlx5_sysfs_check_switch_info(bool device_dir,
			     struct mlx5_switch_info *switch_info)
{
	switch (switch_info->name_type) {
	case MLX5_PHYS_PORT_NAME_TYPE_UNKNOWN:
		/*
		 * Name is not recognized, assume the master,
		 * check the device directory presence.
		 */
		switch_info->master = device_dir;
		break;
	case MLX5_PHYS_PORT_NAME_TYPE_NOTSET:
		/*
		 * Name is not set, this assumes the legacy naming
		 * schema for master, just check if there is
		 * a device directory.
		 */
		switch_info->master = device_dir;
		break;
	case MLX5_PHYS_PORT_NAME_TYPE_UPLINK:
		/* New uplink naming schema recognized. */
		switch_info->master = 1;
		break;
	case MLX5_PHYS_PORT_NAME_TYPE_LEGACY:
		/* Legacy representors naming schema. */
		switch_info->representor = !device_dir;
		break;
	case MLX5_PHYS_PORT_NAME_TYPE_PFHPF:
		/* Fallthrough */
	case MLX5_PHYS_PORT_NAME_TYPE_PFVF:
		/* Fallthrough */
	case MLX5_PHYS_PORT_NAME_TYPE_PFSF:
		/* New representors naming schema. */
		switch_info->representor = 1;
		break;
	default:
		switch_info->master = device_dir;
		break;
	}
}

/**
 * Get switch information associated with network interface.
 *
 * @param ifindex
 *   Network interface index.
 * @param[out] info
 *   Switch information object, populated in case of success.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_sysfs_switch_info(unsigned int ifindex, struct mlx5_switch_info *info)
{
	char ifname[IF_NAMESIZE];
	char *port_name = NULL;
	size_t port_name_size = 0;
	FILE *file;
	struct mlx5_switch_info data = {
		.master = 0,
		.representor = 0,
		.name_type = MLX5_PHYS_PORT_NAME_TYPE_NOTSET,
		.port_name = 0,
		.switch_id = 0,
	};
	DIR *dir;
	bool port_switch_id_set = false;
	bool device_dir = false;
	char c;
	ssize_t line_size;

	if (!if_indextoname(ifindex, ifname)) {
		rte_errno = errno;
		return -rte_errno;
	}

	MKSTR(phys_port_name, "/sys/class/net/%s/phys_port_name",
	      ifname);
	MKSTR(phys_switch_id, "/sys/class/net/%s/phys_switch_id",
	      ifname);
	MKSTR(pci_device, "/sys/class/net/%s/device",
	      ifname);

	file = fopen(phys_port_name, "rb");
	if (file != NULL) {
		char *tail_nl;

		line_size = getline(&port_name, &port_name_size, file);
		if (line_size < 0) {
			free(port_name);
			fclose(file);
			rte_errno = errno;
			return -rte_errno;
		} else if (line_size > 0) {
			/* Remove trailing newline character. */
			tail_nl = strchr(port_name, '\n');
			if (tail_nl)
				*tail_nl = '\0';
			mlx5_translate_port_name(port_name, &data);
		}
		free(port_name);
		fclose(file);
	}
	file = fopen(phys_switch_id, "rb");
	if (file == NULL) {
		rte_errno = errno;
		return -rte_errno;
	}
	port_switch_id_set =
		fscanf(file, "%" SCNx64 "%c", &data.switch_id, &c) == 2 &&
		c == '\n';
	fclose(file);
	dir = opendir(pci_device);
	if (dir != NULL) {
		closedir(dir);
		device_dir = true;
	}
	if (port_switch_id_set) {
		/* We have some E-Switch configuration. */
		mlx5_sysfs_check_switch_info(device_dir, &data);
	}
	*info = data;
	MLX5_ASSERT(!(data.master && data.representor));
	if (data.master && data.representor) {
		DRV_LOG(ERR, "ifindex %u device is recognized as master"
			" and as representor", ifindex);
		rte_errno = ENODEV;
		return -rte_errno;
	}
	return 0;
}

/**
 * Get bond information associated with network interface.
 *
 * @param pf_ifindex
 *   Network interface index of bond slave interface.
 * @param[out] ifindex
 *   Pointer to bond ifindex.
 * @param[out] ifname
 *   Pointer to bond ifname.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_sysfs_bond_info(unsigned int pf_ifindex, unsigned int *ifindex,
		     char *ifname)
{
	char name[IF_NAMESIZE];
	FILE *file;
	unsigned int index;
	int ret;

	if (!if_indextoname(pf_ifindex, name) || !strlen(name)) {
		rte_errno = errno;
		return -rte_errno;
	}
	MKSTR(bond_if, "/sys/class/net/%s/master/ifindex", name);
	/* Read bond ifindex. */
	file = fopen(bond_if, "rb");
	if (file == NULL) {
		rte_errno = errno;
		return -rte_errno;
	}
	ret = fscanf(file, "%u", &index);
	fclose(file);
	if (ret <= 0) {
		rte_errno = errno;
		return -rte_errno;
	}
	if (ifindex)
		*ifindex = index;

	/* Read bond device name from the symlink. */
	if (ifname) {
		if (!if_indextoname(index, ifname)) {
			rte_errno = errno;
			return -rte_errno;
		}
	}
	return 0;
}

/**
 * DPDK callback to retrieve plug-in module EEPROM information (type and size).
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] modinfo
 *   Storage for plug-in module EEPROM information.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_get_module_info(struct rte_eth_dev *dev,
		     struct rte_eth_dev_module_info *modinfo)
{
	struct ethtool_modinfo info = {
		.cmd = ETHTOOL_GMODULEINFO,
	};
	struct ifreq ifr = (struct ifreq) {
		.ifr_data = (void *)&info,
	};
	int ret = 0;

	if (!dev) {
		DRV_LOG(WARNING, "missing argument, cannot get module info");
		rte_errno = EINVAL;
		return -rte_errno;
	}
	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
	if (ret) {
		DRV_LOG(WARNING, "port %u ioctl(SIOCETHTOOL) failed: %s",
			dev->data->port_id, strerror(rte_errno));
		return ret;
	}
	modinfo->type = info.type;
	modinfo->eeprom_len = info.eeprom_len;
	return ret;
}

/**
 * DPDK callback to retrieve plug-in module EEPROM data.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] info
 *   Storage for plug-in module EEPROM data.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int mlx5_get_module_eeprom(struct rte_eth_dev *dev,
			   struct rte_dev_eeprom_info *info)
{
	struct ethtool_eeprom *eeprom;
	struct ifreq ifr;
	int ret = 0;

	if (!dev) {
		DRV_LOG(WARNING, "missing argument, cannot get module eeprom");
		rte_errno = EINVAL;
		return -rte_errno;
	}
	eeprom = mlx5_malloc(MLX5_MEM_ZERO,
			     (sizeof(struct ethtool_eeprom) + info->length), 0,
			     SOCKET_ID_ANY);
	if (!eeprom) {
		DRV_LOG(WARNING, "port %u cannot allocate memory for "
			"eeprom data", dev->data->port_id);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	eeprom->cmd = ETHTOOL_GMODULEEEPROM;
	eeprom->offset = info->offset;
	eeprom->len = info->length;
	ifr = (struct ifreq) {
		.ifr_data = (void *)eeprom,
	};
	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
	if (ret)
		DRV_LOG(WARNING, "port %u ioctl(SIOCETHTOOL) failed: %s",
			dev->data->port_id, strerror(rte_errno));
	else
		rte_memcpy(info->data, eeprom->data, info->length);
	mlx5_free(eeprom);
	return ret;
}

/**
 * Read device counters table.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] pf
 *   PF index in case of bonding device, -1 otherwise.
 * @param[out] stats
 *   Counters table output buffer.
 *
 * @return
 *   0 on success and stats is filled, negative errno value otherwise and
 *   rte_errno is set.
 */
static int
_mlx5_os_read_dev_counters(struct rte_eth_dev *dev, int pf, uint64_t *stats)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
	unsigned int i;
	struct ifreq ifr;
	unsigned int max_stats_n = RTE_MAX(xstats_ctrl->stats_n,
					   xstats_ctrl->stats_n_2nd);
	unsigned int stats_sz = max_stats_n * sizeof(uint64_t);
	unsigned char et_stat_buf[sizeof(struct ethtool_stats) + stats_sz];
	struct ethtool_stats *et_stats = (struct ethtool_stats *)et_stat_buf;
	int ret;
	uint16_t i_idx, o_idx;

	et_stats->cmd = ETHTOOL_GSTATS;
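	/*
	 * The same on-stack buffer serves both bonding members, hence it is
	 * sized by the larger of the two counter sets (max_stats_n).
	 */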
	/* Pass the maximum value, the driver may ignore this. */
	et_stats->n_stats = max_stats_n;
	ifr.ifr_data = (caddr_t)et_stats;
	if (pf >= 0)
		ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[pf].ifname,
					   SIOCETHTOOL, &ifr);
	else
		ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
	if (ret) {
		DRV_LOG(WARNING,
			"port %u unable to read statistic values from device",
			dev->data->port_id);
		return ret;
	}
	if (pf <= 0) {
		for (i = 0; i != xstats_ctrl->mlx5_stats_n; i++) {
			i_idx = xstats_ctrl->dev_table_idx[i];
			if (i_idx == UINT16_MAX || xstats_ctrl->info[i].dev)
				continue;
			o_idx = xstats_ctrl->xstats_o_idx[i];
			stats[o_idx] += (uint64_t)et_stats->data[i_idx];
		}
	} else {
		for (i = 0; i != xstats_ctrl->mlx5_stats_n; i++) {
			i_idx = xstats_ctrl->dev_table_idx_2nd[i];
			if (i_idx == UINT16_MAX)
				continue;
			o_idx = xstats_ctrl->xstats_o_idx_2nd[i];
			stats[o_idx] += (uint64_t)et_stats->data[i_idx];
		}
	}
	return 0;
}

/*
 * Read device counters.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param bond_master
 *   Indicate if the device is a bond master.
 * @param stats
 *   Counters table output buffer.
 *
 * @return
 *   0 on success and stats is filled, negative errno value otherwise and
 *   rte_errno is set.
 */
int
mlx5_os_read_dev_counters(struct rte_eth_dev *dev, bool bond_master,
			  uint64_t *stats)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
	int ret = 0, i;

	memset(stats, 0, sizeof(*stats) * xstats_ctrl->mlx5_stats_n);
	/* Read ifreq counters. */
	if (bond_master) {
		/* Sum xstats from bonding device member ports. */
		for (i = 0; i < priv->sh->bond.n_port; i++) {
			ret = _mlx5_os_read_dev_counters(dev, i, stats);
			if (ret)
				return ret;
		}
	} else {
		ret = _mlx5_os_read_dev_counters(dev, -1, stats);
		if (ret)
			return ret;
	}
	/*
	 * Read IB counters.
	 * The counters are unique per IB device but not per net IF.
	 * In bonding mode, getting the stats name only from 1 port is enough.
	 */
	for (i = 0; i != xstats_ctrl->mlx5_stats_n; i++) {
		if (!xstats_ctrl->info[i].dev)
			continue;
		/* Return the last read value if the current read fails. */
		if (mlx5_os_read_dev_stat(priv, xstats_ctrl->info[i].ctr_name,
					  &stats[i]) == 0)
			xstats_ctrl->xstats[i] = stats[i];
		else
			stats[i] = xstats_ctrl->xstats[i];
	}
	return ret;
}

/*
 * Query the number of statistics provided by ETHTOOL.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param bond_master
 *   Indicate if the device is a bond master.
 * @param n_stats
 *   Pointer to number of stats to store.
 * @param n_stats_sec
 *   Pointer to number of stats to store for the 2nd port of the bond.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
mlx5_os_get_stats_n(struct rte_eth_dev *dev, bool bond_master,
		    uint16_t *n_stats, uint16_t *n_stats_sec)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ethtool_drvinfo drvinfo;
	struct ifreq ifr;
	int ret;

	drvinfo.cmd = ETHTOOL_GDRVINFO;
	ifr.ifr_data = (caddr_t)&drvinfo;
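	/*
	 * ETHTOOL_GDRVINFO reports n_stats, which later sizes the
	 * ETHTOOL_GSTRINGS/ETHTOOL_GSTATS queries. Bonding members may
	 * expose different counter sets, so each one is queried separately.
	 */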
	/* Bonding PFs. */
	if (bond_master) {
		ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[0].ifname,
					   SIOCETHTOOL, &ifr);
		if (ret) {
			DRV_LOG(WARNING, "bonding port %u unable to query number of"
				" statistics for the 1st slave, %d", PORT_ID(priv), ret);
			return ret;
		}
		*n_stats = drvinfo.n_stats;
		ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[1].ifname,
					   SIOCETHTOOL, &ifr);
		if (ret) {
			DRV_LOG(WARNING, "bonding port %u unable to query number of"
				" statistics for the 2nd slave, %d", PORT_ID(priv), ret);
			return ret;
		}
		*n_stats_sec = drvinfo.n_stats;
	} else {
		ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
		if (ret) {
			DRV_LOG(WARNING, "port %u unable to query number of statistics",
				PORT_ID(priv));
			return ret;
		}
		*n_stats = drvinfo.n_stats;
	}
	return 0;
}

static const struct mlx5_counter_ctrl mlx5_counters_init[] = {
	{
		.dpdk_name = "rx_unicast_bytes",
		.ctr_name = "rx_vport_unicast_bytes",
	},
	{
		.dpdk_name = "rx_multicast_bytes",
		.ctr_name = "rx_vport_multicast_bytes",
	},
	{
		.dpdk_name = "rx_broadcast_bytes",
		.ctr_name = "rx_vport_broadcast_bytes",
	},
	{
		.dpdk_name = "rx_unicast_packets",
		.ctr_name = "rx_vport_unicast_packets",
	},
	{
		.dpdk_name = "rx_multicast_packets",
		.ctr_name = "rx_vport_multicast_packets",
	},
	{
		.dpdk_name = "rx_broadcast_packets",
		.ctr_name = "rx_vport_broadcast_packets",
	},
	{
		.dpdk_name = "tx_unicast_bytes",
		.ctr_name = "tx_vport_unicast_bytes",
	},
	{
		.dpdk_name = "tx_multicast_bytes",
		.ctr_name = "tx_vport_multicast_bytes",
	},
	{
		.dpdk_name = "tx_broadcast_bytes",
		.ctr_name = "tx_vport_broadcast_bytes",
	},
	{
		.dpdk_name = "tx_unicast_packets",
		.ctr_name = "tx_vport_unicast_packets",
	},
	{
		.dpdk_name = "tx_multicast_packets",
		.ctr_name = "tx_vport_multicast_packets",
	},
	{
		.dpdk_name = "tx_broadcast_packets",
		.ctr_name = "tx_vport_broadcast_packets",
	},
	{
		.dpdk_name = "rx_wqe_errors",
		.ctr_name = "rx_wqe_err",
	},
	{
		.dpdk_name = "rx_phy_crc_errors",
		.ctr_name = "rx_crc_errors_phy",
	},
	{
		.dpdk_name = "rx_phy_in_range_len_errors",
		.ctr_name = "rx_in_range_len_errors_phy",
	},
	{
		.dpdk_name = "rx_phy_symbol_errors",
		.ctr_name = "rx_symbol_err_phy",
	},
	{
		.dpdk_name = "tx_phy_errors",
		.ctr_name = "tx_errors_phy",
	},
	{
		.dpdk_name = "rx_out_of_buffer",
		.ctr_name = "out_of_buffer",
		.dev = 1,
	},
	{
		.dpdk_name = "tx_phy_packets",
		.ctr_name = "tx_packets_phy",
	},
	{
		.dpdk_name = "rx_phy_packets",
		.ctr_name = "rx_packets_phy",
	},
	{
		.dpdk_name = "tx_phy_discard_packets",
		.ctr_name = "tx_discards_phy",
	},
	{
		.dpdk_name = "rx_phy_discard_packets",
		.ctr_name = "rx_discards_phy",
	},
	{
		.dpdk_name = "rx_prio0_buf_discard_packets",
		.ctr_name = "rx_prio0_buf_discard",
	},
	{
		.dpdk_name = "rx_prio1_buf_discard_packets",
		.ctr_name = "rx_prio1_buf_discard",
	},
	{
		.dpdk_name = "rx_prio2_buf_discard_packets",
		.ctr_name = "rx_prio2_buf_discard",
	},
	{
		.dpdk_name = "rx_prio3_buf_discard_packets",
		.ctr_name = "rx_prio3_buf_discard",
	},
	{
		.dpdk_name = "rx_prio4_buf_discard_packets",
		.ctr_name = "rx_prio4_buf_discard",
	},
	{
		.dpdk_name = "rx_prio5_buf_discard_packets",
		.ctr_name = "rx_prio5_buf_discard",
	},
	{
		.dpdk_name = "rx_prio6_buf_discard_packets",
		.ctr_name = "rx_prio6_buf_discard",
	},
	{
		.dpdk_name = "rx_prio7_buf_discard_packets",
		.ctr_name = "rx_prio7_buf_discard",
	},
	{
		.dpdk_name = "rx_prio0_cong_discard_packets",
		.ctr_name = "rx_prio0_cong_discard",
	},
	{
		.dpdk_name = "rx_prio1_cong_discard_packets",
		.ctr_name = "rx_prio1_cong_discard",
	},
	{
		.dpdk_name = "rx_prio2_cong_discard_packets",
		.ctr_name = "rx_prio2_cong_discard",
	},
	{
		.dpdk_name = "rx_prio3_cong_discard_packets",
		.ctr_name = "rx_prio3_cong_discard",
	},
	{
		.dpdk_name = "rx_prio4_cong_discard_packets",
		.ctr_name = "rx_prio4_cong_discard",
	},
	{
		.dpdk_name = "rx_prio5_cong_discard_packets",
		.ctr_name = "rx_prio5_cong_discard",
	},
	{
		.dpdk_name = "rx_prio6_cong_discard_packets",
		.ctr_name = "rx_prio6_cong_discard",
	},
	{
		.dpdk_name = "rx_prio7_cong_discard_packets",
		.ctr_name = "rx_prio7_cong_discard",
	},
	{
		.dpdk_name = "tx_phy_bytes",
		.ctr_name = "tx_bytes_phy",
	},
	{
		.dpdk_name = "rx_phy_bytes",
		.ctr_name = "rx_bytes_phy",
	},
	/* Representor only */
	{
		.dpdk_name = "rx_vport_packets",
		.ctr_name = "vport_rx_packets",
	},
	{
		.dpdk_name = "rx_vport_bytes",
		.ctr_name = "vport_rx_bytes",
	},
	{
		.dpdk_name = "tx_vport_packets",
		.ctr_name = "vport_tx_packets",
	},
	{
		.dpdk_name = "tx_vport_bytes",
		.ctr_name = "vport_tx_bytes",
	},
	/**
	 * Device counters: these counters cover the entire PCI device (NIC)
	 * and are not counted on a per port/queue basis.
	 */
	{
		.dpdk_name = "rx_pci_signal_integrity",
		.ctr_name = "rx_pci_signal_integrity",
	},
	{
		.dpdk_name = "tx_pci_signal_integrity",
		.ctr_name = "tx_pci_signal_integrity",
	},
	{
		.dpdk_name = "outbound_pci_buffer_overflow",
		.ctr_name = "outbound_pci_buffer_overflow",
	},
	{
		.dpdk_name = "outbound_pci_stalled_rd",
		.ctr_name = "outbound_pci_stalled_rd",
	},
	{
		.dpdk_name = "outbound_pci_stalled_wr",
		.ctr_name = "outbound_pci_stalled_wr",
	},
	{
		.dpdk_name = "outbound_pci_stalled_rd_events",
		.ctr_name = "outbound_pci_stalled_rd_events",
	},
	{
		.dpdk_name = "outbound_pci_stalled_wr_events",
		.ctr_name = "outbound_pci_stalled_wr_events",
	},
	{
		.dpdk_name = "dev_out_of_buffer",
		.ctr_name = "dev_out_of_buffer",
	},
};

static const unsigned int xstats_n = RTE_DIM(mlx5_counters_init);

static int
mlx5_os_get_stats_strings(struct rte_eth_dev *dev, bool bond_master,
			  struct ethtool_gstrings *strings,
			  uint32_t stats_n, uint32_t stats_n_2nd)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
	struct ifreq ifr;
	int ret;
	uint32_t i, j, idx;
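
	/*
	 * Build the name-to-index maps: for every known counter in
	 * mlx5_counters_init[], record the matching ethtool string index
	 * (dev_table_idx) and its output slot (xstats_o_idx), so later
	 * counter reads become plain array lookups.
	 */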
	/* Ensure no out-of-bounds access below. */
	MLX5_ASSERT(xstats_n <= MLX5_MAX_XSTATS);
	strings->cmd = ETHTOOL_GSTRINGS;
	strings->string_set = ETH_SS_STATS;
	strings->len = stats_n;
	ifr.ifr_data = (caddr_t)strings;
	if (bond_master)
		ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[0].ifname,
					   SIOCETHTOOL, &ifr);
	else
		ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
	if (ret) {
		DRV_LOG(WARNING, "port %u unable to get statistic names with %d",
			PORT_ID(priv), ret);
		return ret;
	}
	/* Reorganize the orders to reduce the iterations. */
	for (j = 0; j < xstats_n; j++) {
		xstats_ctrl->dev_table_idx[j] = UINT16_MAX;
		for (i = 0; i < stats_n; i++) {
			const char *curr_string =
				(const char *)&strings->data[i * ETH_GSTRING_LEN];

			if (!strcmp(mlx5_counters_init[j].ctr_name, curr_string)) {
				idx = xstats_ctrl->mlx5_stats_n++;
				xstats_ctrl->dev_table_idx[j] = i;
				xstats_ctrl->xstats_o_idx[j] = idx;
				xstats_ctrl->info[idx] = mlx5_counters_init[j];
			}
		}
	}
	if (!bond_master) {
		/* Add dev counters, unique per IB device. */
		for (j = 0; j != xstats_n; j++) {
			if (mlx5_counters_init[j].dev) {
				idx = xstats_ctrl->mlx5_stats_n++;
				xstats_ctrl->info[idx] = mlx5_counters_init[j];
				xstats_ctrl->hw_stats[idx] = 0;
			}
		}
		return 0;
	}

	strings->len = stats_n_2nd;
	ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[1].ifname,
				   SIOCETHTOOL, &ifr);
	if (ret) {
		DRV_LOG(WARNING, "port %u unable to get statistic names for 2nd slave with %d",
			PORT_ID(priv), ret);
		return ret;
	}
	/* The 2nd slave port may have a different strings set, based on the configuration. */
	for (j = 0; j != xstats_n; j++) {
		xstats_ctrl->dev_table_idx_2nd[j] = UINT16_MAX;
		for (i = 0; i != stats_n_2nd; i++) {
			const char *curr_string =
				(const char *)&strings->data[i * ETH_GSTRING_LEN];

			if (!strcmp(mlx5_counters_init[j].ctr_name, curr_string)) {
				xstats_ctrl->dev_table_idx_2nd[j] = i;
				if (xstats_ctrl->dev_table_idx[j] != UINT16_MAX) {
					/* Already mapped in the 1st slave port. */
					idx = xstats_ctrl->xstats_o_idx[j];
					xstats_ctrl->xstats_o_idx_2nd[j] = idx;
				} else {
					/* Append the new items to the end of the map. */
					idx = xstats_ctrl->mlx5_stats_n++;
					xstats_ctrl->xstats_o_idx_2nd[j] = idx;
					xstats_ctrl->info[idx] = mlx5_counters_init[j];
				}
			}
		}
	}
	/* Device counters are always appended at the end. */
	for (j = 0; j != xstats_n; j++) {
		if (mlx5_counters_init[j].dev) {
			idx = xstats_ctrl->mlx5_stats_n++;
			xstats_ctrl->info[idx] = mlx5_counters_init[j];
			xstats_ctrl->hw_stats[idx] = 0;
		}
	}
	return 0;
}

/**
 * Init the structures to read device counters.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
void
mlx5_os_stats_init(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
	struct mlx5_stats_ctrl *stats_ctrl = &priv->stats_ctrl;
	struct ethtool_gstrings *strings = NULL;
	uint16_t dev_stats_n = 0;
	uint16_t dev_stats_n_2nd = 0;
	unsigned int max_stats_n;
	unsigned int str_sz;
	int ret;
	bool bond_master = (priv->master && priv->pf_bond >= 0);
	/* Reset so the values do not accumulate across repeated init calls. */
	xstats_ctrl->mlx5_stats_n = 0;
	ret = mlx5_os_get_stats_n(dev, bond_master, &dev_stats_n, &dev_stats_n_2nd);
	if (ret < 0) {
		DRV_LOG(WARNING, "port %u no extended statistics available",
			dev->data->port_id);
		return;
	}
	max_stats_n = RTE_MAX(dev_stats_n, dev_stats_n_2nd);
	/* Allocate memory to grab stat names and values. */
	str_sz = max_stats_n * ETH_GSTRING_LEN;
	strings = (struct ethtool_gstrings *)
		  mlx5_malloc(0, str_sz + sizeof(struct ethtool_gstrings), 0,
			      SOCKET_ID_ANY);
	if (!strings) {
		DRV_LOG(WARNING, "port %u unable to allocate memory for xstats",
			dev->data->port_id);
		return;
	}
	ret = mlx5_os_get_stats_strings(dev, bond_master, strings,
					dev_stats_n, dev_stats_n_2nd);
	if (ret < 0) {
		DRV_LOG(WARNING, "port %u failed to get the stats strings",
			dev->data->port_id);
		goto free;
	}
	xstats_ctrl->stats_n = dev_stats_n;
	xstats_ctrl->stats_n_2nd = dev_stats_n_2nd;
	/* Copy to base at first time. */
	ret = mlx5_os_read_dev_counters(dev, bond_master, xstats_ctrl->base);
	if (ret)
		DRV_LOG(ERR, "port %u cannot read device counters: %s",
			dev->data->port_id, strerror(rte_errno));
	mlx5_os_read_dev_stat(priv, "out_of_buffer", &stats_ctrl->imissed_base);
	stats_ctrl->imissed = 0;
free:
	mlx5_free(strings);
}

/**
 * Get MAC address by querying netdevice.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[out] mac
 *   MAC address output buffer.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[RTE_ETHER_ADDR_LEN])
{
	struct ifreq request;
	int ret;

	ret = mlx5_ifreq(dev, SIOCGIFHWADDR, &request);
	if (ret)
		return ret;
	memcpy(mac, request.ifr_hwaddr.sa_data, RTE_ETHER_ADDR_LEN);
	return 0;
}

/*
 * Query dropless_rq private flag value provided by ETHTOOL.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   - 0 on success, flag is not set.
 *   - 1 on success, flag is set.
 *   - negative errno value otherwise and rte_errno is set.
 */
int mlx5_get_flag_dropless_rq(struct rte_eth_dev *dev)
{
	struct ethtool_sset_info *sset_info = NULL;
	struct ethtool_drvinfo drvinfo;
	struct ifreq ifr;
	struct ethtool_gstrings *strings = NULL;
	struct ethtool_value flags;
	const int32_t flag_len = sizeof(flags.data) * CHAR_BIT;
	int32_t str_sz;
	int32_t len;
	int32_t i;
	int ret;

	sset_info = mlx5_malloc(0, sizeof(struct ethtool_sset_info) +
				sizeof(uint32_t), 0, SOCKET_ID_ANY);
	if (sset_info == NULL) {
		rte_errno = ENOMEM;
		return -rte_errno;
	}
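	/*
	 * Private flag lookup: query how many private flags exist
	 * (ETHTOOL_GSSET_INFO, falling back to ETHTOOL_GDRVINFO), fetch
	 * their names with ETHTOOL_GSTRINGS, locate "dropless_rq" and
	 * finally test the corresponding bit from ETHTOOL_GPFLAGS.
	 */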
	sset_info->cmd = ETHTOOL_GSSET_INFO;
	sset_info->reserved = 0;
	sset_info->sset_mask = 1ULL << ETH_SS_PRIV_FLAGS;
	ifr.ifr_data = (caddr_t)sset_info;
	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
	if (!ret) {
		const uint32_t *sset_lengths = sset_info->data;

		len = sset_info->sset_mask ? sset_lengths[0] : 0;
	} else if (ret == -EOPNOTSUPP) {
		drvinfo.cmd = ETHTOOL_GDRVINFO;
		ifr.ifr_data = (caddr_t)&drvinfo;
		ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
		if (ret) {
			DRV_LOG(WARNING, "port %u cannot get the driver info",
				dev->data->port_id);
			goto exit;
		}
		len = *(uint32_t *)((char *)&drvinfo +
				    offsetof(struct ethtool_drvinfo, n_priv_flags));
	} else {
		DRV_LOG(WARNING, "port %u cannot get the sset info",
			dev->data->port_id);
		goto exit;
	}
	if (!len) {
		DRV_LOG(WARNING, "port %u does not have private flag",
			dev->data->port_id);
		rte_errno = EOPNOTSUPP;
		ret = -rte_errno;
		goto exit;
	} else if (len > flag_len) {
		DRV_LOG(WARNING, "port %u maximal private flags number is %d",
			dev->data->port_id, flag_len);
		len = flag_len;
	}
	str_sz = ETH_GSTRING_LEN * len;
	strings = (struct ethtool_gstrings *)
		  mlx5_malloc(0, str_sz + sizeof(struct ethtool_gstrings), 0,
			      SOCKET_ID_ANY);
	if (!strings) {
		DRV_LOG(WARNING, "port %u unable to allocate memory for"
			" private flags", dev->data->port_id);
		rte_errno = ENOMEM;
		ret = -rte_errno;
		goto exit;
	}
	strings->cmd = ETHTOOL_GSTRINGS;
	strings->string_set = ETH_SS_PRIV_FLAGS;
	strings->len = len;
	ifr.ifr_data = (caddr_t)strings;
	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
	if (ret) {
		DRV_LOG(WARNING, "port %u unable to get private flags strings",
			dev->data->port_id);
		goto exit;
	}
	for (i = 0; i < len; i++) {
		strings->data[(i + 1) * ETH_GSTRING_LEN - 1] = 0;
		if (!strcmp((const char *)strings->data + i * ETH_GSTRING_LEN,
			    "dropless_rq"))
			break;
	}
	if (i == len) {
		DRV_LOG(WARNING, "port %u does not support dropless_rq",
			dev->data->port_id);
		rte_errno = EOPNOTSUPP;
		ret = -rte_errno;
		goto exit;
	}
	flags.cmd = ETHTOOL_GPFLAGS;
	ifr.ifr_data = (caddr_t)&flags;
	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
	if (ret) {
		DRV_LOG(WARNING, "port %u unable to get private flags status",
			dev->data->port_id);
		goto exit;
	}
	ret = !!(flags.data & (1U << i));
exit:
	mlx5_free(strings);
	mlx5_free(sset_info);
	return ret;
}

/**
 * Unmaps HCA PCI BAR from the current process address space.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void mlx5_txpp_unmap_hca_bar(struct rte_eth_dev *dev)
{
	struct mlx5_proc_priv *ppriv = dev->process_private;

	if (ppriv && ppriv->hca_bar) {
		rte_mem_unmap(ppriv->hca_bar, MLX5_ST_SZ_BYTES(initial_seg));
		ppriv->hca_bar = NULL;
	}
}

/**
 * Maps HCA PCI BAR to the current process address space.
 * Stores the pointer in the process private structure allowing
 * to read the internal and real time counters directly from the HW.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success and a non-NULL pointer to the mapped area stored in the
 *   process private structure, a negative errno value otherwise (the
 *   pointer stays NULL).
 */
int mlx5_txpp_map_hca_bar(struct rte_eth_dev *dev)
{
	struct mlx5_proc_priv *ppriv = dev->process_private;
	char pci_addr[PCI_PRI_STR_SIZE] = { 0 };
	void *base, *expected = NULL;
	int fd, ret;

	if (!ppriv) {
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	if (ppriv->hca_bar)
		return 0;
	ret = mlx5_dev_to_pci_str(dev->device, pci_addr, sizeof(pci_addr));
	if (ret < 0)
		return -rte_errno;
	/* Open PCI device resource 0 - HCA initialize segment. */
	MKSTR(name, "/sys/bus/pci/devices/%s/resource0", pci_addr);
	fd = open(name, O_RDWR | O_SYNC);
	if (fd == -1) {
		rte_errno = ENOTSUP;
		return -ENOTSUP;
	}
	base = rte_mem_map(NULL, MLX5_ST_SZ_BYTES(initial_seg),
			   RTE_PROT_READ, RTE_MAP_SHARED, fd, 0);
	close(fd);
	if (!base) {
		rte_errno = ENOTSUP;
		return -ENOTSUP;
	}
	/* Check there is no concurrent mapping in other thread. */
	if (!__atomic_compare_exchange_n(&ppriv->hca_bar, &expected,
					 base, false,
					 __ATOMIC_RELAXED, __ATOMIC_RELAXED))
		rte_mem_unmap(base, MLX5_ST_SZ_BYTES(initial_seg));
	return 0;
}