/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <inttypes.h>
#include <unistd.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#include <dirent.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <fcntl.h>
#include <stdalign.h>
#include <sys/un.h>
#include <time.h>

#include <ethdev_driver.h>
#include <bus_pci_driver.h>
#include <rte_mbuf.h>
#include <rte_common.h>
#include <rte_eal_paging.h>
#include <rte_interrupts.h>
#include <rte_malloc.h>
#include <rte_string_fns.h>
#include <rte_rwlock.h>
#include <rte_cycles.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_common.h>
#include <mlx5_malloc.h>
#include <mlx5_nl.h>

#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"

/* Supported speed values found in /usr/include/linux/ethtool.h */
#ifndef HAVE_SUPPORTED_40000baseKR4_Full
#define SUPPORTED_40000baseKR4_Full (1 << 23)
#endif
#ifndef HAVE_SUPPORTED_40000baseCR4_Full
#define SUPPORTED_40000baseCR4_Full (1 << 24)
#endif
#ifndef HAVE_SUPPORTED_40000baseSR4_Full
#define SUPPORTED_40000baseSR4_Full (1 << 25)
#endif
#ifndef HAVE_SUPPORTED_40000baseLR4_Full
#define SUPPORTED_40000baseLR4_Full (1 << 26)
#endif
#ifndef HAVE_SUPPORTED_56000baseKR4_Full
#define SUPPORTED_56000baseKR4_Full (1 << 27)
#endif
#ifndef HAVE_SUPPORTED_56000baseCR4_Full
#define SUPPORTED_56000baseCR4_Full (1 << 28)
#endif
#ifndef HAVE_SUPPORTED_56000baseSR4_Full
#define SUPPORTED_56000baseSR4_Full (1 << 29)
#endif
#ifndef HAVE_SUPPORTED_56000baseLR4_Full
#define SUPPORTED_56000baseLR4_Full (1 << 30)
#endif

/* Add defines in case the running kernel is not the same as user headers. */
#ifndef ETHTOOL_GLINKSETTINGS
struct ethtool_link_settings {
	uint32_t cmd;
	uint32_t speed;
	uint8_t duplex;
	uint8_t port;
	uint8_t phy_address;
	uint8_t autoneg;
	uint8_t mdio_support;
	uint8_t eth_tp_mdix;
	uint8_t eth_tp_mdix_ctrl;
	int8_t link_mode_masks_nwords;
	uint32_t reserved[8];
	uint32_t link_mode_masks[];
};

/* The kernel values can be found in /include/uapi/linux/ethtool.h */
#define ETHTOOL_GLINKSETTINGS 0x0000004c
#define ETHTOOL_LINK_MODE_1000baseT_Full_BIT 5
#define ETHTOOL_LINK_MODE_Autoneg_BIT 6
#define ETHTOOL_LINK_MODE_1000baseKX_Full_BIT 17
#define ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT 18
#define ETHTOOL_LINK_MODE_10000baseKR_Full_BIT 19
#define ETHTOOL_LINK_MODE_10000baseR_FEC_BIT 20
#define ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT 21
#define ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT 22
#define ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT 23
#define ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT 24
#define ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT 25
#define ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT 26
#define ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT 27
#define ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT 28
#define ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT 29
#define ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT 30
#endif
#ifndef HAVE_ETHTOOL_LINK_MODE_25G
#define ETHTOOL_LINK_MODE_25000baseCR_Full_BIT 31
#define ETHTOOL_LINK_MODE_25000baseKR_Full_BIT 32
#define ETHTOOL_LINK_MODE_25000baseSR_Full_BIT 33
#endif
#ifndef HAVE_ETHTOOL_LINK_MODE_50G
#define ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT 34
#define ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT 35
#endif
#ifndef HAVE_ETHTOOL_LINK_MODE_100G
#define ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT 36
#define ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT 37
#define ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT 38
#define ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT 39
#endif
#ifndef HAVE_ETHTOOL_LINK_MODE_200G
#define ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT 62
#define ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT 63
#define ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT 0 /* 64 - 64 */
#define ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT 1 /* 65 - 64 */
#define ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT 2 /* 66 - 64 */
#endif

/* Get interface index from SubFunction device name. */
int
mlx5_auxiliary_get_ifindex(const char *sf_name)
{
	char if_name[IF_NAMESIZE] = { 0 };

	if (mlx5_auxiliary_get_child_name(sf_name, "/net",
					  if_name, sizeof(if_name)) != 0)
		return -rte_errno;
	return if_nametoindex(if_name);
}

/**
 * Get interface name from private structure.
 *
 * This is a port representor-aware version of mlx5_get_ifname_sysfs().
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[out] ifname
 *   Interface name output buffer.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[MLX5_NAMESIZE])
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int ifindex;

	MLX5_ASSERT(priv);
	MLX5_ASSERT(priv->sh);
	if (priv->master && priv->sh->bond.ifindex > 0) {
		memcpy(ifname, priv->sh->bond.ifname, MLX5_NAMESIZE);
		return 0;
	}
	ifindex = mlx5_ifindex(dev);
	if (!ifindex) {
		if (!priv->representor)
			return mlx5_get_ifname_sysfs(priv->sh->ibdev_path,
						     *ifname);
		rte_errno = ENXIO;
		return -rte_errno;
	}
	if (if_indextoname(ifindex, &(*ifname)[0]))
		return 0;
	rte_errno = errno;
	return -rte_errno;
}

/**
 * Perform ifreq ioctl() on associated netdev ifname.
 *
 * @param[in] ifname
 *   Pointer to netdev name.
 * @param req
 *   Request number to pass to ioctl().
 * @param[out] ifr
 *   Interface request structure output buffer.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_ifreq_by_ifname(const char *ifname, int req, struct ifreq *ifr)
{
	int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
	int ret = 0;

	if (sock == -1) {
		rte_errno = errno;
		return -rte_errno;
	}
	rte_strscpy(ifr->ifr_name, ifname, sizeof(ifr->ifr_name));
	ret = ioctl(sock, req, ifr);
	if (ret == -1) {
		rte_errno = errno;
		goto error;
	}
	close(sock);
	return 0;
error:
	close(sock);
	return -rte_errno;
}

/**
 * Perform ifreq ioctl() on associated Ethernet device.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param req
 *   Request number to pass to ioctl().
 * @param[out] ifr
 *   Interface request structure output buffer.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr)
{
	char ifname[sizeof(ifr->ifr_name)];
	int ret;

	ret = mlx5_get_ifname(dev, &ifname);
	if (ret)
		return -rte_errno;
	return mlx5_ifreq_by_ifname(ifname, req, ifr);
}

/**
 * Get device MTU.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[out] mtu
 *   MTU value output buffer.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu)
{
	struct ifreq request;
	int ret = mlx5_ifreq(dev, SIOCGIFMTU, &request);

	if (ret)
		return ret;
	*mtu = request.ifr_mtu;
	return 0;
}

/**
 * Set device MTU.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param mtu
 *   MTU value to set.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct ifreq request = { .ifr_mtu = mtu, };

	return mlx5_ifreq(dev, SIOCSIFMTU, &request);
}

/**
 * Set device flags.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param keep
 *   Bitmask for flags that must remain untouched.
 * @param flags
 *   Bitmask for flags to modify.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep, unsigned int flags)
{
	struct ifreq request;
	int ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &request);

	if (ret)
		return ret;
	request.ifr_flags &= keep;
	request.ifr_flags |= flags & ~keep;
	return mlx5_ifreq(dev, SIOCSIFFLAGS, &request);
}

/**
 * Get device current raw clock counter.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] clock
 *   Current raw clock counter of the device.
 *
 * @return
 *   0 if the clock has been read correctly, the value of errno otherwise.
 */
int
mlx5_read_clock(struct rte_eth_dev *dev, uint64_t *clock)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_context *ctx = priv->sh->cdev->ctx;
	struct ibv_values_ex values;
	int err = 0;

	values.comp_mask = IBV_VALUES_MASK_RAW_CLOCK;
	err = mlx5_glue->query_rt_values_ex(ctx, &values);
	if (err != 0) {
		DRV_LOG(WARNING, "Could not query the clock!");
		return err;
	}
	*clock = values.raw_clock.tv_nsec;
	return 0;
}

/**
 * Retrieve the master device for representor in the same switch domain.
 *
 * @param dev
 *   Pointer to representor Ethernet device structure.
 *
 * @return
 *   Master device structure on success, NULL otherwise.
 */
static struct rte_eth_dev *
mlx5_find_master_dev(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv;
	uint16_t port_id;
	uint16_t domain_id;

	priv = dev->data->dev_private;
	domain_id = priv->domain_id;
	MLX5_ASSERT(priv->representor);
	MLX5_ETH_FOREACH_DEV(port_id, dev->device) {
		struct mlx5_priv *opriv =
			rte_eth_devices[port_id].data->dev_private;
		if (opriv &&
		    opriv->master &&
		    opriv->domain_id == domain_id &&
		    opriv->sh == priv->sh)
			return &rte_eth_devices[port_id];
	}
	return NULL;
}

/**
 * Retrieve physical link information (unlocked version using legacy ioctl).
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] link
 *   Storage for current link status.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev,
			       struct rte_eth_link *link)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ethtool_cmd edata = {
		.cmd = ETHTOOL_GSET /* Deprecated since Linux v4.5. */
	};
	struct ifreq ifr;
	struct rte_eth_link dev_link;
	int link_speed = 0;
	int ret;

	ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr);
	if (ret) {
		DRV_LOG(WARNING, "port %u ioctl(SIOCGIFFLAGS) failed: %s",
			dev->data->port_id, strerror(rte_errno));
		return ret;
	}
	dev_link = (struct rte_eth_link) {
		.link_status = ((ifr.ifr_flags & IFF_UP) &&
				(ifr.ifr_flags & IFF_RUNNING)),
	};
	ifr = (struct ifreq) {
		.ifr_data = (void *)&edata,
	};
	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
	if (ret) {
		if (ret == -ENOTSUP && priv->representor) {
			struct rte_eth_dev *master;

			/*
			 * For representors we can try to inherit link
			 * settings from the master device. Link settings
			 * do not make much sense for representors, which
			 * lack a physical link. Old kernel drivers support
			 * an emulated settings query for representors,
			 * newer ones do not, so keep this fallback for
			 * compatibility.
			 */
			master = mlx5_find_master_dev(dev);
			if (master) {
				ifr = (struct ifreq) {
					.ifr_data = (void *)&edata,
				};
				ret = mlx5_ifreq(master, SIOCETHTOOL, &ifr);
			}
		}
		if (ret) {
			DRV_LOG(WARNING,
				"port %u ioctl(SIOCETHTOOL,"
				" ETHTOOL_GSET) failed: %s",
				dev->data->port_id, strerror(rte_errno));
			return ret;
		}
	}
	link_speed = ethtool_cmd_speed(&edata);
	if (link_speed == -1)
		dev_link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
	else
		dev_link.link_speed = link_speed;
	priv->link_speed_capa = 0;
	if (edata.supported & (SUPPORTED_1000baseT_Full |
			       SUPPORTED_1000baseKX_Full))
		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_1G;
	if (edata.supported & SUPPORTED_10000baseKR_Full)
		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_10G;
	if (edata.supported & (SUPPORTED_40000baseKR4_Full |
			       SUPPORTED_40000baseCR4_Full |
			       SUPPORTED_40000baseSR4_Full |
			       SUPPORTED_40000baseLR4_Full))
		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_40G;
	dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
			RTE_ETH_LINK_HALF_DUPLEX : RTE_ETH_LINK_FULL_DUPLEX);
	dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
				  RTE_ETH_LINK_SPEED_FIXED);
	*link = dev_link;
	return 0;
}

/**
 * Retrieve physical link information (unlocked version using new ioctl).
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] link
 *   Storage for current link status.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev,
			     struct rte_eth_link *link)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ethtool_link_settings gcmd = { .cmd = ETHTOOL_GLINKSETTINGS };
	struct ifreq ifr;
	struct rte_eth_link dev_link;
	struct rte_eth_dev *master = NULL;
	uint64_t sc;
	int ret;

	ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr);
	if (ret) {
		DRV_LOG(WARNING, "port %u ioctl(SIOCGIFFLAGS) failed: %s",
			dev->data->port_id, strerror(rte_errno));
		return ret;
	}
	dev_link = (struct rte_eth_link) {
		.link_status = ((ifr.ifr_flags & IFF_UP) &&
				(ifr.ifr_flags & IFF_RUNNING)),
	};
	ifr = (struct ifreq) {
		.ifr_data = (void *)&gcmd,
	};
	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
	if (ret) {
		if (ret == -ENOTSUP && priv->representor) {
			/*
			 * For representors we can try to inherit link
			 * settings from the master device. Link settings
			 * do not make much sense for representors, which
			 * lack a physical link. Old kernel drivers support
			 * an emulated settings query for representors,
			 * newer ones do not, so keep this fallback for
			 * compatibility.
			 */
			master = mlx5_find_master_dev(dev);
			if (master) {
				ifr = (struct ifreq) {
					.ifr_data = (void *)&gcmd,
				};
				ret = mlx5_ifreq(master, SIOCETHTOOL, &ifr);
			}
		}
		if (ret) {
			DRV_LOG(DEBUG,
				"port %u ioctl(SIOCETHTOOL,"
				" ETHTOOL_GLINKSETTINGS) failed: %s",
				dev->data->port_id, strerror(rte_errno));
			return ret;
		}
	}
	/*
	 * The kernel reports the required number of 32-bit words for the
	 * link mode masks as a negative value on the probe request above;
	 * negate it to size the full request below.
	 */
	gcmd.link_mode_masks_nwords = -gcmd.link_mode_masks_nwords;

	alignas(struct ethtool_link_settings)
	uint8_t data[offsetof(struct ethtool_link_settings, link_mode_masks) +
		     sizeof(uint32_t) * gcmd.link_mode_masks_nwords * 3];
	struct ethtool_link_settings *ecmd = (void *)data;

	*ecmd = gcmd;
	ifr.ifr_data = (void *)ecmd;
	ret = mlx5_ifreq(master ? master : dev, SIOCETHTOOL, &ifr);
	if (ret) {
		DRV_LOG(DEBUG,
			"port %u ioctl(SIOCETHTOOL,"
			" ETHTOOL_GLINKSETTINGS) failed: %s",
			dev->data->port_id, strerror(rte_errno));
		return ret;
	}
	dev_link.link_speed = (ecmd->speed == UINT32_MAX) ?
			      RTE_ETH_SPEED_NUM_UNKNOWN : ecmd->speed;
	sc = ecmd->link_mode_masks[0] |
		((uint64_t)ecmd->link_mode_masks[1] << 32);
	priv->link_speed_capa = 0;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseT_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT)))
		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_1G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT)))
		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_10G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT)))
		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_20G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT)))
		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_40G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT)))
		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_56G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT)))
		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_25G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT)))
		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_50G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT)))
		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_100G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT)))
		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_200G;

	sc = ecmd->link_mode_masks[2] |
		((uint64_t)ecmd->link_mode_masks[3] << 32);
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT) |
		  MLX5_BITSHIFT
			(ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT)))
		priv->link_speed_capa |= RTE_ETH_LINK_SPEED_200G;
	dev_link.link_duplex = ((ecmd->duplex == DUPLEX_HALF) ?
			RTE_ETH_LINK_HALF_DUPLEX : RTE_ETH_LINK_FULL_DUPLEX);
	dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
				  RTE_ETH_LINK_SPEED_FIXED);
	*link = dev_link;
	return 0;
}

/**
 * DPDK callback to retrieve physical link information.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param wait_to_complete
 *   Wait for request completion.
 *
 * @return
 *   0 if link status was not updated, positive if it was, a negative errno
 *   value otherwise and rte_errno is set.
 */
int
mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	int ret;
	struct rte_eth_link dev_link;
	time_t start_time = time(NULL);
	int retry = MLX5_GET_LINK_STATUS_RETRY_COUNT;

	do {
		ret = mlx5_link_update_unlocked_gs(dev, &dev_link);
		if (ret == -ENOTSUP)
			ret = mlx5_link_update_unlocked_gset(dev, &dev_link);
		if (ret == 0)
			break;
		/* Handle wait to complete situation. */
		if ((wait_to_complete || retry) && ret == -EAGAIN) {
			if (abs((int)difftime(time(NULL), start_time)) <
			    MLX5_LINK_STATUS_TIMEOUT) {
				usleep(0);
				continue;
			} else {
				rte_errno = EBUSY;
				return -rte_errno;
			}
		} else if (ret < 0) {
			return ret;
		}
	} while (wait_to_complete || retry-- > 0);
	ret = !!memcmp(&dev->data->dev_link, &dev_link,
		       sizeof(struct rte_eth_link));
	dev->data->dev_link = dev_link;
	return ret;
}

/**
 * DPDK callback to get flow control status.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] fc_conf
 *   Flow control output buffer.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct ifreq ifr;
	struct ethtool_pauseparam ethpause = {
		.cmd = ETHTOOL_GPAUSEPARAM
	};
	int ret;

	ifr.ifr_data = (void *)&ethpause;
	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
	if (ret) {
		DRV_LOG(WARNING,
			"port %u ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM) failed:"
			" %s",
			dev->data->port_id, strerror(rte_errno));
		return ret;
	}
	fc_conf->autoneg = ethpause.autoneg;
	if (ethpause.rx_pause && ethpause.tx_pause)
		fc_conf->mode = RTE_ETH_FC_FULL;
	else if (ethpause.rx_pause)
		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
	else if (ethpause.tx_pause)
		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_ETH_FC_NONE;
	return 0;
}

/**
 * DPDK callback to modify flow control parameters.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[in] fc_conf
 *   Flow control parameters.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct ifreq ifr;
	struct ethtool_pauseparam ethpause = {
		.cmd = ETHTOOL_SPAUSEPARAM
	};
	int ret;

	ifr.ifr_data = (void *)&ethpause;
	ethpause.autoneg = fc_conf->autoneg;
	if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
	    (fc_conf->mode & RTE_ETH_FC_RX_PAUSE))
		ethpause.rx_pause = 1;
	else
		ethpause.rx_pause = 0;

	if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
	    (fc_conf->mode & RTE_ETH_FC_TX_PAUSE))
		ethpause.tx_pause = 1;
	else
		ethpause.tx_pause = 0;
	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
	if (ret) {
		DRV_LOG(WARNING,
			"port %u ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)"
			" failed: %s",
			dev->data->port_id, strerror(rte_errno));
		return ret;
	}
	return 0;
}

/**
 * Handle asynchronous removal event for entire multiport device.
 *
 * @param sh
 *   Infiniband device shared context.
 */
static void
mlx5_dev_interrupt_device_fatal(struct mlx5_dev_ctx_shared *sh)
{
	uint32_t i;

	for (i = 0; i < sh->max_port; ++i) {
		struct rte_eth_dev *dev;
		struct mlx5_priv *priv;

		if (sh->port[i].ih_port_id >= RTE_MAX_ETHPORTS) {
			/*
			 * The port does not exist or no handler
			 * is installed for it.
			 */
			continue;
		}
		dev = &rte_eth_devices[sh->port[i].ih_port_id];
		MLX5_ASSERT(dev);
		priv = dev->data->dev_private;
		MLX5_ASSERT(priv);
		if (!priv->rmv_notified && dev->data->dev_conf.intr_conf.rmv) {
			/* Notify driver about removal only once. */
			priv->rmv_notified = 1;
			rte_eth_dev_callback_process
				(dev, RTE_ETH_EVENT_INTR_RMV, NULL);
		}
	}
}

/**
 * Check whether a Netlink-reported interface index belongs to this device:
 * either the port's own ifindex or, with bonding, the bond interface or
 * any of its member ports.
 */
static bool
mlx5_dev_nl_ifindex_verify(uint32_t if_index, struct mlx5_priv *priv)
{
	struct mlx5_bond_info *bond = &priv->sh->bond;
	int i;

	if (bond->n_port == 0)
		return (if_index == priv->if_index);

	if (if_index == bond->ifindex)
		return true;
	for (i = 0; i < bond->n_port; i++) {
		if (i >= MLX5_BOND_MAX_PORTS)
			return false;
		if (if_index == bond->ports[i].ifindex)
			return true;
	}

	return false;
}

/**
 * Refresh the link status of a bonding device from the bond interface flags.
 */
static void
mlx5_link_update_bond(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_bond_info *bond = &priv->sh->bond;
	struct ifreq ifr = (struct ifreq) {
		.ifr_flags = 0,
	};
	int ret;

	ret = mlx5_ifreq_by_ifname(bond->ifname, SIOCGIFFLAGS, &ifr);
	if (ret) {
		DRV_LOG(WARNING, "ifname %s ioctl(SIOCGIFFLAGS) failed: %s",
			bond->ifname, strerror(rte_errno));
		return;
	}
	dev->data->dev_link.link_status =
		((ifr.ifr_flags & IFF_UP) && (ifr.ifr_flags & IFF_RUNNING));
}

/**
 * Process a Netlink link status update and trigger an LSC event
 * on the matching port if the status changed.
 */
static void
mlx5_dev_interrupt_nl_cb(struct nlmsghdr *hdr, void *cb_arg)
{
	struct mlx5_dev_ctx_shared *sh = cb_arg;
	uint32_t i;
	uint32_t if_index;

	if (mlx5_nl_parse_link_status_update(hdr, &if_index) < 0)
		return;
	for (i = 0; i < sh->max_port; i++) {
		struct mlx5_dev_shared_port *port = &sh->port[i];
		struct rte_eth_dev *dev;
		struct mlx5_priv *priv;

		if (port->nl_ih_port_id >= RTE_MAX_ETHPORTS)
			continue;
		dev = &rte_eth_devices[port->nl_ih_port_id];
		/* Probing may initiate an LSC before configuration is done. */
		if (dev->data->dev_configured &&
		    !dev->data->dev_conf.intr_conf.lsc)
			break;
		priv = dev->data->dev_private;
		if (mlx5_dev_nl_ifindex_verify(if_index, priv)) {
			/* Block logical LSC events. */
			uint16_t prev_status = dev->data->dev_link.link_status;

			if (mlx5_link_update(dev, 0) < 0) {
				DRV_LOG(ERR, "Failed to update link status: %s",
					rte_strerror(rte_errno));
			} else {
				if (priv->sh->bond.n_port)
					mlx5_link_update_bond(dev);
				if (prev_status != dev->data->dev_link.link_status)
					rte_eth_dev_callback_process
						(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
			}
			break;
		}
	}
}

/**
 * Handle Netlink interrupts: read pending link status events and
 * dispatch them to mlx5_dev_interrupt_nl_cb().
 *
 * @param arg
 *   Callback argument (shared device context).
 */
void
mlx5_dev_interrupt_handler_nl(void *arg)
{
	struct mlx5_dev_ctx_shared *sh = arg;
	int nlsk_fd = rte_intr_fd_get(sh->intr_handle_nl);

	if (nlsk_fd < 0)
		return;
	if (mlx5_nl_read_events(nlsk_fd, mlx5_dev_interrupt_nl_cb, sh) < 0)
		DRV_LOG(ERR, "Failed to process Netlink events: %s",
			rte_strerror(rte_errno));
}

/**
 * Handle shared asynchronous events from the NIC (removal event
 * and link status change). Supports multiport IB device.
 *
 * @param cb_arg
 *   Callback argument.
 */
void
mlx5_dev_interrupt_handler(void *cb_arg)
{
	struct mlx5_dev_ctx_shared *sh = cb_arg;
	struct ibv_async_event event;

	/* Read all messages from the IB device and acknowledge them. */
	for (;;) {
		struct rte_eth_dev *dev;
		uint32_t tmp;

		if (mlx5_glue->get_async_event(sh->cdev->ctx, &event)) {
			if (errno == EIO) {
				DRV_LOG(DEBUG,
					"IBV async event queue closed on: %s",
					sh->ibdev_name);
				mlx5_dev_interrupt_device_fatal(sh);
			}
			break;
		}
		if (event.event_type == IBV_EVENT_DEVICE_FATAL) {
			/*
			 * The DEVICE_FATAL event can be delivered by the
			 * kernel twice - from the mlx5 and uverbs layers -
			 * and the port index is not applicable. Notify all
			 * existing ports.
			 */
			mlx5_dev_interrupt_device_fatal(sh);
			mlx5_glue->ack_async_event(&event);
			continue;
		}
		/* Retrieve and check IB port index. */
		tmp = (uint32_t)event.element.port_num;
		MLX5_ASSERT(tmp <= sh->max_port);
		if (!tmp) {
			/* Unsupported device level event. */
			mlx5_glue->ack_async_event(&event);
			DRV_LOG(DEBUG,
				"unsupported common event (type %d)",
				event.event_type);
			continue;
		}
		if (tmp > sh->max_port) {
			/* Invalid IB port index. */
			mlx5_glue->ack_async_event(&event);
			DRV_LOG(DEBUG,
				"cannot handle an event (type %d) "
				"due to invalid IB port index (%u)",
				event.event_type, tmp);
			continue;
		}
		if (sh->port[tmp - 1].ih_port_id >= RTE_MAX_ETHPORTS) {
			/* No handler installed. */
			mlx5_glue->ack_async_event(&event);
			DRV_LOG(DEBUG,
				"cannot handle an event (type %d) "
				"due to no handler installed for port %u",
				event.event_type, tmp);
			continue;
		}
		/* Retrieve Ethernet device descriptor. */
		tmp = sh->port[tmp - 1].ih_port_id;
		dev = &rte_eth_devices[tmp];
		MLX5_ASSERT(dev);
		DRV_LOG(DEBUG,
			"port %u cannot handle an unknown event (type %d)",
			dev->data->port_id, event.event_type);
		mlx5_glue->ack_async_event(&event);
	}
}

/**
 * Handle DEVX interrupts from the NIC.
 * This function is probably called from the DPDK host thread.
 *
 * @param cb_arg
 *   Callback argument.
 */
void
mlx5_dev_interrupt_handler_devx(void *cb_arg)
{
#ifndef HAVE_IBV_DEVX_ASYNC
	(void)cb_arg;
	return;
#else
	struct mlx5_dev_ctx_shared *sh = cb_arg;
	union {
		struct mlx5dv_devx_async_cmd_hdr cmd_resp;
		uint8_t buf[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
			    MLX5_ST_SZ_BYTES(traffic_counter) +
			    sizeof(struct mlx5dv_devx_async_cmd_hdr)];
	} out;
	uint8_t *buf = out.buf + sizeof(out.cmd_resp);

	while (!mlx5_glue->devx_get_async_cmd_comp(sh->devx_comp,
						   &out.cmd_resp,
						   sizeof(out.buf)))
		mlx5_flow_async_pool_query_handle
			(sh, (uint64_t)out.cmd_resp.wr_id,
			 mlx5_devx_get_out_command_status(buf));
#endif /* HAVE_IBV_DEVX_ASYNC */
}

/**
 * DPDK callback to bring the link DOWN.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_set_link_down(struct rte_eth_dev *dev)
{
	return mlx5_set_flags(dev, ~IFF_UP, ~IFF_UP);
}

/**
 * DPDK callback to bring the link UP.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_set_link_up(struct rte_eth_dev *dev)
{
	return mlx5_set_flags(dev, ~IFF_UP, IFF_UP);
}

/**
 * Check if mlx5 device was removed.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   1 when device is removed, otherwise 0.
 */
int
mlx5_is_removed(struct rte_eth_dev *dev)
{
	struct ibv_device_attr device_attr;
	struct mlx5_priv *priv = dev->data->dev_private;

	if (mlx5_glue->query_device(priv->sh->cdev->ctx, &device_attr) == EIO)
		return 1;
	return 0;
}

/**
 * Analyze gathered port parameters via sysfs to recognize master
 * and representor devices for E-Switch configuration.
 *
 * @param[in] device_dir
 *   Flag of presence of "device" directory under port device key.
 * @param[inout] switch_info
 *   Port information, including port name as a number and port name
 *   type if recognized.
 *
 * @return
 *   Master and representor flags are set in switch_info according to
 *   recognized parameters (if any).
 */
static void
mlx5_sysfs_check_switch_info(bool device_dir,
			     struct mlx5_switch_info *switch_info)
{
	switch (switch_info->name_type) {
	case MLX5_PHYS_PORT_NAME_TYPE_UNKNOWN:
		/*
		 * Name is not recognized, assume the master,
		 * check the device directory presence.
		 */
		switch_info->master = device_dir;
		break;
	case MLX5_PHYS_PORT_NAME_TYPE_NOTSET:
		/*
		 * Name is not set, this assumes the legacy naming
		 * schema for master, just check if there is
		 * a device directory.
		 */
		switch_info->master = device_dir;
		break;
	case MLX5_PHYS_PORT_NAME_TYPE_UPLINK:
		/* New uplink naming schema recognized. */
		switch_info->master = 1;
		break;
	case MLX5_PHYS_PORT_NAME_TYPE_LEGACY:
		/* Legacy representors naming schema. */
		switch_info->representor = !device_dir;
		break;
	case MLX5_PHYS_PORT_NAME_TYPE_PFHPF:
		/* Fallthrough */
	case MLX5_PHYS_PORT_NAME_TYPE_PFVF:
		/* Fallthrough */
	case MLX5_PHYS_PORT_NAME_TYPE_PFSF:
		/* New representors naming schema. */
		switch_info->representor = 1;
		break;
	default:
		switch_info->master = device_dir;
		break;
	}
}

/**
 * Get switch information associated with network interface.
 *
 * @param ifindex
 *   Network interface index.
 * @param[out] info
 *   Switch information object, populated in case of success.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_sysfs_switch_info(unsigned int ifindex, struct mlx5_switch_info *info)
{
	char ifname[IF_NAMESIZE];
	char *port_name = NULL;
	size_t port_name_size = 0;
	FILE *file;
	struct mlx5_switch_info data = {
		.master = 0,
		.representor = 0,
		.name_type = MLX5_PHYS_PORT_NAME_TYPE_NOTSET,
		.port_name = 0,
		.switch_id = 0,
	};
	DIR *dir;
	bool port_switch_id_set = false;
	bool device_dir = false;
	char c;
	ssize_t line_size;

	if (!if_indextoname(ifindex, ifname)) {
		rte_errno = errno;
		return -rte_errno;
	}

	MKSTR(phys_port_name, "/sys/class/net/%s/phys_port_name",
	      ifname);
	MKSTR(phys_switch_id, "/sys/class/net/%s/phys_switch_id",
	      ifname);
	MKSTR(pci_device, "/sys/class/net/%s/device",
	      ifname);

	file = fopen(phys_port_name, "rb");
	if (file != NULL) {
		char *tail_nl;

		line_size = getline(&port_name, &port_name_size, file);
		if (line_size < 0) {
			free(port_name);
			fclose(file);
			rte_errno = errno;
			return -rte_errno;
		} else if (line_size > 0) {
			/* Remove trailing newline character. */
			tail_nl = strchr(port_name, '\n');
			if (tail_nl)
				*tail_nl = '\0';
			mlx5_translate_port_name(port_name, &data);
		}
		free(port_name);
		fclose(file);
	}
	file = fopen(phys_switch_id, "rb");
	if (file == NULL) {
		rte_errno = errno;
		return -rte_errno;
	}
	port_switch_id_set =
		fscanf(file, "%" SCNx64 "%c", &data.switch_id, &c) == 2 &&
		c == '\n';
	fclose(file);
	dir = opendir(pci_device);
	if (dir != NULL) {
		closedir(dir);
		device_dir = true;
	}
	if (port_switch_id_set) {
		/* We have some E-Switch configuration. */
		mlx5_sysfs_check_switch_info(device_dir, &data);
	}
	*info = data;
	MLX5_ASSERT(!(data.master && data.representor));
	if (data.master && data.representor) {
		DRV_LOG(ERR, "ifindex %u device is recognized as master"
			" and as representor", ifindex);
		rte_errno = ENODEV;
		return -rte_errno;
	}
	return 0;
}

/**
 * Get bond information associated with network interface.
 *
 * @param pf_ifindex
 *   Network interface index of bond slave interface.
 * @param[out] ifindex
 *   Pointer to bond ifindex.
 * @param[out] ifname
 *   Pointer to bond ifname.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_sysfs_bond_info(unsigned int pf_ifindex, unsigned int *ifindex,
		     char *ifname)
{
	char name[IF_NAMESIZE];
	FILE *file;
	unsigned int index;
	int ret;

	if (!if_indextoname(pf_ifindex, name) || !strlen(name)) {
		rte_errno = errno;
		return -rte_errno;
	}
	MKSTR(bond_if, "/sys/class/net/%s/master/ifindex", name);
	/* Read the bond interface index. */
	file = fopen(bond_if, "rb");
	if (file == NULL) {
		rte_errno = errno;
		return -rte_errno;
	}
	ret = fscanf(file, "%u", &index);
	fclose(file);
	if (ret <= 0) {
		rte_errno = errno;
		return -rte_errno;
	}
	if (ifindex)
		*ifindex = index;

	/* Get the bond device name from the interface index. */
	if (ifname) {
		if (!if_indextoname(index, ifname)) {
			rte_errno = errno;
			return -rte_errno;
		}
	}
	return 0;
}

/**
 * DPDK callback to retrieve plug-in module EEPROM information (type and size).
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] modinfo
 *   Storage for plug-in module EEPROM information.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
12361256805dSOphir Munk */ 12371256805dSOphir Munk int 12381256805dSOphir Munk mlx5_get_module_info(struct rte_eth_dev *dev, 12391256805dSOphir Munk struct rte_eth_dev_module_info *modinfo) 12401256805dSOphir Munk { 12411256805dSOphir Munk struct ethtool_modinfo info = { 12421256805dSOphir Munk .cmd = ETHTOOL_GMODULEINFO, 12431256805dSOphir Munk }; 12441256805dSOphir Munk struct ifreq ifr = (struct ifreq) { 12451256805dSOphir Munk .ifr_data = (void *)&info, 12461256805dSOphir Munk }; 12471256805dSOphir Munk int ret = 0; 12481256805dSOphir Munk 1249e2bd08d5SChengchang Tang if (!dev) { 12501256805dSOphir Munk DRV_LOG(WARNING, "missing argument, cannot get module info"); 12511256805dSOphir Munk rte_errno = EINVAL; 12521256805dSOphir Munk return -rte_errno; 12531256805dSOphir Munk } 12541256805dSOphir Munk ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); 12551256805dSOphir Munk if (ret) { 12561256805dSOphir Munk DRV_LOG(WARNING, "port %u ioctl(SIOCETHTOOL) failed: %s", 12571256805dSOphir Munk dev->data->port_id, strerror(rte_errno)); 12581256805dSOphir Munk return ret; 12591256805dSOphir Munk } 12601256805dSOphir Munk modinfo->type = info.type; 12611256805dSOphir Munk modinfo->eeprom_len = info.eeprom_len; 12621256805dSOphir Munk return ret; 12631256805dSOphir Munk } 12641256805dSOphir Munk 12651256805dSOphir Munk /** 12661256805dSOphir Munk * DPDK callback to retrieve plug-in module EEPROM data. 12671256805dSOphir Munk * 12681256805dSOphir Munk * @param dev 12691256805dSOphir Munk * Pointer to Ethernet device structure. 12701256805dSOphir Munk * @param[out] info 12711256805dSOphir Munk * Storage for plug-in module EEPROM data. 12721256805dSOphir Munk * 12731256805dSOphir Munk * @return 12741256805dSOphir Munk * 0 on success, a negative errno value otherwise and rte_errno is set. 
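 *
 * Illustrative only: a hypothetical application-side sketch through the
 * ethdev API, reusing the "port_id" and "modinfo" names assumed above:
 *
 * @code
 * uint8_t buf[256];
 * struct rte_dev_eeprom_info eeprom_info = {
 *	.data = buf,
 *	.offset = 0,
 *	.length = RTE_MIN(modinfo.eeprom_len, (uint32_t)sizeof(buf)),
 * };
 *
 * if (rte_eth_dev_get_module_eeprom(port_id, &eeprom_info) == 0)
 *	printf("read %u EEPROM bytes\n", eeprom_info.length);
 * @endcode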
12751256805dSOphir Munk */ 12761256805dSOphir Munk int mlx5_get_module_eeprom(struct rte_eth_dev *dev, 12771256805dSOphir Munk struct rte_dev_eeprom_info *info) 12781256805dSOphir Munk { 12791256805dSOphir Munk struct ethtool_eeprom *eeprom; 12801256805dSOphir Munk struct ifreq ifr; 12811256805dSOphir Munk int ret = 0; 12821256805dSOphir Munk 1283e2bd08d5SChengchang Tang if (!dev) { 12841256805dSOphir Munk DRV_LOG(WARNING, "missing argument, cannot get module eeprom"); 12851256805dSOphir Munk rte_errno = EINVAL; 12861256805dSOphir Munk return -rte_errno; 12871256805dSOphir Munk } 12882175c4dcSSuanming Mou eeprom = mlx5_malloc(MLX5_MEM_ZERO, 12892175c4dcSSuanming Mou (sizeof(struct ethtool_eeprom) + info->length), 0, 12902175c4dcSSuanming Mou SOCKET_ID_ANY); 12911256805dSOphir Munk if (!eeprom) { 12921256805dSOphir Munk DRV_LOG(WARNING, "port %u cannot allocate memory for " 12931256805dSOphir Munk "eeprom data", dev->data->port_id); 12941256805dSOphir Munk rte_errno = ENOMEM; 12951256805dSOphir Munk return -rte_errno; 12961256805dSOphir Munk } 12971256805dSOphir Munk eeprom->cmd = ETHTOOL_GMODULEEEPROM; 12981256805dSOphir Munk eeprom->offset = info->offset; 12991256805dSOphir Munk eeprom->len = info->length; 13001256805dSOphir Munk ifr = (struct ifreq) { 13011256805dSOphir Munk .ifr_data = (void *)eeprom, 13021256805dSOphir Munk }; 13031256805dSOphir Munk ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); 13041256805dSOphir Munk if (ret) 13051256805dSOphir Munk DRV_LOG(WARNING, "port %u ioctl(SIOCETHTOOL) failed: %s", 13061256805dSOphir Munk dev->data->port_id, strerror(rte_errno)); 13071256805dSOphir Munk else 13081256805dSOphir Munk rte_memcpy(info->data, eeprom->data, info->length); 13092175c4dcSSuanming Mou mlx5_free(eeprom); 13101256805dSOphir Munk return ret; 13111256805dSOphir Munk } 131298c4b12aSOphir Munk 131398c4b12aSOphir Munk /** 131498c4b12aSOphir Munk * Read device counters table. 131598c4b12aSOphir Munk * 131698c4b12aSOphir Munk * @param dev 131798c4b12aSOphir Munk * Pointer to Ethernet device. 13187ed15acdSXueming Li * @param[in] pf 13197ed15acdSXueming Li * PF index in case of bonding device, -1 otherwise 132098c4b12aSOphir Munk * @param[out] stats 132198c4b12aSOphir Munk * Counters table output buffer. 132298c4b12aSOphir Munk * 132398c4b12aSOphir Munk * @return 132498c4b12aSOphir Munk * 0 on success and stats is filled, negative errno value otherwise and 132598c4b12aSOphir Munk * rte_errno is set. 132698c4b12aSOphir Munk */ 13277ed15acdSXueming Li static int 13287ed15acdSXueming Li _mlx5_os_read_dev_counters(struct rte_eth_dev *dev, int pf, uint64_t *stats) 132998c4b12aSOphir Munk { 133098c4b12aSOphir Munk struct mlx5_priv *priv = dev->data->dev_private; 133198c4b12aSOphir Munk struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; 133298c4b12aSOphir Munk unsigned int i; 133398c4b12aSOphir Munk struct ifreq ifr; 1334a687c3e6SBing Zhao unsigned int max_stats_n = RTE_MAX(xstats_ctrl->stats_n, xstats_ctrl->stats_n_2nd); 1335a687c3e6SBing Zhao unsigned int stats_sz = max_stats_n * sizeof(uint64_t); 133698c4b12aSOphir Munk unsigned char et_stat_buf[sizeof(struct ethtool_stats) + stats_sz]; 133798c4b12aSOphir Munk struct ethtool_stats *et_stats = (struct ethtool_stats *)et_stat_buf; 133898c4b12aSOphir Munk int ret; 1339a687c3e6SBing Zhao uint16_t i_idx, o_idx; 134098c4b12aSOphir Munk 134198c4b12aSOphir Munk et_stats->cmd = ETHTOOL_GSTATS; 1342a687c3e6SBing Zhao /* Pass the maximum value, the driver may ignore this. 
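 * In bonding mode both member ports are queried with this same buffer,
 * which is why it is sized above with RTE_MAX() of the two members'
 * string counts; the non-bonded case simply uses stats_n.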
*/ 1343a687c3e6SBing Zhao et_stats->n_stats = max_stats_n; 134498c4b12aSOphir Munk ifr.ifr_data = (caddr_t)et_stats; 13457ed15acdSXueming Li if (pf >= 0) 13467ed15acdSXueming Li ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[pf].ifname, 13477ed15acdSXueming Li SIOCETHTOOL, &ifr); 13487ed15acdSXueming Li else 134998c4b12aSOphir Munk ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); 135098c4b12aSOphir Munk if (ret) { 135198c4b12aSOphir Munk DRV_LOG(WARNING, 135298c4b12aSOphir Munk "port %u unable to read statistic values from device", 135398c4b12aSOphir Munk dev->data->port_id); 135498c4b12aSOphir Munk return ret; 135598c4b12aSOphir Munk } 1356a687c3e6SBing Zhao if (pf <= 0) { 1357a687c3e6SBing Zhao for (i = 0; i != xstats_ctrl->mlx5_stats_n; i++) { 1358a687c3e6SBing Zhao i_idx = xstats_ctrl->dev_table_idx[i]; 1359a687c3e6SBing Zhao if (i_idx == UINT16_MAX || xstats_ctrl->info[i].dev) 13607ed15acdSXueming Li continue; 1361a687c3e6SBing Zhao o_idx = xstats_ctrl->xstats_o_idx[i]; 1362a687c3e6SBing Zhao stats[o_idx] += (uint64_t)et_stats->data[i_idx]; 1363a687c3e6SBing Zhao } 1364a687c3e6SBing Zhao } else { 1365a687c3e6SBing Zhao for (i = 0; i != xstats_ctrl->mlx5_stats_n; i++) { 1366a687c3e6SBing Zhao i_idx = xstats_ctrl->dev_table_idx_2nd[i]; 1367a687c3e6SBing Zhao if (i_idx == UINT16_MAX) 1368a687c3e6SBing Zhao continue; 1369a687c3e6SBing Zhao o_idx = xstats_ctrl->xstats_o_idx_2nd[i]; 1370a687c3e6SBing Zhao stats[o_idx] += (uint64_t)et_stats->data[i_idx]; 1371a687c3e6SBing Zhao } 13727ed15acdSXueming Li } 13737ed15acdSXueming Li return 0; 13747ed15acdSXueming Li } 13757ed15acdSXueming Li 1376a687c3e6SBing Zhao /* 13777ed15acdSXueming Li * Read device counters. 13787ed15acdSXueming Li * 13797ed15acdSXueming Li * @param dev 13807ed15acdSXueming Li * Pointer to Ethernet device. 1381a687c3e6SBing Zhao * @param bond_master 1382a687c3e6SBing Zhao * Indicate if the device is a bond master. 1383a687c3e6SBing Zhao * @param stats 13847ed15acdSXueming Li * Counters table output buffer. 13857ed15acdSXueming Li * 13867ed15acdSXueming Li * @return 13877ed15acdSXueming Li * 0 on success and stats is filled, negative errno value otherwise and 13887ed15acdSXueming Li * rte_errno is set. 13897ed15acdSXueming Li */ 13907ed15acdSXueming Li int 1391a687c3e6SBing Zhao mlx5_os_read_dev_counters(struct rte_eth_dev *dev, bool bond_master, uint64_t *stats) 13927ed15acdSXueming Li { 13937ed15acdSXueming Li struct mlx5_priv *priv = dev->data->dev_private; 13947ed15acdSXueming Li struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; 13957ed15acdSXueming Li int ret = 0, i; 13967ed15acdSXueming Li 13977ed15acdSXueming Li memset(stats, 0, sizeof(*stats) * xstats_ctrl->mlx5_stats_n); 13987ed15acdSXueming Li /* Read ifreq counters. */ 1399a687c3e6SBing Zhao if (bond_master) { 14007ed15acdSXueming Li /* Sum xstats from bonding device member ports. */ 14017ed15acdSXueming Li for (i = 0; i < priv->sh->bond.n_port; i++) { 14027ed15acdSXueming Li ret = _mlx5_os_read_dev_counters(dev, i, stats); 14037ed15acdSXueming Li if (ret) 14047ed15acdSXueming Li return ret; 14057ed15acdSXueming Li } 14067ed15acdSXueming Li } else { 14077ed15acdSXueming Li ret = _mlx5_os_read_dev_counters(dev, -1, stats); 1408eadc35dfSGeoffrey Le Gourriérec if (ret) 1409eadc35dfSGeoffrey Le Gourriérec return ret; 14107ed15acdSXueming Li } 1411a687c3e6SBing Zhao /* 1412a687c3e6SBing Zhao * Read IB counters. 1413a687c3e6SBing Zhao * The counters are unique per IB device but not per net IF. 
1414a687c3e6SBing Zhao * In bonding mode, getting the stats name only from 1 port is enough. 1415a687c3e6SBing Zhao */ 1416a687c3e6SBing Zhao for (i = 0; i != xstats_ctrl->mlx5_stats_n; i++) { 14177ed15acdSXueming Li if (!xstats_ctrl->info[i].dev) 14187ed15acdSXueming Li continue; 141998c4b12aSOphir Munk /* return last xstats counter if fail to read. */ 1420eadc35dfSGeoffrey Le Gourriérec if (mlx5_os_read_dev_stat(priv, xstats_ctrl->info[i].ctr_name, 1421eadc35dfSGeoffrey Le Gourriérec &stats[i]) == 0) 142298c4b12aSOphir Munk xstats_ctrl->xstats[i] = stats[i]; 142398c4b12aSOphir Munk else 142498c4b12aSOphir Munk stats[i] = xstats_ctrl->xstats[i]; 142598c4b12aSOphir Munk } 14267ed15acdSXueming Li return ret; 142798c4b12aSOphir Munk } 142898c4b12aSOphir Munk 1429a687c3e6SBing Zhao /* 143098c4b12aSOphir Munk * Query the number of statistics provided by ETHTOOL. 143198c4b12aSOphir Munk * 143298c4b12aSOphir Munk * @param dev 143398c4b12aSOphir Munk * Pointer to Ethernet device. 1434a687c3e6SBing Zhao * @param bond_master 1435a687c3e6SBing Zhao * Indicate if the device is a bond master. 1436a687c3e6SBing Zhao * @param n_stats 1437a687c3e6SBing Zhao * Pointer to number of stats to store. 1438a687c3e6SBing Zhao * @param n_stats_sec 1439a687c3e6SBing Zhao * Pointer to number of stats to store for the 2nd port of the bond. 144098c4b12aSOphir Munk * 144198c4b12aSOphir Munk * @return 1442a687c3e6SBing Zhao * 0 on success, negative errno value otherwise and rte_errno is set. 144398c4b12aSOphir Munk */ 144498c4b12aSOphir Munk int 1445a687c3e6SBing Zhao mlx5_os_get_stats_n(struct rte_eth_dev *dev, bool bond_master, 1446a687c3e6SBing Zhao uint16_t *n_stats, uint16_t *n_stats_sec) 144798c4b12aSOphir Munk { 14487ed15acdSXueming Li struct mlx5_priv *priv = dev->data->dev_private; 144998c4b12aSOphir Munk struct ethtool_drvinfo drvinfo; 145098c4b12aSOphir Munk struct ifreq ifr; 145198c4b12aSOphir Munk int ret; 145298c4b12aSOphir Munk 145398c4b12aSOphir Munk drvinfo.cmd = ETHTOOL_GDRVINFO; 145498c4b12aSOphir Munk ifr.ifr_data = (caddr_t)&drvinfo; 1455a687c3e6SBing Zhao /* Bonding PFs. 
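 * When the port is a bond master the request is issued once per member
 * netdev, since each member may expose a different number of statistic
 * strings; both counts are returned through n_stats and n_stats_sec.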
*/ 1456a687c3e6SBing Zhao if (bond_master) { 14577ed15acdSXueming Li ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[0].ifname, 14587ed15acdSXueming Li SIOCETHTOOL, &ifr); 1459a687c3e6SBing Zhao if (ret) { 1460a687c3e6SBing Zhao DRV_LOG(WARNING, "bonding port %u unable to query number of" 1461a687c3e6SBing Zhao " statistics for the 1st slave, %d", PORT_ID(priv), ret); 1462a687c3e6SBing Zhao return ret; 1463a687c3e6SBing Zhao } 1464a687c3e6SBing Zhao *n_stats = drvinfo.n_stats; 1465a687c3e6SBing Zhao ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[1].ifname, 1466a687c3e6SBing Zhao SIOCETHTOOL, &ifr); 1467a687c3e6SBing Zhao if (ret) { 1468a687c3e6SBing Zhao DRV_LOG(WARNING, "bonding port %u unable to query number of" 1469a687c3e6SBing Zhao " statistics for the 2nd slave, %d", PORT_ID(priv), ret); 1470a687c3e6SBing Zhao return ret; 1471a687c3e6SBing Zhao } 1472a687c3e6SBing Zhao *n_stats_sec = drvinfo.n_stats; 1473a687c3e6SBing Zhao } else { 147498c4b12aSOphir Munk ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); 147598c4b12aSOphir Munk if (ret) { 147698c4b12aSOphir Munk DRV_LOG(WARNING, "port %u unable to query number of statistics", 1477a687c3e6SBing Zhao PORT_ID(priv)); 147898c4b12aSOphir Munk return ret; 147998c4b12aSOphir Munk } 1480a687c3e6SBing Zhao *n_stats = drvinfo.n_stats; 1481a687c3e6SBing Zhao } 1482a687c3e6SBing Zhao return 0; 148398c4b12aSOphir Munk } 148498c4b12aSOphir Munk 148598c4b12aSOphir Munk static const struct mlx5_counter_ctrl mlx5_counters_init[] = { 148698c4b12aSOphir Munk { 14871101809bSViacheslav Ovsiienko .dpdk_name = "rx_unicast_bytes", 148898c4b12aSOphir Munk .ctr_name = "rx_vport_unicast_bytes", 148998c4b12aSOphir Munk }, 149098c4b12aSOphir Munk { 14911101809bSViacheslav Ovsiienko .dpdk_name = "rx_multicast_bytes", 149298c4b12aSOphir Munk .ctr_name = "rx_vport_multicast_bytes", 149398c4b12aSOphir Munk }, 149498c4b12aSOphir Munk { 14951101809bSViacheslav Ovsiienko .dpdk_name = "rx_broadcast_bytes", 149698c4b12aSOphir Munk .ctr_name = "rx_vport_broadcast_bytes", 149798c4b12aSOphir Munk }, 149898c4b12aSOphir Munk { 14991101809bSViacheslav Ovsiienko .dpdk_name = "rx_unicast_packets", 150098c4b12aSOphir Munk .ctr_name = "rx_vport_unicast_packets", 150198c4b12aSOphir Munk }, 150298c4b12aSOphir Munk { 15031101809bSViacheslav Ovsiienko .dpdk_name = "rx_multicast_packets", 150498c4b12aSOphir Munk .ctr_name = "rx_vport_multicast_packets", 150598c4b12aSOphir Munk }, 150698c4b12aSOphir Munk { 15071101809bSViacheslav Ovsiienko .dpdk_name = "rx_broadcast_packets", 150898c4b12aSOphir Munk .ctr_name = "rx_vport_broadcast_packets", 150998c4b12aSOphir Munk }, 151098c4b12aSOphir Munk { 15111101809bSViacheslav Ovsiienko .dpdk_name = "tx_unicast_bytes", 151298c4b12aSOphir Munk .ctr_name = "tx_vport_unicast_bytes", 151398c4b12aSOphir Munk }, 151498c4b12aSOphir Munk { 15151101809bSViacheslav Ovsiienko .dpdk_name = "tx_multicast_bytes", 151698c4b12aSOphir Munk .ctr_name = "tx_vport_multicast_bytes", 151798c4b12aSOphir Munk }, 151898c4b12aSOphir Munk { 15191101809bSViacheslav Ovsiienko .dpdk_name = "tx_broadcast_bytes", 152098c4b12aSOphir Munk .ctr_name = "tx_vport_broadcast_bytes", 152198c4b12aSOphir Munk }, 152298c4b12aSOphir Munk { 15231101809bSViacheslav Ovsiienko .dpdk_name = "tx_unicast_packets", 152498c4b12aSOphir Munk .ctr_name = "tx_vport_unicast_packets", 152598c4b12aSOphir Munk }, 152698c4b12aSOphir Munk { 15271101809bSViacheslav Ovsiienko .dpdk_name = "tx_multicast_packets", 152898c4b12aSOphir Munk .ctr_name = "tx_vport_multicast_packets", 152998c4b12aSOphir Munk }, 
153098c4b12aSOphir Munk { 15311101809bSViacheslav Ovsiienko .dpdk_name = "tx_broadcast_packets", 153298c4b12aSOphir Munk .ctr_name = "tx_vport_broadcast_packets", 153398c4b12aSOphir Munk }, 153498c4b12aSOphir Munk { 15351101809bSViacheslav Ovsiienko .dpdk_name = "rx_wqe_errors", 153698c4b12aSOphir Munk .ctr_name = "rx_wqe_err", 153798c4b12aSOphir Munk }, 153898c4b12aSOphir Munk { 15391101809bSViacheslav Ovsiienko .dpdk_name = "rx_phy_crc_errors", 154098c4b12aSOphir Munk .ctr_name = "rx_crc_errors_phy", 154198c4b12aSOphir Munk }, 154298c4b12aSOphir Munk { 15431101809bSViacheslav Ovsiienko .dpdk_name = "rx_phy_in_range_len_errors", 154498c4b12aSOphir Munk .ctr_name = "rx_in_range_len_errors_phy", 154598c4b12aSOphir Munk }, 154698c4b12aSOphir Munk { 15471101809bSViacheslav Ovsiienko .dpdk_name = "rx_phy_symbol_errors", 154898c4b12aSOphir Munk .ctr_name = "rx_symbol_err_phy", 154998c4b12aSOphir Munk }, 155098c4b12aSOphir Munk { 15511101809bSViacheslav Ovsiienko .dpdk_name = "tx_phy_errors", 155298c4b12aSOphir Munk .ctr_name = "tx_errors_phy", 155398c4b12aSOphir Munk }, 155498c4b12aSOphir Munk { 155598c4b12aSOphir Munk .dpdk_name = "rx_out_of_buffer", 155698c4b12aSOphir Munk .ctr_name = "out_of_buffer", 155798c4b12aSOphir Munk .dev = 1, 155898c4b12aSOphir Munk }, 155998c4b12aSOphir Munk { 15601101809bSViacheslav Ovsiienko .dpdk_name = "tx_phy_packets", 156198c4b12aSOphir Munk .ctr_name = "tx_packets_phy", 156298c4b12aSOphir Munk }, 156398c4b12aSOphir Munk { 15641101809bSViacheslav Ovsiienko .dpdk_name = "rx_phy_packets", 156598c4b12aSOphir Munk .ctr_name = "rx_packets_phy", 156698c4b12aSOphir Munk }, 156798c4b12aSOphir Munk { 15681101809bSViacheslav Ovsiienko .dpdk_name = "tx_phy_discard_packets", 156998c4b12aSOphir Munk .ctr_name = "tx_discards_phy", 157098c4b12aSOphir Munk }, 157198c4b12aSOphir Munk { 15721101809bSViacheslav Ovsiienko .dpdk_name = "rx_phy_discard_packets", 157398c4b12aSOphir Munk .ctr_name = "rx_discards_phy", 157498c4b12aSOphir Munk }, 157598c4b12aSOphir Munk { 15762bd03a43SRongwei Liu .dpdk_name = "rx_prio0_buf_discard_packets", 15772bd03a43SRongwei Liu .ctr_name = "rx_prio0_buf_discard", 15782bd03a43SRongwei Liu }, 15792bd03a43SRongwei Liu { 15802bd03a43SRongwei Liu .dpdk_name = "rx_prio1_buf_discard_packets", 15812bd03a43SRongwei Liu .ctr_name = "rx_prio1_buf_discard", 15822bd03a43SRongwei Liu }, 15832bd03a43SRongwei Liu { 15842bd03a43SRongwei Liu .dpdk_name = "rx_prio2_buf_discard_packets", 15852bd03a43SRongwei Liu .ctr_name = "rx_prio2_buf_discard", 15862bd03a43SRongwei Liu }, 15872bd03a43SRongwei Liu { 15882bd03a43SRongwei Liu .dpdk_name = "rx_prio3_buf_discard_packets", 15892bd03a43SRongwei Liu .ctr_name = "rx_prio3_buf_discard", 15902bd03a43SRongwei Liu }, 15912bd03a43SRongwei Liu { 15922bd03a43SRongwei Liu .dpdk_name = "rx_prio4_buf_discard_packets", 15932bd03a43SRongwei Liu .ctr_name = "rx_prio4_buf_discard", 15942bd03a43SRongwei Liu }, 15952bd03a43SRongwei Liu { 15962bd03a43SRongwei Liu .dpdk_name = "rx_prio5_buf_discard_packets", 15972bd03a43SRongwei Liu .ctr_name = "rx_prio5_buf_discard", 15982bd03a43SRongwei Liu }, 15992bd03a43SRongwei Liu { 16002bd03a43SRongwei Liu .dpdk_name = "rx_prio6_buf_discard_packets", 16012bd03a43SRongwei Liu .ctr_name = "rx_prio6_buf_discard", 16022bd03a43SRongwei Liu }, 16032bd03a43SRongwei Liu { 16042bd03a43SRongwei Liu .dpdk_name = "rx_prio7_buf_discard_packets", 16052bd03a43SRongwei Liu .ctr_name = "rx_prio7_buf_discard", 16062bd03a43SRongwei Liu }, 16072bd03a43SRongwei Liu { 16082bd03a43SRongwei Liu .dpdk_name = 
"rx_prio0_cong_discard_packets", 16092bd03a43SRongwei Liu .ctr_name = "rx_prio0_cong_discard", 16102bd03a43SRongwei Liu }, 16112bd03a43SRongwei Liu { 16122bd03a43SRongwei Liu .dpdk_name = "rx_prio1_cong_discard_packets", 16132bd03a43SRongwei Liu .ctr_name = "rx_prio1_cong_discard", 16142bd03a43SRongwei Liu }, 16152bd03a43SRongwei Liu { 16162bd03a43SRongwei Liu .dpdk_name = "rx_prio2_cong_discard_packets", 16172bd03a43SRongwei Liu .ctr_name = "rx_prio2_cong_discard", 16182bd03a43SRongwei Liu }, 16192bd03a43SRongwei Liu { 16202bd03a43SRongwei Liu .dpdk_name = "rx_prio3_cong_discard_packets", 16212bd03a43SRongwei Liu .ctr_name = "rx_prio3_cong_discard", 16222bd03a43SRongwei Liu }, 16232bd03a43SRongwei Liu { 16242bd03a43SRongwei Liu .dpdk_name = "rx_prio4_cong_discard_packets", 16252bd03a43SRongwei Liu .ctr_name = "rx_prio4_cong_discard", 16262bd03a43SRongwei Liu }, 16272bd03a43SRongwei Liu { 16282bd03a43SRongwei Liu .dpdk_name = "rx_prio5_cong_discard_packets", 16292bd03a43SRongwei Liu .ctr_name = "rx_prio5_cong_discard", 16302bd03a43SRongwei Liu }, 16312bd03a43SRongwei Liu { 16322bd03a43SRongwei Liu .dpdk_name = "rx_prio6_cong_discard_packets", 16332bd03a43SRongwei Liu .ctr_name = "rx_prio6_cong_discard", 16342bd03a43SRongwei Liu }, 16352bd03a43SRongwei Liu { 16362bd03a43SRongwei Liu .dpdk_name = "rx_prio7_cong_discard_packets", 16372bd03a43SRongwei Liu .ctr_name = "rx_prio7_cong_discard", 16382bd03a43SRongwei Liu }, 16392bd03a43SRongwei Liu { 16401101809bSViacheslav Ovsiienko .dpdk_name = "tx_phy_bytes", 164198c4b12aSOphir Munk .ctr_name = "tx_bytes_phy", 164298c4b12aSOphir Munk }, 164398c4b12aSOphir Munk { 16441101809bSViacheslav Ovsiienko .dpdk_name = "rx_phy_bytes", 164598c4b12aSOphir Munk .ctr_name = "rx_bytes_phy", 164698c4b12aSOphir Munk }, 164798c4b12aSOphir Munk /* Representor only */ 164898c4b12aSOphir Munk { 16491101809bSViacheslav Ovsiienko .dpdk_name = "rx_vport_packets", 165098c4b12aSOphir Munk .ctr_name = "vport_rx_packets", 165198c4b12aSOphir Munk }, 165298c4b12aSOphir Munk { 16531101809bSViacheslav Ovsiienko .dpdk_name = "rx_vport_bytes", 165498c4b12aSOphir Munk .ctr_name = "vport_rx_bytes", 165598c4b12aSOphir Munk }, 165698c4b12aSOphir Munk { 16571101809bSViacheslav Ovsiienko .dpdk_name = "tx_vport_packets", 165898c4b12aSOphir Munk .ctr_name = "vport_tx_packets", 165998c4b12aSOphir Munk }, 166098c4b12aSOphir Munk { 16611101809bSViacheslav Ovsiienko .dpdk_name = "tx_vport_bytes", 166298c4b12aSOphir Munk .ctr_name = "vport_tx_bytes", 166398c4b12aSOphir Munk }, 16648e7869f0SWathsala Vithanage /** 16658e7869f0SWathsala Vithanage * Device counters: These counters are for the 16668e7869f0SWathsala Vithanage * entire PCI device (NIC). These counters are 16678e7869f0SWathsala Vithanage * not counting on a per port/queue basis. 
16688e7869f0SWathsala Vithanage */ 16698e7869f0SWathsala Vithanage { 16708e7869f0SWathsala Vithanage .dpdk_name = "rx_pci_signal_integrity", 16718e7869f0SWathsala Vithanage .ctr_name = "rx_pci_signal_integrity", 16728e7869f0SWathsala Vithanage }, 16738e7869f0SWathsala Vithanage { 16748e7869f0SWathsala Vithanage .dpdk_name = "tx_pci_signal_integrity", 16758e7869f0SWathsala Vithanage .ctr_name = "tx_pci_signal_integrity", 16768e7869f0SWathsala Vithanage }, 16778e7869f0SWathsala Vithanage { 16788e7869f0SWathsala Vithanage .dpdk_name = "outbound_pci_buffer_overflow", 16798e7869f0SWathsala Vithanage .ctr_name = "outbound_pci_buffer_overflow", 16808e7869f0SWathsala Vithanage }, 16818e7869f0SWathsala Vithanage { 16828e7869f0SWathsala Vithanage .dpdk_name = "outbound_pci_stalled_rd", 16838e7869f0SWathsala Vithanage .ctr_name = "outbound_pci_stalled_rd", 16848e7869f0SWathsala Vithanage }, 16858e7869f0SWathsala Vithanage { 16868e7869f0SWathsala Vithanage .dpdk_name = "outbound_pci_stalled_wr", 16878e7869f0SWathsala Vithanage .ctr_name = "outbound_pci_stalled_wr", 16888e7869f0SWathsala Vithanage }, 16898e7869f0SWathsala Vithanage { 16908e7869f0SWathsala Vithanage .dpdk_name = "outbound_pci_stalled_rd_events", 16918e7869f0SWathsala Vithanage .ctr_name = "outbound_pci_stalled_rd_events", 16928e7869f0SWathsala Vithanage }, 16938e7869f0SWathsala Vithanage { 16948e7869f0SWathsala Vithanage .dpdk_name = "outbound_pci_stalled_wr_events", 16958e7869f0SWathsala Vithanage .ctr_name = "outbound_pci_stalled_wr_events", 16968e7869f0SWathsala Vithanage }, 16978e7869f0SWathsala Vithanage { 16988e7869f0SWathsala Vithanage .dpdk_name = "dev_out_of_buffer", 16998e7869f0SWathsala Vithanage .ctr_name = "dev_out_of_buffer", 17008e7869f0SWathsala Vithanage }, 170198c4b12aSOphir Munk }; 170298c4b12aSOphir Munk 170398c4b12aSOphir Munk static const unsigned int xstats_n = RTE_DIM(mlx5_counters_init); 170498c4b12aSOphir Munk 1705a687c3e6SBing Zhao static int 1706a687c3e6SBing Zhao mlx5_os_get_stats_strings(struct rte_eth_dev *dev, bool bond_master, 1707a687c3e6SBing Zhao struct ethtool_gstrings *strings, 1708a687c3e6SBing Zhao uint32_t stats_n, uint32_t stats_n_2nd) 1709a687c3e6SBing Zhao { 1710a687c3e6SBing Zhao struct mlx5_priv *priv = dev->data->dev_private; 1711a687c3e6SBing Zhao struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; 1712a687c3e6SBing Zhao struct ifreq ifr; 1713a687c3e6SBing Zhao int ret; 1714a687c3e6SBing Zhao uint32_t i, j, idx; 1715a687c3e6SBing Zhao 1716a687c3e6SBing Zhao /* Ensure no out of bounds access before. */ 1717a687c3e6SBing Zhao MLX5_ASSERT(xstats_n <= MLX5_MAX_XSTATS); 1718a687c3e6SBing Zhao strings->cmd = ETHTOOL_GSTRINGS; 1719a687c3e6SBing Zhao strings->string_set = ETH_SS_STATS; 1720a687c3e6SBing Zhao strings->len = stats_n; 1721a687c3e6SBing Zhao ifr.ifr_data = (caddr_t)strings; 1722a687c3e6SBing Zhao if (bond_master) 1723a687c3e6SBing Zhao ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[0].ifname, 1724a687c3e6SBing Zhao SIOCETHTOOL, &ifr); 1725a687c3e6SBing Zhao else 1726a687c3e6SBing Zhao ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); 1727a687c3e6SBing Zhao if (ret) { 1728a687c3e6SBing Zhao DRV_LOG(WARNING, "port %u unable to get statistic names with %d", 1729a687c3e6SBing Zhao PORT_ID(priv), ret); 1730a687c3e6SBing Zhao return ret; 1731a687c3e6SBing Zhao } 1732a687c3e6SBing Zhao /* Reorganize the orders to reduce the iterations. 
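 * For each known counter j: dev_table_idx[j] holds its index inside the
 * ethtool string set (UINT16_MAX when the kernel does not expose it) and
 * xstats_o_idx[j] holds the output slot assigned to it in the xstats
 * array, so the read path can translate indexes directly.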
*/ 1733a687c3e6SBing Zhao for (j = 0; j < xstats_n; j++) { 1734a687c3e6SBing Zhao xstats_ctrl->dev_table_idx[j] = UINT16_MAX; 1735a687c3e6SBing Zhao for (i = 0; i < stats_n; i++) { 1736a687c3e6SBing Zhao const char *curr_string = 1737a687c3e6SBing Zhao (const char *)&strings->data[i * ETH_GSTRING_LEN]; 1738a687c3e6SBing Zhao 1739a687c3e6SBing Zhao if (!strcmp(mlx5_counters_init[j].ctr_name, curr_string)) { 1740a687c3e6SBing Zhao idx = xstats_ctrl->mlx5_stats_n++; 1741a687c3e6SBing Zhao xstats_ctrl->dev_table_idx[j] = i; 1742a687c3e6SBing Zhao xstats_ctrl->xstats_o_idx[j] = idx; 1743a687c3e6SBing Zhao xstats_ctrl->info[idx] = mlx5_counters_init[j]; 1744a687c3e6SBing Zhao } 1745a687c3e6SBing Zhao } 1746a687c3e6SBing Zhao } 1747a687c3e6SBing Zhao if (!bond_master) { 1748a687c3e6SBing Zhao /* Add dev counters, unique per IB device. */ 1749a687c3e6SBing Zhao for (j = 0; j != xstats_n; j++) { 1750a687c3e6SBing Zhao if (mlx5_counters_init[j].dev) { 1751a687c3e6SBing Zhao idx = xstats_ctrl->mlx5_stats_n++; 1752a687c3e6SBing Zhao xstats_ctrl->info[idx] = mlx5_counters_init[j]; 1753a687c3e6SBing Zhao xstats_ctrl->hw_stats[idx] = 0; 1754a687c3e6SBing Zhao } 1755a687c3e6SBing Zhao } 1756a687c3e6SBing Zhao return 0; 1757a687c3e6SBing Zhao } 1758a687c3e6SBing Zhao 1759a687c3e6SBing Zhao strings->len = stats_n_2nd; 1760a687c3e6SBing Zhao ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[1].ifname, 1761a687c3e6SBing Zhao SIOCETHTOOL, &ifr); 1762a687c3e6SBing Zhao if (ret) { 1763a687c3e6SBing Zhao DRV_LOG(WARNING, "port %u unable to get statistic names for 2nd slave with %d", 1764a687c3e6SBing Zhao PORT_ID(priv), ret); 1765a687c3e6SBing Zhao return ret; 1766a687c3e6SBing Zhao } 1767a687c3e6SBing Zhao /* The 2nd slave port may have a different strings set, based on the configuration. */ 1768a687c3e6SBing Zhao for (j = 0; j != xstats_n; j++) { 1769a687c3e6SBing Zhao xstats_ctrl->dev_table_idx_2nd[j] = UINT16_MAX; 1770a687c3e6SBing Zhao for (i = 0; i != stats_n_2nd; i++) { 1771a687c3e6SBing Zhao const char *curr_string = 1772a687c3e6SBing Zhao (const char *)&strings->data[i * ETH_GSTRING_LEN]; 1773a687c3e6SBing Zhao 1774a687c3e6SBing Zhao if (!strcmp(mlx5_counters_init[j].ctr_name, curr_string)) { 1775a687c3e6SBing Zhao xstats_ctrl->dev_table_idx_2nd[j] = i; 1776a687c3e6SBing Zhao if (xstats_ctrl->dev_table_idx[j] != UINT16_MAX) { 1777a687c3e6SBing Zhao /* Already mapped in the 1st slave port. */ 1778a687c3e6SBing Zhao idx = xstats_ctrl->xstats_o_idx[j]; 1779a687c3e6SBing Zhao xstats_ctrl->xstats_o_idx_2nd[j] = idx; 1780a687c3e6SBing Zhao } else { 1781a687c3e6SBing Zhao /* Append the new items to the end of the map. */ 1782a687c3e6SBing Zhao idx = xstats_ctrl->mlx5_stats_n++; 1783a687c3e6SBing Zhao xstats_ctrl->xstats_o_idx_2nd[j] = idx; 1784a687c3e6SBing Zhao xstats_ctrl->info[idx] = mlx5_counters_init[j]; 1785a687c3e6SBing Zhao } 1786a687c3e6SBing Zhao } 1787a687c3e6SBing Zhao } 1788a687c3e6SBing Zhao } 1789a687c3e6SBing Zhao /* Dev counters are always at the last now. */ 1790a687c3e6SBing Zhao for (j = 0; j != xstats_n; j++) { 1791a687c3e6SBing Zhao if (mlx5_counters_init[j].dev) { 1792a687c3e6SBing Zhao idx = xstats_ctrl->mlx5_stats_n++; 1793a687c3e6SBing Zhao xstats_ctrl->info[idx] = mlx5_counters_init[j]; 1794a687c3e6SBing Zhao xstats_ctrl->hw_stats[idx] = 0; 1795a687c3e6SBing Zhao } 1796a687c3e6SBing Zhao } 1797a687c3e6SBing Zhao return 0; 1798a687c3e6SBing Zhao } 1799a687c3e6SBing Zhao 180098c4b12aSOphir Munk /** 180198c4b12aSOphir Munk * Init the structures to read device counters. 
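 *
 * Queries the number of ethtool statistics (for both members when the
 * port is a bond master), builds the name-to-index mapping tables and
 * snapshots the current readings as the xstats base values.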
180298c4b12aSOphir Munk * 180398c4b12aSOphir Munk * @param dev 180498c4b12aSOphir Munk * Pointer to Ethernet device. 180598c4b12aSOphir Munk */ 180698c4b12aSOphir Munk void 180798c4b12aSOphir Munk mlx5_os_stats_init(struct rte_eth_dev *dev) 180898c4b12aSOphir Munk { 180998c4b12aSOphir Munk struct mlx5_priv *priv = dev->data->dev_private; 181098c4b12aSOphir Munk struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; 181198c4b12aSOphir Munk struct mlx5_stats_ctrl *stats_ctrl = &priv->stats_ctrl; 181298c4b12aSOphir Munk struct ethtool_gstrings *strings = NULL; 1813a687c3e6SBing Zhao uint16_t dev_stats_n = 0; 1814a687c3e6SBing Zhao uint16_t dev_stats_n_2nd = 0; 1815a687c3e6SBing Zhao unsigned int max_stats_n; 181698c4b12aSOphir Munk unsigned int str_sz; 181798c4b12aSOphir Munk int ret; 1818a687c3e6SBing Zhao bool bond_master = (priv->master && priv->pf_bond >= 0); 181998c4b12aSOphir Munk 182098c4b12aSOphir Munk /* So that it won't aggregate for each init. */ 182198c4b12aSOphir Munk xstats_ctrl->mlx5_stats_n = 0; 1822a687c3e6SBing Zhao ret = mlx5_os_get_stats_n(dev, bond_master, &dev_stats_n, &dev_stats_n_2nd); 182398c4b12aSOphir Munk if (ret < 0) { 182498c4b12aSOphir Munk DRV_LOG(WARNING, "port %u no extended statistics available", 182598c4b12aSOphir Munk dev->data->port_id); 182698c4b12aSOphir Munk return; 182798c4b12aSOphir Munk } 1828a687c3e6SBing Zhao max_stats_n = RTE_MAX(dev_stats_n, dev_stats_n_2nd); 182998c4b12aSOphir Munk /* Allocate memory to grab stat names and values. */ 1830a687c3e6SBing Zhao str_sz = max_stats_n * ETH_GSTRING_LEN; 183198c4b12aSOphir Munk strings = (struct ethtool_gstrings *) 183298c4b12aSOphir Munk mlx5_malloc(0, str_sz + sizeof(struct ethtool_gstrings), 0, 183398c4b12aSOphir Munk SOCKET_ID_ANY); 183498c4b12aSOphir Munk if (!strings) { 183598c4b12aSOphir Munk DRV_LOG(WARNING, "port %u unable to allocate memory for xstats", 183698c4b12aSOphir Munk dev->data->port_id); 183798c4b12aSOphir Munk return; 183898c4b12aSOphir Munk } 1839a687c3e6SBing Zhao ret = mlx5_os_get_stats_strings(dev, bond_master, strings, 1840a687c3e6SBing Zhao dev_stats_n, dev_stats_n_2nd); 1841a687c3e6SBing Zhao if (ret < 0) { 1842a687c3e6SBing Zhao DRV_LOG(WARNING, "port %u failed to get the stats strings", 184398c4b12aSOphir Munk dev->data->port_id); 184498c4b12aSOphir Munk goto free; 184598c4b12aSOphir Munk } 184698c4b12aSOphir Munk xstats_ctrl->stats_n = dev_stats_n; 1847a687c3e6SBing Zhao xstats_ctrl->stats_n_2nd = dev_stats_n_2nd; 184898c4b12aSOphir Munk /* Copy to base at first time. */ 1849a687c3e6SBing Zhao ret = mlx5_os_read_dev_counters(dev, bond_master, xstats_ctrl->base); 185098c4b12aSOphir Munk if (ret) 185198c4b12aSOphir Munk DRV_LOG(ERR, "port %u cannot read device counters: %s", 185298c4b12aSOphir Munk dev->data->port_id, strerror(rte_errno)); 185398c4b12aSOphir Munk mlx5_os_read_dev_stat(priv, "out_of_buffer", &stats_ctrl->imissed_base); 185498c4b12aSOphir Munk stats_ctrl->imissed = 0; 185598c4b12aSOphir Munk free: 185698c4b12aSOphir Munk mlx5_free(strings); 185798c4b12aSOphir Munk } 185898c4b12aSOphir Munk 185998c4b12aSOphir Munk /** 186098c4b12aSOphir Munk * Get MAC address by querying netdevice. 186198c4b12aSOphir Munk * 186298c4b12aSOphir Munk * @param[in] dev 186398c4b12aSOphir Munk * Pointer to Ethernet device. 186498c4b12aSOphir Munk * @param[out] mac 186598c4b12aSOphir Munk * MAC address output buffer. 186698c4b12aSOphir Munk * 186798c4b12aSOphir Munk * @return 186898c4b12aSOphir Munk * 0 on success, a negative errno value otherwise and rte_errno is set. 
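 *
 * Illustrative only (internal helper); a minimal sketch, assuming "dev"
 * is a valid mlx5 ethdev:
 *
 * @code
 * struct rte_ether_addr addr;
 *
 * if (mlx5_get_mac(dev, &addr.addr_bytes) == 0)
 *	DRV_LOG(DEBUG, "MAC " RTE_ETHER_ADDR_PRT_FMT,
 *		RTE_ETHER_ADDR_BYTES(&addr));
 * @endcode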
186998c4b12aSOphir Munk */ 187098c4b12aSOphir Munk int 187198c4b12aSOphir Munk mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[RTE_ETHER_ADDR_LEN]) 187298c4b12aSOphir Munk { 187398c4b12aSOphir Munk struct ifreq request; 187498c4b12aSOphir Munk int ret; 187598c4b12aSOphir Munk 187698c4b12aSOphir Munk ret = mlx5_ifreq(dev, SIOCGIFHWADDR, &request); 187798c4b12aSOphir Munk if (ret) 187898c4b12aSOphir Munk return ret; 187998c4b12aSOphir Munk memcpy(mac, request.ifr_hwaddr.sa_data, RTE_ETHER_ADDR_LEN); 188098c4b12aSOphir Munk return 0; 188198c4b12aSOphir Munk } 1882e8482187SBing Zhao 1883e8482187SBing Zhao /* 1884e8482187SBing Zhao * Query dropless_rq private flag value provided by ETHTOOL. 1885e8482187SBing Zhao * 1886e8482187SBing Zhao * @param dev 1887e8482187SBing Zhao * Pointer to Ethernet device. 1888e8482187SBing Zhao * 1889e8482187SBing Zhao * @return 1890e8482187SBing Zhao * - 0 on success, flag is not set. 1891e8482187SBing Zhao * - 1 on success, flag is set. 1892e8482187SBing Zhao * - negative errno value otherwise and rte_errno is set. 1893e8482187SBing Zhao */ 1894e8482187SBing Zhao int mlx5_get_flag_dropless_rq(struct rte_eth_dev *dev) 1895e8482187SBing Zhao { 1896ccc6ea5dSBing Zhao struct ethtool_sset_info *sset_info = NULL; 1897e8482187SBing Zhao struct ethtool_drvinfo drvinfo; 1898e8482187SBing Zhao struct ifreq ifr; 1899e8482187SBing Zhao struct ethtool_gstrings *strings = NULL; 1900e8482187SBing Zhao struct ethtool_value flags; 1901e8482187SBing Zhao const int32_t flag_len = sizeof(flags.data) * CHAR_BIT; 1902e8482187SBing Zhao int32_t str_sz; 1903e8482187SBing Zhao int32_t len; 1904e8482187SBing Zhao int32_t i; 1905e8482187SBing Zhao int ret; 1906e8482187SBing Zhao 1907ccc6ea5dSBing Zhao sset_info = mlx5_malloc(0, sizeof(struct ethtool_sset_info) + 1908ccc6ea5dSBing Zhao sizeof(uint32_t), 0, SOCKET_ID_ANY); 1909ccc6ea5dSBing Zhao if (sset_info == NULL) { 1910ccc6ea5dSBing Zhao rte_errno = ENOMEM; 1911ccc6ea5dSBing Zhao return -rte_errno; 1912ccc6ea5dSBing Zhao } 1913ccc6ea5dSBing Zhao sset_info->cmd = ETHTOOL_GSSET_INFO; 1914ccc6ea5dSBing Zhao sset_info->reserved = 0; 1915ccc6ea5dSBing Zhao sset_info->sset_mask = 1ULL << ETH_SS_PRIV_FLAGS; 1916e8482187SBing Zhao ifr.ifr_data = (caddr_t)sset_info; 1917e8482187SBing Zhao ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); 1918e8482187SBing Zhao if (!ret) { 1919ccc6ea5dSBing Zhao const uint32_t *sset_lengths = sset_info->data; 1920e8482187SBing Zhao 1921ccc6ea5dSBing Zhao len = sset_info->sset_mask ?
sset_lengths[0] : 0; 1922e8482187SBing Zhao } else if (ret == -EOPNOTSUPP) { 1923e8482187SBing Zhao drvinfo.cmd = ETHTOOL_GDRVINFO; 1924e8482187SBing Zhao ifr.ifr_data = (caddr_t)&drvinfo; 1925e8482187SBing Zhao ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); 1926e8482187SBing Zhao if (ret) { 1927e8482187SBing Zhao DRV_LOG(WARNING, "port %u cannot get the driver info", 1928e8482187SBing Zhao dev->data->port_id); 1929e8482187SBing Zhao goto exit; 1930e8482187SBing Zhao } 1931e8482187SBing Zhao len = *(uint32_t *)((char *)&drvinfo + 1932e8482187SBing Zhao offsetof(struct ethtool_drvinfo, n_priv_flags)); 1933e8482187SBing Zhao } else { 1934e8482187SBing Zhao DRV_LOG(WARNING, "port %u cannot get the sset info", 1935e8482187SBing Zhao dev->data->port_id); 1936e8482187SBing Zhao goto exit; 1937e8482187SBing Zhao } 1938e8482187SBing Zhao if (!len) { 1939e8482187SBing Zhao DRV_LOG(WARNING, "port %u does not have private flag", 1940e8482187SBing Zhao dev->data->port_id); 1941e8482187SBing Zhao rte_errno = EOPNOTSUPP; 1942e8482187SBing Zhao ret = -rte_errno; 1943e8482187SBing Zhao goto exit; 1944e8482187SBing Zhao } else if (len > flag_len) { 1945e8482187SBing Zhao DRV_LOG(WARNING, "port %u maximal private flags number is %d", 1946e8482187SBing Zhao dev->data->port_id, flag_len); 1947e8482187SBing Zhao len = flag_len; 1948e8482187SBing Zhao } 1949e8482187SBing Zhao str_sz = ETH_GSTRING_LEN * len; 1950e8482187SBing Zhao strings = (struct ethtool_gstrings *) 1951e8482187SBing Zhao mlx5_malloc(0, str_sz + sizeof(struct ethtool_gstrings), 0, 1952e8482187SBing Zhao SOCKET_ID_ANY); 1953e8482187SBing Zhao if (!strings) { 1954e8482187SBing Zhao DRV_LOG(WARNING, "port %u unable to allocate memory for" 1955e8482187SBing Zhao " private flags", dev->data->port_id); 1956e8482187SBing Zhao rte_errno = ENOMEM; 1957e8482187SBing Zhao ret = -rte_errno; 1958e8482187SBing Zhao goto exit; 1959e8482187SBing Zhao } 1960e8482187SBing Zhao strings->cmd = ETHTOOL_GSTRINGS; 1961e8482187SBing Zhao strings->string_set = ETH_SS_PRIV_FLAGS; 1962e8482187SBing Zhao strings->len = len; 1963e8482187SBing Zhao ifr.ifr_data = (caddr_t)strings; 1964e8482187SBing Zhao ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); 1965e8482187SBing Zhao if (ret) { 1966e8482187SBing Zhao DRV_LOG(WARNING, "port %u unable to get private flags strings", 1967e8482187SBing Zhao dev->data->port_id); 1968e8482187SBing Zhao goto exit; 1969e8482187SBing Zhao } 1970e8482187SBing Zhao for (i = 0; i < len; i++) { 1971e8482187SBing Zhao strings->data[(i + 1) * ETH_GSTRING_LEN - 1] = 0; 1972e8482187SBing Zhao if (!strcmp((const char *)strings->data + i * ETH_GSTRING_LEN, 1973e8482187SBing Zhao "dropless_rq")) 1974e8482187SBing Zhao break; 1975e8482187SBing Zhao } 1976e8482187SBing Zhao if (i == len) { 1977e8482187SBing Zhao DRV_LOG(WARNING, "port %u does not support dropless_rq", 1978e8482187SBing Zhao dev->data->port_id); 1979e8482187SBing Zhao rte_errno = EOPNOTSUPP; 1980e8482187SBing Zhao ret = -rte_errno; 1981e8482187SBing Zhao goto exit; 1982e8482187SBing Zhao } 1983e8482187SBing Zhao flags.cmd = ETHTOOL_GPFLAGS; 1984e8482187SBing Zhao ifr.ifr_data = (caddr_t)&flags; 1985e8482187SBing Zhao ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); 1986e8482187SBing Zhao if (ret) { 1987e8482187SBing Zhao DRV_LOG(WARNING, "port %u unable to get private flags status", 1988e8482187SBing Zhao dev->data->port_id); 1989e8482187SBing Zhao goto exit; 1990e8482187SBing Zhao } 1991e8482187SBing Zhao ret = !!(flags.data & (1U << i)); 1992e8482187SBing Zhao exit: 1993e8482187SBing Zhao 
mlx5_free(strings); 1994ccc6ea5dSBing Zhao mlx5_free(sset_info); 1995e8482187SBing Zhao return ret; 1996e8482187SBing Zhao } 19979b31fc90SViacheslav Ovsiienko 19989b31fc90SViacheslav Ovsiienko /** 19999b31fc90SViacheslav Ovsiienko * Unmaps HCA PCI BAR from the current process address space. 20009b31fc90SViacheslav Ovsiienko * 20019b31fc90SViacheslav Ovsiienko * @param dev 20029b31fc90SViacheslav Ovsiienko * Pointer to Ethernet device structure. 20039b31fc90SViacheslav Ovsiienko */ 20049b31fc90SViacheslav Ovsiienko void mlx5_txpp_unmap_hca_bar(struct rte_eth_dev *dev) 20059b31fc90SViacheslav Ovsiienko { 20069b31fc90SViacheslav Ovsiienko struct mlx5_proc_priv *ppriv = dev->process_private; 20079b31fc90SViacheslav Ovsiienko 20089b31fc90SViacheslav Ovsiienko if (ppriv && ppriv->hca_bar) { 20099b31fc90SViacheslav Ovsiienko rte_mem_unmap(ppriv->hca_bar, MLX5_ST_SZ_BYTES(initial_seg)); 20109b31fc90SViacheslav Ovsiienko ppriv->hca_bar = NULL; 20119b31fc90SViacheslav Ovsiienko } 20129b31fc90SViacheslav Ovsiienko } 20139b31fc90SViacheslav Ovsiienko 20149b31fc90SViacheslav Ovsiienko /** 20159b31fc90SViacheslav Ovsiienko * Maps HCA PCI BAR to the current process address space. 20169b31fc90SViacheslav Ovsiienko * Stores pointer in the process private structure allowing 20179b31fc90SViacheslav Ovsiienko * to read internal and real time counter directly from the HW. 20189b31fc90SViacheslav Ovsiienko * 20199b31fc90SViacheslav Ovsiienko * @param dev 20209b31fc90SViacheslav Ovsiienko * Pointer to Ethernet device structure. 20219b31fc90SViacheslav Ovsiienko * 20229b31fc90SViacheslav Ovsiienko * @return 20239b31fc90SViacheslav Ovsiienko * 0 on success and not NULL pointer to mapped area in process structure. 20249b31fc90SViacheslav Ovsiienko * negative otherwise and NULL pointer 20259b31fc90SViacheslav Ovsiienko */ 20269b31fc90SViacheslav Ovsiienko int mlx5_txpp_map_hca_bar(struct rte_eth_dev *dev) 20279b31fc90SViacheslav Ovsiienko { 20289b31fc90SViacheslav Ovsiienko struct mlx5_proc_priv *ppriv = dev->process_private; 20299b31fc90SViacheslav Ovsiienko char pci_addr[PCI_PRI_STR_SIZE] = { 0 }; 20309b31fc90SViacheslav Ovsiienko void *base, *expected = NULL; 20319b31fc90SViacheslav Ovsiienko int fd, ret; 20329b31fc90SViacheslav Ovsiienko 20339b31fc90SViacheslav Ovsiienko if (!ppriv) { 20349b31fc90SViacheslav Ovsiienko rte_errno = ENOMEM; 20359b31fc90SViacheslav Ovsiienko return -rte_errno; 20369b31fc90SViacheslav Ovsiienko } 20379b31fc90SViacheslav Ovsiienko if (ppriv->hca_bar) 20389b31fc90SViacheslav Ovsiienko return 0; 20399b31fc90SViacheslav Ovsiienko ret = mlx5_dev_to_pci_str(dev->device, pci_addr, sizeof(pci_addr)); 20409b31fc90SViacheslav Ovsiienko if (ret < 0) 20419b31fc90SViacheslav Ovsiienko return -rte_errno; 20429b31fc90SViacheslav Ovsiienko /* Open PCI device resource 0 - HCA initialize segment */ 20439b31fc90SViacheslav Ovsiienko MKSTR(name, "/sys/bus/pci/devices/%s/resource0", pci_addr); 20449b31fc90SViacheslav Ovsiienko fd = open(name, O_RDWR | O_SYNC); 20459b31fc90SViacheslav Ovsiienko if (fd == -1) { 20469b31fc90SViacheslav Ovsiienko rte_errno = ENOTSUP; 20479b31fc90SViacheslav Ovsiienko return -ENOTSUP; 20489b31fc90SViacheslav Ovsiienko } 20499b31fc90SViacheslav Ovsiienko base = rte_mem_map(NULL, MLX5_ST_SZ_BYTES(initial_seg), 20509b31fc90SViacheslav Ovsiienko RTE_PROT_READ, RTE_MAP_SHARED, fd, 0); 20519b31fc90SViacheslav Ovsiienko close(fd); 20529b31fc90SViacheslav Ovsiienko if (!base) { 20539b31fc90SViacheslav Ovsiienko rte_errno = ENOTSUP; 20549b31fc90SViacheslav Ovsiienko return -ENOTSUP; 
20559b31fc90SViacheslav Ovsiienko } 20569b31fc90SViacheslav Ovsiienko /* Check there is no concurrent mapping in other thread. */ 20579b31fc90SViacheslav Ovsiienko if (!__atomic_compare_exchange_n(&ppriv->hca_bar, &expected, 20589b31fc90SViacheslav Ovsiienko base, false, 20599b31fc90SViacheslav Ovsiienko __ATOMIC_RELAXED, __ATOMIC_RELAXED)) 20609b31fc90SViacheslav Ovsiienko rte_mem_unmap(base, MLX5_ST_SZ_BYTES(initial_seg)); 20619b31fc90SViacheslav Ovsiienko return 0; 20629b31fc90SViacheslav Ovsiienko } 20639b31fc90SViacheslav Ovsiienko 2064
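/*
 * Note on mlx5_txpp_map_hca_bar() above: the read-only mapping of the HCA
 * initial segment is what allows Tx packet pacing to read the internal
 * timer and real-time clock registers directly, and the compare-and-swap
 * on ppriv->hca_bar ensures that when several threads race to create the
 * mapping only one pointer is published while the losers unmap their copy.
 */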