/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <inttypes.h>
#include <unistd.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#include <dirent.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <fcntl.h>
#include <stdalign.h>
#include <sys/un.h>
#include <time.h>

#include <ethdev_driver.h>
#include <rte_bus_pci.h>
#include <rte_mbuf.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_malloc.h>
#include <rte_string_fns.h>
#include <rte_rwlock.h>
#include <rte_cycles.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_common.h>
#include <mlx5_malloc.h>

#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"

/* Supported speed values found in /usr/include/linux/ethtool.h */
#ifndef HAVE_SUPPORTED_40000baseKR4_Full
#define SUPPORTED_40000baseKR4_Full (1 << 23)
#endif
#ifndef HAVE_SUPPORTED_40000baseCR4_Full
#define SUPPORTED_40000baseCR4_Full (1 << 24)
#endif
#ifndef HAVE_SUPPORTED_40000baseSR4_Full
#define SUPPORTED_40000baseSR4_Full (1 << 25)
#endif
#ifndef HAVE_SUPPORTED_40000baseLR4_Full
#define SUPPORTED_40000baseLR4_Full (1 << 26)
#endif
#ifndef HAVE_SUPPORTED_56000baseKR4_Full
#define SUPPORTED_56000baseKR4_Full (1 << 27)
#endif
#ifndef HAVE_SUPPORTED_56000baseCR4_Full
#define SUPPORTED_56000baseCR4_Full (1 << 28)
#endif
#ifndef HAVE_SUPPORTED_56000baseSR4_Full
#define SUPPORTED_56000baseSR4_Full (1 << 29)
#endif
#ifndef HAVE_SUPPORTED_56000baseLR4_Full
#define SUPPORTED_56000baseLR4_Full (1 << 30)
#endif

/* Add defines in case the running kernel is not the same as user headers. */
#ifndef ETHTOOL_GLINKSETTINGS
struct ethtool_link_settings {
	uint32_t cmd;
	uint32_t speed;
	uint8_t duplex;
	uint8_t port;
	uint8_t phy_address;
	uint8_t autoneg;
	uint8_t mdio_support;
	uint8_t eth_to_mdix;
	uint8_t eth_tp_mdix_ctrl;
	int8_t link_mode_masks_nwords;
	uint32_t reserved[8];
	uint32_t link_mode_masks[];
};

/* The kernel values can be found in /include/uapi/linux/ethtool.h */
#define ETHTOOL_GLINKSETTINGS 0x0000004c
#define ETHTOOL_LINK_MODE_1000baseT_Full_BIT 5
#define ETHTOOL_LINK_MODE_Autoneg_BIT 6
#define ETHTOOL_LINK_MODE_1000baseKX_Full_BIT 17
#define ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT 18
#define ETHTOOL_LINK_MODE_10000baseKR_Full_BIT 19
#define ETHTOOL_LINK_MODE_10000baseR_FEC_BIT 20
#define ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT 21
#define ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT 22
#define ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT 23
#define ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT 24
#define ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT 25
#define ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT 26
#define ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT 27
#define ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT 28
#define ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT 29
#define ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT 30
#endif
#ifndef HAVE_ETHTOOL_LINK_MODE_25G
#define ETHTOOL_LINK_MODE_25000baseCR_Full_BIT 31
#define ETHTOOL_LINK_MODE_25000baseKR_Full_BIT 32
#define ETHTOOL_LINK_MODE_25000baseSR_Full_BIT 33
#endif
#ifndef HAVE_ETHTOOL_LINK_MODE_50G
#define ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT 34
#define ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT 35
#endif
#ifndef HAVE_ETHTOOL_LINK_MODE_100G
#define ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT 36
#define ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT 37
#define ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT 38
#define ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT 39
#endif
#ifndef HAVE_ETHTOOL_LINK_MODE_200G
#define ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT 62
#define ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT 63
#define ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT 0 /* 64 - 64 */
#define ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT 1 /* 65 - 64 */
#define ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT 2 /* 66 - 64 */
#endif
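
/*
 * Note on the last three 200G bits above: they are numbered relative to 64
 * ("64 - 64", "65 - 64", "66 - 64") because the ethtool link mode mask is
 * carried as an array of 32-bit words. mlx5_link_update_unlocked_gs() below
 * folds link_mode_masks[0]/[1] into one 64-bit word for bits 0-63 and
 * link_mode_masks[2]/[3] into a second one for bits 64 and up before testing
 * these capabilities.
 */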

/**
 * Get interface name from private structure.
 *
 * This is a port representor-aware version of mlx5_get_ifname_sysfs().
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[out] ifname
 *   Interface name output buffer.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[MLX5_NAMESIZE])
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int ifindex;

	MLX5_ASSERT(priv);
	MLX5_ASSERT(priv->sh);
	if (priv->master && priv->sh->bond.ifindex > 0) {
		memcpy(ifname, priv->sh->bond.ifname, MLX5_NAMESIZE);
		return 0;
	}
	ifindex = mlx5_ifindex(dev);
	if (!ifindex) {
		if (!priv->representor)
			return mlx5_get_ifname_sysfs(priv->sh->ibdev_path,
						     *ifname);
		rte_errno = ENXIO;
		return -rte_errno;
	}
	if (if_indextoname(ifindex, &(*ifname)[0]))
		return 0;
	rte_errno = errno;
	return -rte_errno;
}

/**
 * Perform ifreq ioctl() on associated netdev ifname.
 *
 * @param[in] ifname
 *   Pointer to netdev name.
 * @param req
 *   Request number to pass to ioctl().
 * @param[out] ifr
 *   Interface request structure output buffer.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_ifreq_by_ifname(const char *ifname, int req, struct ifreq *ifr)
{
	int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
	int ret = 0;

	if (sock == -1) {
		rte_errno = errno;
		return -rte_errno;
	}
	rte_strscpy(ifr->ifr_name, ifname, sizeof(ifr->ifr_name));
	ret = ioctl(sock, req, ifr);
	if (ret == -1) {
		rte_errno = errno;
		goto error;
	}
	close(sock);
	return 0;
error:
	close(sock);
	return -rte_errno;
}
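
/*
 * A dedicated AF_INET datagram socket is opened and closed for every request
 * above. These ioctl()s are control-path operations only (MTU, interface
 * flags, ethtool queries), so the per-call socket overhead is not on any
 * packet fast path.
 */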

/**
 * Perform ifreq ioctl() on associated Ethernet device.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param req
 *   Request number to pass to ioctl().
 * @param[out] ifr
 *   Interface request structure output buffer.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr)
{
	char ifname[sizeof(ifr->ifr_name)];
	int ret;

	ret = mlx5_get_ifname(dev, &ifname);
	if (ret)
		return -rte_errno;
	return mlx5_ifreq_by_ifname(ifname, req, ifr);
}
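
/*
 * The helpers below all funnel through mlx5_ifreq(): plain requests such as
 * SIOCGIFMTU/SIOCSIFMTU and SIOCGIFFLAGS/SIOCSIFFLAGS use the ifreq fields
 * directly, while the ethtool-based ones point ifr_data at an ethtool
 * command structure and issue SIOCETHTOOL.
 */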

/**
 * Get device MTU.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[out] mtu
 *   MTU value output buffer.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu)
{
	struct ifreq request;
	int ret = mlx5_ifreq(dev, SIOCGIFMTU, &request);

	if (ret)
		return ret;
	*mtu = request.ifr_mtu;
	return 0;
}

/**
 * Set device MTU.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param mtu
 *   MTU value to set.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct ifreq request = { .ifr_mtu = mtu, };

	return mlx5_ifreq(dev, SIOCSIFMTU, &request);
}

/**
 * Set device flags.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param keep
 *   Bitmask for flags that must remain untouched.
 * @param flags
 *   Bitmask for flags to modify.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep, unsigned int flags)
{
	struct ifreq request;
	int ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &request);

	if (ret)
		return ret;
	request.ifr_flags &= keep;
	request.ifr_flags |= flags & ~keep;
	return mlx5_ifreq(dev, SIOCSIFFLAGS, &request);
}
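
/*
 * Usage note: callers pass the flags to preserve in 'keep' and the new
 * values in 'flags'. For instance mlx5_set_link_up() below calls
 * mlx5_set_flags(dev, ~IFF_UP, IFF_UP) to raise IFF_UP while leaving every
 * other interface flag untouched, and mlx5_set_link_down() clears it the
 * same way with mlx5_set_flags(dev, ~IFF_UP, ~IFF_UP).
 */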

/**
 * Get device current raw clock counter
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] clock
 *   Current raw clock counter of the device.
 *
 * @return
 *   0 if the clock has correctly been read
 *   The value of errno in case of error
 */
int
mlx5_read_clock(struct rte_eth_dev *dev, uint64_t *clock)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_context *ctx = priv->sh->ctx;
	struct ibv_values_ex values;
	int err = 0;

	values.comp_mask = IBV_VALUES_MASK_RAW_CLOCK;
	err = mlx5_glue->query_rt_values_ex(ctx, &values);
	if (err != 0) {
		DRV_LOG(WARNING, "Could not query the clock!");
		return err;
	}
	*clock = values.raw_clock.tv_nsec;
	return 0;
}

/**
 * Retrieve the master device for representor in the same switch domain.
 *
 * @param dev
 *   Pointer to representor Ethernet device structure.
 *
 * @return
 *   Master device structure on success, NULL otherwise.
 */
static struct rte_eth_dev *
mlx5_find_master_dev(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv;
	uint16_t port_id;
	uint16_t domain_id;

	priv = dev->data->dev_private;
	domain_id = priv->domain_id;
	MLX5_ASSERT(priv->representor);
	MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) {
		struct mlx5_priv *opriv =
			rte_eth_devices[port_id].data->dev_private;
		if (opriv &&
		    opriv->master &&
		    opriv->domain_id == domain_id &&
		    opriv->sh == priv->sh)
			return &rte_eth_devices[port_id];
	}
	return NULL;
}

/**
 * Retrieve physical link information (unlocked version using legacy ioctl).
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] link
 *   Storage for current link status.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev,
			       struct rte_eth_link *link)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ethtool_cmd edata = {
		.cmd = ETHTOOL_GSET /* Deprecated since Linux v4.5. */
	};
	struct ifreq ifr;
	struct rte_eth_link dev_link;
	int link_speed = 0;
	int ret;

	ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr);
	if (ret) {
		DRV_LOG(WARNING, "port %u ioctl(SIOCGIFFLAGS) failed: %s",
			dev->data->port_id, strerror(rte_errno));
		return ret;
	}
	dev_link = (struct rte_eth_link) {
		.link_status = ((ifr.ifr_flags & IFF_UP) &&
				(ifr.ifr_flags & IFF_RUNNING)),
	};
	ifr = (struct ifreq) {
		.ifr_data = (void *)&edata,
	};
	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
	if (ret) {
		if (ret == -ENOTSUP && priv->representor) {
			struct rte_eth_dev *master;

			/*
			 * For representors we can try to inherit link
			 * settings from the master device. Actually
			 * link settings do not make a lot of sense
			 * for representors due to missing physical
			 * link. The old kernel drivers supported
			 * emulated settings query for representors,
			 * the new ones do not, so we have to add
			 * this code for compatibility issues.
			 */
			master = mlx5_find_master_dev(dev);
			if (master) {
				ifr = (struct ifreq) {
					.ifr_data = (void *)&edata,
				};
				ret = mlx5_ifreq(master, SIOCETHTOOL, &ifr);
			}
		}
		if (ret) {
			DRV_LOG(WARNING,
				"port %u ioctl(SIOCETHTOOL,"
				" ETHTOOL_GSET) failed: %s",
				dev->data->port_id, strerror(rte_errno));
			return ret;
		}
	}
	link_speed = ethtool_cmd_speed(&edata);
	if (link_speed == -1)
		dev_link.link_speed = ETH_SPEED_NUM_UNKNOWN;
	else
		dev_link.link_speed = link_speed;
	priv->link_speed_capa = 0;
	if (edata.supported & SUPPORTED_Autoneg)
		priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG;
	if (edata.supported & (SUPPORTED_1000baseT_Full |
			       SUPPORTED_1000baseKX_Full))
		priv->link_speed_capa |= ETH_LINK_SPEED_1G;
	if (edata.supported & SUPPORTED_10000baseKR_Full)
		priv->link_speed_capa |= ETH_LINK_SPEED_10G;
	if (edata.supported & (SUPPORTED_40000baseKR4_Full |
			       SUPPORTED_40000baseCR4_Full |
			       SUPPORTED_40000baseSR4_Full |
			       SUPPORTED_40000baseLR4_Full))
		priv->link_speed_capa |= ETH_LINK_SPEED_40G;
	dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
				ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
	dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
				  ETH_LINK_SPEED_FIXED);
	*link = dev_link;
	return 0;
}
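
/*
 * The legacy ETHTOOL_GSET command used above carries the supported mask in a
 * single 32-bit word, which is why only capabilities up to 40G are
 * classified there. Faster rates are reported through the
 * ETHTOOL_GLINKSETTINGS path implemented below.
 */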

/**
 * Retrieve physical link information (unlocked version using new ioctl).
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] link
 *   Storage for current link status.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev,
			     struct rte_eth_link *link)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ethtool_link_settings gcmd = { .cmd = ETHTOOL_GLINKSETTINGS };
	struct ifreq ifr;
	struct rte_eth_link dev_link;
	struct rte_eth_dev *master = NULL;
	uint64_t sc;
	int ret;

	ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr);
	if (ret) {
		DRV_LOG(WARNING, "port %u ioctl(SIOCGIFFLAGS) failed: %s",
			dev->data->port_id, strerror(rte_errno));
		return ret;
	}
	dev_link = (struct rte_eth_link) {
		.link_status = ((ifr.ifr_flags & IFF_UP) &&
				(ifr.ifr_flags & IFF_RUNNING)),
	};
	ifr = (struct ifreq) {
		.ifr_data = (void *)&gcmd,
	};
	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
	if (ret) {
		if (ret == -ENOTSUP && priv->representor) {
			/*
			 * For representors we can try to inherit link
			 * settings from the master device. Actually
			 * link settings do not make a lot of sense
			 * for representors due to missing physical
			 * link. The old kernel drivers supported
			 * emulated settings query for representors,
			 * the new ones do not, so we have to add
			 * this code for compatibility issues.
			 */
			master = mlx5_find_master_dev(dev);
			if (master) {
				ifr = (struct ifreq) {
					.ifr_data = (void *)&gcmd,
				};
				ret = mlx5_ifreq(master, SIOCETHTOOL, &ifr);
			}
		}
		if (ret) {
			DRV_LOG(DEBUG,
				"port %u ioctl(SIOCETHTOOL,"
				" ETHTOOL_GLINKSETTINGS) failed: %s",
				dev->data->port_id, strerror(rte_errno));
			return ret;
		}
	}
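	/*
	 * The first pass above was issued with link_mode_masks_nwords == 0,
	 * so the kernel answers with the required number of 32-bit mask
	 * words as a negative value. Flip the sign and repeat the request
	 * with room for the three mask arrays (supported, advertising and
	 * lp_advertising), each link_mode_masks_nwords words long.
	 */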
	gcmd.link_mode_masks_nwords = -gcmd.link_mode_masks_nwords;

	alignas(struct ethtool_link_settings)
	uint8_t data[offsetof(struct ethtool_link_settings, link_mode_masks) +
		     sizeof(uint32_t) * gcmd.link_mode_masks_nwords * 3];
	struct ethtool_link_settings *ecmd = (void *)data;

	*ecmd = gcmd;
	ifr.ifr_data = (void *)ecmd;
	ret = mlx5_ifreq(master ? master : dev, SIOCETHTOOL, &ifr);
	if (ret) {
		DRV_LOG(DEBUG,
			"port %u ioctl(SIOCETHTOOL,"
			" ETHTOOL_GLINKSETTINGS) failed: %s",
			dev->data->port_id, strerror(rte_errno));
		return ret;
	}
	dev_link.link_speed = (ecmd->speed == UINT32_MAX) ?
			      ETH_SPEED_NUM_UNKNOWN : ecmd->speed;
	sc = ecmd->link_mode_masks[0] |
	     ((uint64_t)ecmd->link_mode_masks[1] << 32);
	priv->link_speed_capa = 0;
	if (sc & MLX5_BITSHIFT(ETHTOOL_LINK_MODE_Autoneg_BIT))
		priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseT_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_1G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_10G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_20G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_40G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_56G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_25G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_50G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_100G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_200G;

	sc = ecmd->link_mode_masks[2] |
	     ((uint64_t)ecmd->link_mode_masks[3] << 32);
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT) |
		  MLX5_BITSHIFT
			(ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_200G;
	dev_link.link_duplex = ((ecmd->duplex == DUPLEX_HALF) ?
				ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
	dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
				  ETH_LINK_SPEED_FIXED);
	*link = dev_link;
	return 0;
}

/**
 * DPDK callback to retrieve physical link information.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param wait_to_complete
 *   Wait for request completion.
 *
 * @return
 *   0 if link status was not updated, positive if it was, a negative errno
 *   value otherwise and rte_errno is set.
 */
int
mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	int ret;
	struct rte_eth_link dev_link;
	time_t start_time = time(NULL);
	int retry = MLX5_GET_LINK_STATUS_RETRY_COUNT;

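	/*
	 * Query the link state, preferring ETHTOOL_GLINKSETTINGS and falling
	 * back to the legacy ETHTOOL_GSET when the kernel does not support
	 * it. On -EAGAIN keep polling for up to MLX5_LINK_STATUS_TIMEOUT
	 * seconds while the caller asked to wait or retries remain, then
	 * give up with EBUSY.
	 */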
	do {
		ret = mlx5_link_update_unlocked_gs(dev, &dev_link);
		if (ret == -ENOTSUP)
			ret = mlx5_link_update_unlocked_gset(dev, &dev_link);
		if (ret == 0)
			break;
		/* Handle wait to complete situation. */
		if ((wait_to_complete || retry) && ret == -EAGAIN) {
			if (abs((int)difftime(time(NULL), start_time)) <
			    MLX5_LINK_STATUS_TIMEOUT) {
				usleep(0);
				continue;
			} else {
				rte_errno = EBUSY;
				return -rte_errno;
			}
		} else if (ret < 0) {
			return ret;
		}
	} while (wait_to_complete || retry-- > 0);
	ret = !!memcmp(&dev->data->dev_link, &dev_link,
		       sizeof(struct rte_eth_link));
	dev->data->dev_link = dev_link;
	return ret;
}

/**
 * DPDK callback to get flow control status.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] fc_conf
 *   Flow control output buffer.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct ifreq ifr;
	struct ethtool_pauseparam ethpause = {
		.cmd = ETHTOOL_GPAUSEPARAM
	};
	int ret;

	ifr.ifr_data = (void *)&ethpause;
	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
	if (ret) {
		DRV_LOG(WARNING,
			"port %u ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM) failed:"
			" %s",
			dev->data->port_id, strerror(rte_errno));
		return ret;
	}
	fc_conf->autoneg = ethpause.autoneg;
	if (ethpause.rx_pause && ethpause.tx_pause)
		fc_conf->mode = RTE_FC_FULL;
	else if (ethpause.rx_pause)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (ethpause.tx_pause)
		fc_conf->mode = RTE_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_FC_NONE;
	return 0;
}

/**
 * DPDK callback to modify flow control parameters.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[in] fc_conf
 *   Flow control parameters.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct ifreq ifr;
	struct ethtool_pauseparam ethpause = {
		.cmd = ETHTOOL_SPAUSEPARAM
	};
	int ret;

	ifr.ifr_data = (void *)&ethpause;
	ethpause.autoneg = fc_conf->autoneg;
	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
	    (fc_conf->mode & RTE_FC_RX_PAUSE))
		ethpause.rx_pause = 1;
	else
		ethpause.rx_pause = 0;

	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
	    (fc_conf->mode & RTE_FC_TX_PAUSE))
		ethpause.tx_pause = 1;
	else
		ethpause.tx_pause = 0;
	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
	if (ret) {
		DRV_LOG(WARNING,
			"port %u ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)"
			" failed: %s",
			dev->data->port_id, strerror(rte_errno));
		return ret;
	}
	return 0;
}

/**
 * Handle asynchronous removal event for entire multiport device.
 *
 * @param sh
 *   Infiniband device shared context.
 */
static void
mlx5_dev_interrupt_device_fatal(struct mlx5_dev_ctx_shared *sh)
{
	uint32_t i;

	for (i = 0; i < sh->max_port; ++i) {
		struct rte_eth_dev *dev;

		if (sh->port[i].ih_port_id >= RTE_MAX_ETHPORTS) {
			/*
			 * Either the port does not exist or no
			 * handler is installed for it.
			 */
			continue;
		}
		dev = &rte_eth_devices[sh->port[i].ih_port_id];
		MLX5_ASSERT(dev);
		if (dev->data->dev_conf.intr_conf.rmv)
			rte_eth_dev_callback_process
				(dev, RTE_ETH_EVENT_INTR_RMV, NULL);
	}
}

/**
 * Handle shared asynchronous events from the NIC (removal event
 * and link status change). Supports multiport IB device.
 *
 * @param cb_arg
 *   Callback argument.
 */
void
mlx5_dev_interrupt_handler(void *cb_arg)
{
	struct mlx5_dev_ctx_shared *sh = cb_arg;
	struct ibv_async_event event;

	/* Read all messages from the IB device and acknowledge them. */
	for (;;) {
		struct rte_eth_dev *dev;
		uint32_t tmp;

		if (mlx5_glue->get_async_event(sh->ctx, &event))
			break;
		/* Retrieve and check IB port index. */
		tmp = (uint32_t)event.element.port_num;
		if (!tmp && event.event_type == IBV_EVENT_DEVICE_FATAL) {
			/*
			 * The DEVICE_FATAL event is reported once for the
			 * entire device without specifying a port.
			 * We should notify all existing ports.
			 */
			mlx5_glue->ack_async_event(&event);
			mlx5_dev_interrupt_device_fatal(sh);
			continue;
		}
		MLX5_ASSERT(tmp && (tmp <= sh->max_port));
		if (!tmp) {
			/* Unsupported device level event. */
			mlx5_glue->ack_async_event(&event);
			DRV_LOG(DEBUG,
				"unsupported common event (type %d)",
				event.event_type);
			continue;
		}
		if (tmp > sh->max_port) {
			/* Invalid IB port index. */
			mlx5_glue->ack_async_event(&event);
			DRV_LOG(DEBUG,
				"cannot handle an event (type %d)"
				" due to invalid IB port index (%u)",
				event.event_type, tmp);
			continue;
		}
		if (sh->port[tmp - 1].ih_port_id >= RTE_MAX_ETHPORTS) {
			/* No handler installed. */
			mlx5_glue->ack_async_event(&event);
			DRV_LOG(DEBUG,
				"cannot handle an event (type %d)"
				" due to no handler installed for port %u",
				event.event_type, tmp);
			continue;
		}
		/* Retrieve ethernet device descriptor. */
		tmp = sh->port[tmp - 1].ih_port_id;
		dev = &rte_eth_devices[tmp];
		MLX5_ASSERT(dev);
		if ((event.event_type == IBV_EVENT_PORT_ACTIVE ||
		     event.event_type == IBV_EVENT_PORT_ERR) &&
		    dev->data->dev_conf.intr_conf.lsc) {
			mlx5_glue->ack_async_event(&event);
			if (mlx5_link_update(dev, 0) == -EAGAIN) {
				usleep(0);
				continue;
			}
			rte_eth_dev_callback_process
				(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
			continue;
		}
		DRV_LOG(DEBUG,
			"port %u cannot handle an unknown event (type %d)",
			dev->data->port_id, event.event_type);
		mlx5_glue->ack_async_event(&event);
	}
}

/*
 * Unregister callback handler safely. The handler may be active
 * while we are trying to unregister it; in this case
 * rte_intr_callback_unregister() returns -EAGAIN. This routine checks
 * the return code and tries to unregister the handler again.
 *
 * @param handle
 *   interrupt handle
 * @param cb_fn
 *   pointer to callback routine
 * @param cb_arg
 *   opaque callback parameter
 */
void
mlx5_intr_callback_unregister(const struct rte_intr_handle *handle,
			      rte_intr_callback_fn cb_fn, void *cb_arg)
{
	/*
	 * Try to reduce timeout management overhead by not calling
	 * the timer related routines on the first iteration. If the
	 * unregistering succeeds on first call there will be no
	 * timer calls at all.
	 */
	uint64_t twait = 0;
	uint64_t start = 0;

	do {
		int ret;

		ret = rte_intr_callback_unregister(handle, cb_fn, cb_arg);
		if (ret >= 0)
			return;
		if (ret != -EAGAIN) {
			DRV_LOG(INFO, "failed to unregister interrupt"
				      " handler (error: %d)", ret);
			MLX5_ASSERT(false);
			return;
		}
		if (twait) {
			struct timespec onems;

			/* Wait one millisecond and try again. */
			onems.tv_sec = 0;
			onems.tv_nsec = NS_PER_S / MS_PER_S;
			nanosleep(&onems, 0);
			/* Check whether one second elapsed. */
			if ((rte_get_timer_cycles() - start) <= twait)
				continue;
		} else {
			/*
			 * We get the amount of timer ticks for one second.
			 * If this amount elapsed it means we spent one
			 * second in waiting. This branch is executed once
			 * on first iteration.
			 */
			twait = rte_get_timer_hz();
			MLX5_ASSERT(twait);
		}
		/*
		 * Timeout elapsed, show message (once a second) and retry.
		 * We have no other acceptable option here: if we ignored
		 * the unregistering return code the handler would not
		 * be unregistered, the fd would be closed and we might
		 * crash. Waiting and logging in the loop seems to be the
		 * least bad choice.
		 */
		DRV_LOG(INFO, "Retrying to unregister interrupt handler");
		start = rte_get_timer_cycles();
	} while (true);
}

/**
 * Handle DEVX interrupts from the NIC.
 * This function is probably called from the DPDK host thread.
 *
 * @param cb_arg
 *   Callback argument.
 */
void
mlx5_dev_interrupt_handler_devx(void *cb_arg)
{
#ifndef HAVE_IBV_DEVX_ASYNC
	(void)cb_arg;
	return;
#else
	struct mlx5_dev_ctx_shared *sh = cb_arg;
	union {
		struct mlx5dv_devx_async_cmd_hdr cmd_resp;
		uint8_t buf[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
			    MLX5_ST_SZ_BYTES(traffic_counter) +
			    sizeof(struct mlx5dv_devx_async_cmd_hdr)];
	} out;
	uint8_t *buf = out.buf + sizeof(out.cmd_resp);

	while (!mlx5_glue->devx_get_async_cmd_comp(sh->devx_comp,
						   &out.cmd_resp,
						   sizeof(out.buf)))
		mlx5_flow_async_pool_query_handle
			(sh, (uint64_t)out.cmd_resp.wr_id,
			 mlx5_devx_get_out_command_status(buf));
#endif /* HAVE_IBV_DEVX_ASYNC */
}

/**
 * DPDK callback to bring the link DOWN.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_set_link_down(struct rte_eth_dev *dev)
{
	return mlx5_set_flags(dev, ~IFF_UP, ~IFF_UP);
}

/**
 * DPDK callback to bring the link UP.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_set_link_up(struct rte_eth_dev *dev)
{
	return mlx5_set_flags(dev, ~IFF_UP, IFF_UP);
}

/**
 * Check if mlx5 device was removed.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   1 when device is removed, otherwise 0.
 */
int
mlx5_is_removed(struct rte_eth_dev *dev)
{
	struct ibv_device_attr device_attr;
	struct mlx5_priv *priv = dev->data->dev_private;

	if (mlx5_glue->query_device(priv->sh->ctx, &device_attr) == EIO)
		return 1;
	return 0;
}

/**
 * Analyze gathered port parameters via sysfs to recognize master
 * and representor devices for E-Switch configuration.
 *
 * @param[in] device_dir
 *   Flag indicating presence of the "device" directory under the port
 *   device key.
 * @param[inout] switch_info
 *   Port information, including port name as a number and port name
 *   type if recognized.
 *
 * @return
 *   Master and representor flags are set in switch_info according to
 *   recognized parameters (if any).
 */
static void
mlx5_sysfs_check_switch_info(bool device_dir,
			     struct mlx5_switch_info *switch_info)
{
	switch (switch_info->name_type) {
	case MLX5_PHYS_PORT_NAME_TYPE_UNKNOWN:
		/*
		 * Name is not recognized, assume the master,
		 * check the device directory presence.
		 */
		switch_info->master = device_dir;
		break;
	case MLX5_PHYS_PORT_NAME_TYPE_NOTSET:
		/*
		 * Name is not set, this assumes the legacy naming
		 * schema for master, just check if there is
		 * a device directory.
		 */
		switch_info->master = device_dir;
		break;
	case MLX5_PHYS_PORT_NAME_TYPE_UPLINK:
		/* New uplink naming schema recognized. */
		switch_info->master = 1;
		break;
	case MLX5_PHYS_PORT_NAME_TYPE_LEGACY:
		/* Legacy representors naming schema. */
		switch_info->representor = !device_dir;
		break;
	case MLX5_PHYS_PORT_NAME_TYPE_PFHPF:
		/* Fallthrough */
	case MLX5_PHYS_PORT_NAME_TYPE_PFVF:
		/* Fallthrough */
	case MLX5_PHYS_PORT_NAME_TYPE_PFSF:
		/* New representors naming schema. */
		switch_info->representor = 1;
		break;
	default:
		switch_info->master = device_dir;
		break;
	}
}
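
/*
 * For reference, typical phys_port_name values and how they are expected to
 * classify (based on the kernel naming conventions, not guaranteed to be
 * exhaustive): "p0" is an uplink (master) port, "pf0vf2" and "pf0sf1" are
 * VF/SF representors, a bare number such as "2" is a legacy representor
 * name, and an empty or unrecognized name falls back to checking whether
 * the "device" directory exists. The actual string parsing is done by
 * mlx5_translate_port_name().
 */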
10571256805dSOphir Munk */ 10581256805dSOphir Munk int 10591256805dSOphir Munk mlx5_sysfs_switch_info(unsigned int ifindex, struct mlx5_switch_info *info) 10601256805dSOphir Munk { 10611256805dSOphir Munk char ifname[IF_NAMESIZE]; 10621256805dSOphir Munk char port_name[IF_NAMESIZE]; 10631256805dSOphir Munk FILE *file; 10641256805dSOphir Munk struct mlx5_switch_info data = { 10651256805dSOphir Munk .master = 0, 10661256805dSOphir Munk .representor = 0, 10671256805dSOphir Munk .name_type = MLX5_PHYS_PORT_NAME_TYPE_NOTSET, 10681256805dSOphir Munk .port_name = 0, 10691256805dSOphir Munk .switch_id = 0, 10701256805dSOphir Munk }; 10711256805dSOphir Munk DIR *dir; 10721256805dSOphir Munk bool port_switch_id_set = false; 10731256805dSOphir Munk bool device_dir = false; 10741256805dSOphir Munk char c; 10751256805dSOphir Munk int ret; 10761256805dSOphir Munk 10771256805dSOphir Munk if (!if_indextoname(ifindex, ifname)) { 10781256805dSOphir Munk rte_errno = errno; 10791256805dSOphir Munk return -rte_errno; 10801256805dSOphir Munk } 10811256805dSOphir Munk 10821256805dSOphir Munk MKSTR(phys_port_name, "/sys/class/net/%s/phys_port_name", 10831256805dSOphir Munk ifname); 10841256805dSOphir Munk MKSTR(phys_switch_id, "/sys/class/net/%s/phys_switch_id", 10851256805dSOphir Munk ifname); 10861256805dSOphir Munk MKSTR(pci_device, "/sys/class/net/%s/device", 10871256805dSOphir Munk ifname); 10881256805dSOphir Munk 10891256805dSOphir Munk file = fopen(phys_port_name, "rb"); 10901256805dSOphir Munk if (file != NULL) { 109163d1db71SMichael Baum ret = fscanf(file, "%" RTE_STR(IF_NAMESIZE) "s", port_name); 10921256805dSOphir Munk fclose(file); 10931256805dSOphir Munk if (ret == 1) 10941256805dSOphir Munk mlx5_translate_port_name(port_name, &data); 10951256805dSOphir Munk } 10961256805dSOphir Munk file = fopen(phys_switch_id, "rb"); 10971256805dSOphir Munk if (file == NULL) { 10981256805dSOphir Munk rte_errno = errno; 10991256805dSOphir Munk return -rte_errno; 11001256805dSOphir Munk } 11011256805dSOphir Munk port_switch_id_set = 11021256805dSOphir Munk fscanf(file, "%" SCNx64 "%c", &data.switch_id, &c) == 2 && 11031256805dSOphir Munk c == '\n'; 11041256805dSOphir Munk fclose(file); 11051256805dSOphir Munk dir = opendir(pci_device); 11061256805dSOphir Munk if (dir != NULL) { 11071256805dSOphir Munk closedir(dir); 11081256805dSOphir Munk device_dir = true; 11091256805dSOphir Munk } 11101256805dSOphir Munk if (port_switch_id_set) { 11111256805dSOphir Munk /* We have some E-Switch configuration. */ 11121256805dSOphir Munk mlx5_sysfs_check_switch_info(device_dir, &data); 11131256805dSOphir Munk } 11141256805dSOphir Munk *info = data; 11151256805dSOphir Munk MLX5_ASSERT(!(data.master && data.representor)); 11161256805dSOphir Munk if (data.master && data.representor) { 11171256805dSOphir Munk DRV_LOG(ERR, "ifindex %u device is recognized as master" 11181256805dSOphir Munk " and as representor", ifindex); 11191256805dSOphir Munk rte_errno = ENODEV; 11201256805dSOphir Munk return -rte_errno; 11211256805dSOphir Munk } 11221256805dSOphir Munk return 0; 11231256805dSOphir Munk } 11241256805dSOphir Munk 11251256805dSOphir Munk /** 1126c21e5facSXueming Li * Get bond information associated with network interface. 1127c21e5facSXueming Li * 1128c21e5facSXueming Li * @param pf_ifindex 1129c21e5facSXueming Li * Network interface index of bond slave interface 1130c21e5facSXueming Li * @param[out] ifindex 1131c21e5facSXueming Li * Pointer to bond ifindex. 
1132c21e5facSXueming Li * @param[out] ifname 1133c21e5facSXueming Li * Pointer to bond ifname. 1134c21e5facSXueming Li * 1135c21e5facSXueming Li * @return 1136c21e5facSXueming Li * 0 on success, a negative errno value otherwise and rte_errno is set. 1137c21e5facSXueming Li */ 1138c21e5facSXueming Li int 1139c21e5facSXueming Li mlx5_sysfs_bond_info(unsigned int pf_ifindex, unsigned int *ifindex, 1140c21e5facSXueming Li char *ifname) 1141c21e5facSXueming Li { 1142c21e5facSXueming Li char name[IF_NAMESIZE]; 1143c21e5facSXueming Li FILE *file; 1144c21e5facSXueming Li unsigned int index; 1145c21e5facSXueming Li int ret; 1146c21e5facSXueming Li 1147c21e5facSXueming Li if (!if_indextoname(pf_ifindex, name) || !strlen(name)) { 1148c21e5facSXueming Li rte_errno = errno; 1149c21e5facSXueming Li return -rte_errno; 1150c21e5facSXueming Li } 1151c21e5facSXueming Li MKSTR(bond_if, "/sys/class/net/%s/master/ifindex", name); 1152c21e5facSXueming Li /* read bond ifindex */ 1153c21e5facSXueming Li file = fopen(bond_if, "rb"); 1154c21e5facSXueming Li if (file == NULL) { 1155c21e5facSXueming Li rte_errno = errno; 1156c21e5facSXueming Li return -rte_errno; 1157c21e5facSXueming Li } 1158c21e5facSXueming Li ret = fscanf(file, "%u", &index); 1159c21e5facSXueming Li fclose(file); 1160c21e5facSXueming Li if (ret <= 0) { 1161c21e5facSXueming Li rte_errno = errno; 1162c21e5facSXueming Li return -rte_errno; 1163c21e5facSXueming Li } 1164c21e5facSXueming Li if (ifindex) 1165c21e5facSXueming Li *ifindex = index; 1166c21e5facSXueming Li 1167c21e5facSXueming Li /* read bond device name from symbol link */ 1168c21e5facSXueming Li if (ifname) { 1169c21e5facSXueming Li if (!if_indextoname(index, ifname)) { 1170c21e5facSXueming Li rte_errno = errno; 1171c21e5facSXueming Li return -rte_errno; 1172c21e5facSXueming Li } 1173c21e5facSXueming Li } 1174c21e5facSXueming Li return 0; 1175c21e5facSXueming Li } 1176c21e5facSXueming Li 1177c21e5facSXueming Li /** 11781256805dSOphir Munk * DPDK callback to retrieve plug-in module EEPROM information (type and size). 11791256805dSOphir Munk * 11801256805dSOphir Munk * @param dev 11811256805dSOphir Munk * Pointer to Ethernet device structure. 11821256805dSOphir Munk * @param[out] modinfo 11831256805dSOphir Munk * Storage for plug-in module EEPROM information. 11841256805dSOphir Munk * 11851256805dSOphir Munk * @return 11861256805dSOphir Munk * 0 on success, a negative errno value otherwise and rte_errno is set. 
/**
 * DPDK callback to retrieve plug-in module EEPROM information (type and size).
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] modinfo
 *   Storage for plug-in module EEPROM information.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_get_module_info(struct rte_eth_dev *dev,
		     struct rte_eth_dev_module_info *modinfo)
{
	struct ethtool_modinfo info = {
		.cmd = ETHTOOL_GMODULEINFO,
	};
	struct ifreq ifr = (struct ifreq) {
		.ifr_data = (void *)&info,
	};
	int ret = 0;

	if (!dev || !modinfo) {
		DRV_LOG(WARNING, "missing argument, cannot get module info");
		rte_errno = EINVAL;
		return -rte_errno;
	}
	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
	if (ret) {
		DRV_LOG(WARNING, "port %u ioctl(SIOCETHTOOL) failed: %s",
			dev->data->port_id, strerror(rte_errno));
		return ret;
	}
	modinfo->type = info.type;
	modinfo->eeprom_len = info.eeprom_len;
	return ret;
}

/**
 * DPDK callback to retrieve plug-in module EEPROM data.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] info
 *   Storage for plug-in module EEPROM data.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int mlx5_get_module_eeprom(struct rte_eth_dev *dev,
			   struct rte_dev_eeprom_info *info)
{
	struct ethtool_eeprom *eeprom;
	struct ifreq ifr;
	int ret = 0;

	if (!dev || !info) {
		DRV_LOG(WARNING, "missing argument, cannot get module eeprom");
		rte_errno = EINVAL;
		return -rte_errno;
	}
	eeprom = mlx5_malloc(MLX5_MEM_ZERO,
			     (sizeof(struct ethtool_eeprom) + info->length), 0,
			     SOCKET_ID_ANY);
	if (!eeprom) {
		DRV_LOG(WARNING, "port %u cannot allocate memory for "
			"eeprom data", dev->data->port_id);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	eeprom->cmd = ETHTOOL_GMODULEEEPROM;
	eeprom->offset = info->offset;
	eeprom->len = info->length;
	ifr = (struct ifreq) {
		.ifr_data = (void *)eeprom,
	};
	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
	if (ret)
		DRV_LOG(WARNING, "port %u ioctl(SIOCETHTOOL) failed: %s",
			dev->data->port_id, strerror(rte_errno));
	else
		rte_memcpy(info->data, eeprom->data, info->length);
	mlx5_free(eeprom);
	return ret;
}

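/*
 * Illustrative sketch only (not part of the driver): the two callbacks above
 * back the generic ethdev module EEPROM API. An application would typically
 * query the EEPROM size first and then read the data. The example function,
 * the guard macro and "port_id" being a valid port are assumptions.
 */
#ifdef MLX5_ETHDEV_OS_EXAMPLES /* hypothetical guard, examples only */
static int
example_dump_module_eeprom(uint16_t port_id)
{
	struct rte_eth_dev_module_info modinfo = { 0 };
	struct rte_dev_eeprom_info eeprom = { 0 };
	int ret;

	ret = rte_eth_dev_get_module_info(port_id, &modinfo);
	if (ret != 0)
		return ret;
	eeprom.length = modinfo.eeprom_len;
	eeprom.data = malloc(eeprom.length);
	if (eeprom.data == NULL)
		return -ENOMEM;
	ret = rte_eth_dev_get_module_eeprom(port_id, &eeprom);
	/* On success, parse the SFF pages found in eeprom.data here. */
	free(eeprom.data);
	return ret;
}
#endif
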
/**
 * Read device counters table.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] pf
 *   PF index in case of bonding device, -1 otherwise.
 * @param[out] stats
 *   Counters table output buffer.
 *
 * @return
 *   0 on success and stats is filled, negative errno value otherwise and
 *   rte_errno is set.
 */
static int
_mlx5_os_read_dev_counters(struct rte_eth_dev *dev, int pf, uint64_t *stats)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
	unsigned int i;
	struct ifreq ifr;
	unsigned int stats_sz = xstats_ctrl->stats_n * sizeof(uint64_t);
	unsigned char et_stat_buf[sizeof(struct ethtool_stats) + stats_sz];
	struct ethtool_stats *et_stats = (struct ethtool_stats *)et_stat_buf;
	int ret;

	et_stats->cmd = ETHTOOL_GSTATS;
	et_stats->n_stats = xstats_ctrl->stats_n;
	ifr.ifr_data = (caddr_t)et_stats;
	if (pf >= 0)
		ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[pf].ifname,
					   SIOCETHTOOL, &ifr);
	else
		ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
	if (ret) {
		DRV_LOG(WARNING,
			"port %u unable to read statistic values from device",
			dev->data->port_id);
		return ret;
	}
	for (i = 0; i != xstats_ctrl->mlx5_stats_n; ++i) {
		if (xstats_ctrl->info[i].dev)
			continue;
		stats[i] += (uint64_t)
			et_stats->data[xstats_ctrl->dev_table_idx[i]];
	}
	return 0;
}

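/*
 * Note on the buffer sizing above (descriptive, not from the original code):
 * ETHTOOL_GSTATS fills a struct ethtool_stats header (cmd, n_stats) followed
 * by n_stats 64-bit values, which is why et_stat_buf reserves
 * sizeof(struct ethtool_stats) plus stats_n * sizeof(uint64_t) bytes.
 */
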
/**
 * Read device counters.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[out] stats
 *   Counters table output buffer.
 *
 * @return
 *   0 on success and stats is filled, negative errno value otherwise and
 *   rte_errno is set.
 */
int
mlx5_os_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
	int ret = 0, i;

	memset(stats, 0, sizeof(*stats) * xstats_ctrl->mlx5_stats_n);
	/* Read ifreq counters. */
	if (priv->master && priv->pf_bond >= 0) {
		/* Sum xstats from bonding device member ports. */
		for (i = 0; i < priv->sh->bond.n_port; i++) {
			ret = _mlx5_os_read_dev_counters(dev, i, stats);
			if (ret)
				return ret;
		}
	} else {
		ret = _mlx5_os_read_dev_counters(dev, -1, stats);
	}
	/* Read IB counters. */
	for (i = 0; i != xstats_ctrl->mlx5_stats_n; ++i) {
		if (!xstats_ctrl->info[i].dev)
			continue;
		ret = mlx5_os_read_dev_stat(priv, xstats_ctrl->info[i].ctr_name,
					    &stats[i]);
		/* Return the last cached xstats counter if the read fails. */
		if (ret == 0)
			xstats_ctrl->xstats[i] = stats[i];
		else
			stats[i] = xstats_ctrl->xstats[i];
	}
	return ret;
}

/**
 * Query the number of statistics provided by ETHTOOL.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   Number of statistics on success, negative errno value otherwise and
 *   rte_errno is set.
 */
int
mlx5_os_get_stats_n(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ethtool_drvinfo drvinfo;
	struct ifreq ifr;
	int ret;

	drvinfo.cmd = ETHTOOL_GDRVINFO;
	ifr.ifr_data = (caddr_t)&drvinfo;
	if (priv->master && priv->pf_bond >= 0)
		/* Bonding PF. */
		ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[0].ifname,
					   SIOCETHTOOL, &ifr);
	else
		ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
	if (ret) {
		DRV_LOG(WARNING, "port %u unable to query number of statistics",
			dev->data->port_id);
		return ret;
	}
	return drvinfo.n_stats;
}

static const struct mlx5_counter_ctrl mlx5_counters_init[] = {
	{
		.dpdk_name = "rx_unicast_bytes",
		.ctr_name = "rx_vport_unicast_bytes",
	},
	{
		.dpdk_name = "rx_multicast_bytes",
		.ctr_name = "rx_vport_multicast_bytes",
	},
	{
		.dpdk_name = "rx_broadcast_bytes",
		.ctr_name = "rx_vport_broadcast_bytes",
	},
	{
		.dpdk_name = "rx_unicast_packets",
		.ctr_name = "rx_vport_unicast_packets",
	},
	{
		.dpdk_name = "rx_multicast_packets",
		.ctr_name = "rx_vport_multicast_packets",
	},
	{
		.dpdk_name = "rx_broadcast_packets",
		.ctr_name = "rx_vport_broadcast_packets",
	},
	{
		.dpdk_name = "tx_unicast_bytes",
		.ctr_name = "tx_vport_unicast_bytes",
	},
	{
		.dpdk_name = "tx_multicast_bytes",
		.ctr_name = "tx_vport_multicast_bytes",
	},
	{
		.dpdk_name = "tx_broadcast_bytes",
		.ctr_name = "tx_vport_broadcast_bytes",
	},
	{
		.dpdk_name = "tx_unicast_packets",
		.ctr_name = "tx_vport_unicast_packets",
	},
	{
		.dpdk_name = "tx_multicast_packets",
		.ctr_name = "tx_vport_multicast_packets",
	},
	{
		.dpdk_name = "tx_broadcast_packets",
		.ctr_name = "tx_vport_broadcast_packets",
	},
	{
		.dpdk_name = "rx_wqe_errors",
		.ctr_name = "rx_wqe_err",
	},
	{
		.dpdk_name = "rx_phy_crc_errors",
		.ctr_name = "rx_crc_errors_phy",
	},
	{
		.dpdk_name = "rx_phy_in_range_len_errors",
		.ctr_name = "rx_in_range_len_errors_phy",
	},
	{
		.dpdk_name = "rx_phy_symbol_errors",
		.ctr_name = "rx_symbol_err_phy",
	},
	{
		.dpdk_name = "tx_phy_errors",
		.ctr_name = "tx_errors_phy",
	},
	{
		.dpdk_name = "rx_out_of_buffer",
		.ctr_name = "out_of_buffer",
		.dev = 1,
	},
	{
		.dpdk_name = "tx_phy_packets",
		.ctr_name = "tx_packets_phy",
	},
	{
		.dpdk_name = "rx_phy_packets",
		.ctr_name = "rx_packets_phy",
	},
	{
		.dpdk_name = "tx_phy_discard_packets",
		.ctr_name = "tx_discards_phy",
	},
	{
		.dpdk_name = "rx_phy_discard_packets",
		.ctr_name = "rx_discards_phy",
	},
	{
		.dpdk_name = "tx_phy_bytes",
		.ctr_name = "tx_bytes_phy",
	},
	{
		.dpdk_name = "rx_phy_bytes",
		.ctr_name = "rx_bytes_phy",
	},
	/* Representor only */
	{
		.dpdk_name = "rx_vport_packets",
		.ctr_name = "vport_rx_packets",
	},
	{
		.dpdk_name = "rx_vport_bytes",
		.ctr_name = "vport_rx_bytes",
	},
	{
		.dpdk_name = "tx_vport_packets",
		.ctr_name = "vport_tx_packets",
	},
	{
		.dpdk_name = "tx_vport_bytes",
		.ctr_name = "vport_tx_bytes",
	},
};

static const unsigned int xstats_n = RTE_DIM(mlx5_counters_init);

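/*
 * Illustrative sketch only (not part of the driver): the dpdk_name column in
 * the table above is what applications see when listing extended statistics
 * through the generic xstats API. The example function, the guard macro and
 * "port_id" being a valid port are assumptions.
 */
#ifdef MLX5_ETHDEV_OS_EXAMPLES /* hypothetical guard, examples only */
static void
example_list_xstats(uint16_t port_id)
{
	int i, n = rte_eth_xstats_get_names(port_id, NULL, 0);
	struct rte_eth_xstat_name *names;
	struct rte_eth_xstat *values;

	if (n <= 0)
		return;
	names = calloc(n, sizeof(*names));
	values = calloc(n, sizeof(*values));
	if (names != NULL && values != NULL &&
	    rte_eth_xstats_get_names(port_id, names, n) == n &&
	    rte_eth_xstats_get(port_id, values, n) == n)
		for (i = 0; i < n; i++)
			printf("%s: %" PRIu64 "\n",
			       names[i].name, values[i].value);
	free(names);
	free(values);
}
#endif
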
/**
 * Init the structures to read device counters.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
void
mlx5_os_stats_init(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
	struct mlx5_stats_ctrl *stats_ctrl = &priv->stats_ctrl;
	unsigned int i;
	unsigned int j;
	struct ifreq ifr;
	struct ethtool_gstrings *strings = NULL;
	unsigned int dev_stats_n;
	unsigned int str_sz;
	int ret;

	/* So that it won't aggregate for each init. */
	xstats_ctrl->mlx5_stats_n = 0;
	ret = mlx5_os_get_stats_n(dev);
	if (ret < 0) {
		DRV_LOG(WARNING, "port %u no extended statistics available",
			dev->data->port_id);
		return;
	}
	dev_stats_n = ret;
	/* Allocate memory to grab stat names and values. */
	str_sz = dev_stats_n * ETH_GSTRING_LEN;
	strings = (struct ethtool_gstrings *)
		  mlx5_malloc(0, str_sz + sizeof(struct ethtool_gstrings), 0,
			      SOCKET_ID_ANY);
	if (!strings) {
		DRV_LOG(WARNING, "port %u unable to allocate memory for xstats",
			dev->data->port_id);
		return;
	}
	strings->cmd = ETHTOOL_GSTRINGS;
	strings->string_set = ETH_SS_STATS;
	strings->len = dev_stats_n;
	ifr.ifr_data = (caddr_t)strings;
	if (priv->master && priv->pf_bond >= 0)
		/* Bonding master. */
		ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[0].ifname,
					   SIOCETHTOOL, &ifr);
	else
		ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
	if (ret) {
		DRV_LOG(WARNING, "port %u unable to get statistic names",
			dev->data->port_id);
		goto free;
	}
	for (i = 0; i != dev_stats_n; ++i) {
		const char *curr_string = (const char *)
			&strings->data[i * ETH_GSTRING_LEN];

		for (j = 0; j != xstats_n; ++j) {
			if (!strcmp(mlx5_counters_init[j].ctr_name,
				    curr_string)) {
				unsigned int idx = xstats_ctrl->mlx5_stats_n++;

				xstats_ctrl->dev_table_idx[idx] = i;
				xstats_ctrl->info[idx] = mlx5_counters_init[j];
				break;
			}
		}
	}
	/* Add dev counters. */
	for (i = 0; i != xstats_n; ++i) {
		if (mlx5_counters_init[i].dev) {
			unsigned int idx = xstats_ctrl->mlx5_stats_n++;

			xstats_ctrl->info[idx] = mlx5_counters_init[i];
			xstats_ctrl->hw_stats[idx] = 0;
		}
	}
	MLX5_ASSERT(xstats_ctrl->mlx5_stats_n <= MLX5_MAX_XSTATS);
	xstats_ctrl->stats_n = dev_stats_n;
	/* Copy to base at first time. */
	ret = mlx5_os_read_dev_counters(dev, xstats_ctrl->base);
	if (ret)
		DRV_LOG(ERR, "port %u cannot read device counters: %s",
			dev->data->port_id, strerror(rte_errno));
	mlx5_os_read_dev_stat(priv, "out_of_buffer", &stats_ctrl->imissed_base);
	stats_ctrl->imissed = 0;
free:
	mlx5_free(strings);
}

/**
 * Get MAC address by querying netdevice.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[out] mac
 *   MAC address output buffer.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[RTE_ETHER_ADDR_LEN])
{
	struct ifreq request;
	int ret;

	ret = mlx5_ifreq(dev, SIOCGIFHWADDR, &request);
	if (ret)
		return ret;
	memcpy(mac, request.ifr_hwaddr.sa_data, RTE_ETHER_ADDR_LEN);
	return 0;
}

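/*
 * Illustrative sketch only (not part of the driver): printing the address
 * returned by mlx5_get_mac(). The example function and the guard macro are
 * hypothetical.
 */
#ifdef MLX5_ETHDEV_OS_EXAMPLES /* hypothetical guard, examples only */
static void
example_print_mac(struct rte_eth_dev *dev)
{
	struct rte_ether_addr mac;
	char buf[RTE_ETHER_ADDR_FMT_SIZE];

	if (mlx5_get_mac(dev, &mac.addr_bytes) == 0) {
		rte_ether_format_addr(buf, sizeof(buf), &mac);
		printf("port %u MAC %s\n", dev->data->port_id, buf);
	}
}
#endif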