xref: /dpdk/drivers/net/mlx5/linux/mlx5_ethdev_os.c (revision d0f858a6c6f0138d3d5fb099612a8a699d78f5b7)
11256805dSOphir Munk /* SPDX-License-Identifier: BSD-3-Clause
21256805dSOphir Munk  * Copyright 2015 6WIND S.A.
31256805dSOphir Munk  * Copyright 2015 Mellanox Technologies, Ltd
41256805dSOphir Munk  */
51256805dSOphir Munk 
61256805dSOphir Munk #include <stddef.h>
71256805dSOphir Munk #include <inttypes.h>
81256805dSOphir Munk #include <unistd.h>
91256805dSOphir Munk #include <stdbool.h>
101256805dSOphir Munk #include <stdint.h>
111256805dSOphir Munk #include <stdio.h>
121256805dSOphir Munk #include <string.h>
131256805dSOphir Munk #include <stdlib.h>
141256805dSOphir Munk #include <errno.h>
151256805dSOphir Munk #include <dirent.h>
161256805dSOphir Munk #include <net/if.h>
171256805dSOphir Munk #include <sys/ioctl.h>
181256805dSOphir Munk #include <sys/socket.h>
191256805dSOphir Munk #include <netinet/in.h>
201256805dSOphir Munk #include <linux/ethtool.h>
211256805dSOphir Munk #include <linux/sockios.h>
221256805dSOphir Munk #include <fcntl.h>
231256805dSOphir Munk #include <stdalign.h>
241256805dSOphir Munk #include <sys/un.h>
251256805dSOphir Munk #include <time.h>
261256805dSOphir Munk 
2716deeedfSThomas Monjalon #include <ethdev_linux_ethtool.h>
28df96fd0dSBruce Richardson #include <ethdev_driver.h>
291f37cb2bSDavid Marchand #include <bus_pci_driver.h>
301256805dSOphir Munk #include <rte_mbuf.h>
311256805dSOphir Munk #include <rte_common.h>
329b31fc90SViacheslav Ovsiienko #include <rte_eal_paging.h>
331256805dSOphir Munk #include <rte_interrupts.h>
341256805dSOphir Munk #include <rte_malloc.h>
351256805dSOphir Munk #include <rte_string_fns.h>
361256805dSOphir Munk #include <rte_rwlock.h>
371256805dSOphir Munk #include <rte_cycles.h>
381256805dSOphir Munk 
391256805dSOphir Munk #include <mlx5_glue.h>
401256805dSOphir Munk #include <mlx5_devx_cmds.h>
411256805dSOphir Munk #include <mlx5_common.h>
422175c4dcSSuanming Mou #include <mlx5_malloc.h>
4317f95513SDmitry Kozlyuk #include <mlx5_nl.h>
441256805dSOphir Munk 
451256805dSOphir Munk #include "mlx5.h"
461256805dSOphir Munk #include "mlx5_rxtx.h"
471256805dSOphir Munk #include "mlx5_utils.h"
481256805dSOphir Munk 
49919488fbSXueming Li /* Get interface index from SubFunction device name. */
50919488fbSXueming Li int
51919488fbSXueming Li mlx5_auxiliary_get_ifindex(const char *sf_name)
52919488fbSXueming Li {
53919488fbSXueming Li 	char if_name[IF_NAMESIZE] = { 0 };
54919488fbSXueming Li 
55919488fbSXueming Li 	if (mlx5_auxiliary_get_child_name(sf_name, "/net",
56919488fbSXueming Li 					  if_name, sizeof(if_name)) != 0)
57919488fbSXueming Li 		return -rte_errno;
58919488fbSXueming Li 	return if_nametoindex(if_name);
59919488fbSXueming Li }
601256805dSOphir Munk 
611256805dSOphir Munk /**
621256805dSOphir Munk  * Get interface name from private structure.
631256805dSOphir Munk  *
64aec086c9SMatan Azrad  * This is a port representor-aware version of mlx5_get_ifname_sysfs().
651256805dSOphir Munk  *
661256805dSOphir Munk  * @param[in] dev
671256805dSOphir Munk  *   Pointer to Ethernet device.
681256805dSOphir Munk  * @param[out] ifname
691256805dSOphir Munk  *   Interface name output buffer.
701256805dSOphir Munk  *
711256805dSOphir Munk  * @return
721256805dSOphir Munk  *   0 on success, a negative errno value otherwise and rte_errno is set.
731256805dSOphir Munk  */
741256805dSOphir Munk int
7528743807STal Shnaiderman mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[MLX5_NAMESIZE])
761256805dSOphir Munk {
771256805dSOphir Munk 	struct mlx5_priv *priv = dev->data->dev_private;
781256805dSOphir Munk 	unsigned int ifindex;
791256805dSOphir Munk 
801256805dSOphir Munk 	MLX5_ASSERT(priv);
811256805dSOphir Munk 	MLX5_ASSERT(priv->sh);
82f5f4c482SXueming Li 	if (priv->master && priv->sh->bond.ifindex > 0) {
83f5f4c482SXueming Li 		memcpy(ifname, priv->sh->bond.ifname, MLX5_NAMESIZE);
84c21e5facSXueming Li 		return 0;
85c21e5facSXueming Li 	}
861256805dSOphir Munk 	ifindex = mlx5_ifindex(dev);
871256805dSOphir Munk 	if (!ifindex) {
881256805dSOphir Munk 		if (!priv->representor)
89aec086c9SMatan Azrad 			return mlx5_get_ifname_sysfs(priv->sh->ibdev_path,
90aec086c9SMatan Azrad 						     *ifname);
911256805dSOphir Munk 		rte_errno = ENXIO;
921256805dSOphir Munk 		return -rte_errno;
931256805dSOphir Munk 	}
941256805dSOphir Munk 	if (if_indextoname(ifindex, &(*ifname)[0]))
951256805dSOphir Munk 		return 0;
961256805dSOphir Munk 	rte_errno = errno;
971256805dSOphir Munk 	return -rte_errno;
981256805dSOphir Munk }
991256805dSOphir Munk 
1001256805dSOphir Munk /**
1017ed15acdSXueming Li  * Perform ifreq ioctl() on associated netdev ifname.
1027ed15acdSXueming Li  *
1037ed15acdSXueming Li  * @param[in] ifname
1047ed15acdSXueming Li  *   Pointer to netdev name.
1057ed15acdSXueming Li  * @param req
1067ed15acdSXueming Li  *   Request number to pass to ioctl().
1077ed15acdSXueming Li  * @param[out] ifr
1087ed15acdSXueming Li  *   Interface request structure output buffer.
1097ed15acdSXueming Li  *
1107ed15acdSXueming Li  * @return
1117ed15acdSXueming Li  *   0 on success, a negative errno value otherwise and rte_errno is set.
1127ed15acdSXueming Li  */
1137ed15acdSXueming Li static int
1147ed15acdSXueming Li mlx5_ifreq_by_ifname(const char *ifname, int req, struct ifreq *ifr)
1157ed15acdSXueming Li {
1167ed15acdSXueming Li 	int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
1177ed15acdSXueming Li 	int ret = 0;
1187ed15acdSXueming Li 
1197ed15acdSXueming Li 	if (sock == -1) {
1207ed15acdSXueming Li 		rte_errno = errno;
1217ed15acdSXueming Li 		return -rte_errno;
1227ed15acdSXueming Li 	}
1237ed15acdSXueming Li 	rte_strscpy(ifr->ifr_name, ifname, sizeof(ifr->ifr_name));
1247ed15acdSXueming Li 	ret = ioctl(sock, req, ifr);
1257ed15acdSXueming Li 	if (ret == -1) {
1267ed15acdSXueming Li 		rte_errno = errno;
1277ed15acdSXueming Li 		goto error;
1287ed15acdSXueming Li 	}
1297ed15acdSXueming Li 	close(sock);
1307ed15acdSXueming Li 	return 0;
1317ed15acdSXueming Li error:
1327ed15acdSXueming Li 	close(sock);
1337ed15acdSXueming Li 	return -rte_errno;
1347ed15acdSXueming Li }
1357ed15acdSXueming Li 
1367ed15acdSXueming Li /**
1371256805dSOphir Munk  * Perform ifreq ioctl() on associated Ethernet device.
1381256805dSOphir Munk  *
1391256805dSOphir Munk  * @param[in] dev
1401256805dSOphir Munk  *   Pointer to Ethernet device.
1411256805dSOphir Munk  * @param req
1421256805dSOphir Munk  *   Request number to pass to ioctl().
1431256805dSOphir Munk  * @param[out] ifr
1441256805dSOphir Munk  *   Interface request structure output buffer.
1451256805dSOphir Munk  *
1461256805dSOphir Munk  * @return
1471256805dSOphir Munk  *   0 on success, a negative errno value otherwise and rte_errno is set.
1481256805dSOphir Munk  */
14998c4b12aSOphir Munk static int
1501256805dSOphir Munk mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr)
1511256805dSOphir Munk {
1527ed15acdSXueming Li 	char ifname[sizeof(ifr->ifr_name)];
1537ed15acdSXueming Li 	int ret;
1541256805dSOphir Munk 
1557ed15acdSXueming Li 	ret = mlx5_get_ifname(dev, &ifname);
1561256805dSOphir Munk 	if (ret)
1571256805dSOphir Munk 		return -rte_errno;
1587ed15acdSXueming Li 	return mlx5_ifreq_by_ifname(ifname, req, ifr);
1591256805dSOphir Munk }
1601256805dSOphir Munk 
1611256805dSOphir Munk /**
1621256805dSOphir Munk  * Get device MTU.
1631256805dSOphir Munk  *
1641256805dSOphir Munk  * @param dev
1651256805dSOphir Munk  *   Pointer to Ethernet device.
1661256805dSOphir Munk  * @param[out] mtu
1671256805dSOphir Munk  *   MTU value output buffer.
1681256805dSOphir Munk  *
1691256805dSOphir Munk  * @return
1701256805dSOphir Munk  *   0 on success, a negative errno value otherwise and rte_errno is set.
1711256805dSOphir Munk  */
1721256805dSOphir Munk int
1731256805dSOphir Munk mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu)
1741256805dSOphir Munk {
1751256805dSOphir Munk 	struct ifreq request;
1761256805dSOphir Munk 	int ret = mlx5_ifreq(dev, SIOCGIFMTU, &request);
1771256805dSOphir Munk 
1781256805dSOphir Munk 	if (ret)
1791256805dSOphir Munk 		return ret;
1801256805dSOphir Munk 	*mtu = request.ifr_mtu;
1811256805dSOphir Munk 	return 0;
1821256805dSOphir Munk }
1831256805dSOphir Munk 
1841256805dSOphir Munk /**
1851256805dSOphir Munk  * Set device MTU.
1861256805dSOphir Munk  *
1871256805dSOphir Munk  * @param dev
1881256805dSOphir Munk  *   Pointer to Ethernet device.
1891256805dSOphir Munk  * @param mtu
1901256805dSOphir Munk  *   MTU value to set.
1911256805dSOphir Munk  *
1921256805dSOphir Munk  * @return
1931256805dSOphir Munk  *   0 on success, a negative errno value otherwise and rte_errno is set.
1941256805dSOphir Munk  */
1951256805dSOphir Munk int
1961256805dSOphir Munk mlx5_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
1971256805dSOphir Munk {
1981256805dSOphir Munk 	struct ifreq request = { .ifr_mtu = mtu, };
1991256805dSOphir Munk 
2001256805dSOphir Munk 	return mlx5_ifreq(dev, SIOCSIFMTU, &request);
2011256805dSOphir Munk }
2021256805dSOphir Munk 
2031256805dSOphir Munk /**
2041256805dSOphir Munk  * Set device flags.
2051256805dSOphir Munk  *
2061256805dSOphir Munk  * @param dev
2071256805dSOphir Munk  *   Pointer to Ethernet device.
2081256805dSOphir Munk  * @param keep
2091256805dSOphir Munk  *   Bitmask for flags that must remain untouched.
2101256805dSOphir Munk  * @param flags
2111256805dSOphir Munk  *   Bitmask for flags to modify.
2121256805dSOphir Munk  *
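 *   For example, mlx5_set_flags(dev, ~IFF_UP, IFF_UP) raises IFF_UP and
 *   leaves every other interface flag untouched (see mlx5_set_link_up()).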
2131256805dSOphir Munk  * @return
2141256805dSOphir Munk  *   0 on success, a negative errno value otherwise and rte_errno is set.
2151256805dSOphir Munk  */
21698c4b12aSOphir Munk static int
2171256805dSOphir Munk mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep, unsigned int flags)
2181256805dSOphir Munk {
2191256805dSOphir Munk 	struct ifreq request;
2201256805dSOphir Munk 	int ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &request);
2211256805dSOphir Munk 
2221256805dSOphir Munk 	if (ret)
2231256805dSOphir Munk 		return ret;
2241256805dSOphir Munk 	request.ifr_flags &= keep;
2251256805dSOphir Munk 	request.ifr_flags |= flags & ~keep;
2261256805dSOphir Munk 	return mlx5_ifreq(dev, SIOCSIFFLAGS, &request);
2271256805dSOphir Munk }
2281256805dSOphir Munk 
2291256805dSOphir Munk /**
2301256805dSOphir Munk  * Get device current raw clock counter
2311256805dSOphir Munk  *
2321256805dSOphir Munk  * @param dev
2331256805dSOphir Munk  *   Pointer to Ethernet device structure.
2341256805dSOphir Munk  * @param[out] time
2351256805dSOphir Munk  *   Current raw clock counter of the device.
2361256805dSOphir Munk  *
2371256805dSOphir Munk  * @return
2381256805dSOphir Munk  *   0 if the clock has been read correctly,
2391256805dSOphir Munk  *   the value of errno in case of error.
2401256805dSOphir Munk  */
2411256805dSOphir Munk int
2421256805dSOphir Munk mlx5_read_clock(struct rte_eth_dev *dev, uint64_t *clock)
2431256805dSOphir Munk {
2441256805dSOphir Munk 	struct mlx5_priv *priv = dev->data->dev_private;
245ca1418ceSMichael Baum 	struct ibv_context *ctx = priv->sh->cdev->ctx;
2461256805dSOphir Munk 	struct ibv_values_ex values;
2471256805dSOphir Munk 	int err = 0;
2481256805dSOphir Munk 
2491256805dSOphir Munk 	values.comp_mask = IBV_VALUES_MASK_RAW_CLOCK;
2501256805dSOphir Munk 	err = mlx5_glue->query_rt_values_ex(ctx, &values);
2511256805dSOphir Munk 	if (err != 0) {
2521256805dSOphir Munk 		DRV_LOG(WARNING, "Could not query the clock!");
2531256805dSOphir Munk 		return err;
2541256805dSOphir Munk 	}
2551256805dSOphir Munk 	*clock = values.raw_clock.tv_nsec;
2561256805dSOphir Munk 	return 0;
2571256805dSOphir Munk }
2581256805dSOphir Munk 
2591256805dSOphir Munk /**
2601256805dSOphir Munk  * Retrieve the master device for representor in the same switch domain.
2611256805dSOphir Munk  *
2621256805dSOphir Munk  * @param dev
2631256805dSOphir Munk  *   Pointer to representor Ethernet device structure.
2641256805dSOphir Munk  *
2651256805dSOphir Munk  * @return
2661256805dSOphir Munk  *   Master device structure on success, NULL otherwise.
2671256805dSOphir Munk  */
2681256805dSOphir Munk static struct rte_eth_dev *
2691256805dSOphir Munk mlx5_find_master_dev(struct rte_eth_dev *dev)
2701256805dSOphir Munk {
2711256805dSOphir Munk 	struct mlx5_priv *priv;
2721256805dSOphir Munk 	uint16_t port_id;
2731256805dSOphir Munk 	uint16_t domain_id;
2741256805dSOphir Munk 
2751256805dSOphir Munk 	priv = dev->data->dev_private;
2761256805dSOphir Munk 	domain_id = priv->domain_id;
2771256805dSOphir Munk 	MLX5_ASSERT(priv->representor);
27856bb3c84SXueming Li 	MLX5_ETH_FOREACH_DEV(port_id, dev->device) {
2791256805dSOphir Munk 		struct mlx5_priv *opriv =
2801256805dSOphir Munk 			rte_eth_devices[port_id].data->dev_private;
2811256805dSOphir Munk 		if (opriv &&
2821256805dSOphir Munk 		    opriv->master &&
2831256805dSOphir Munk 		    opriv->domain_id == domain_id &&
2841256805dSOphir Munk 		    opriv->sh == priv->sh)
2851256805dSOphir Munk 			return &rte_eth_devices[port_id];
2861256805dSOphir Munk 	}
2871256805dSOphir Munk 	return NULL;
2881256805dSOphir Munk }
2891256805dSOphir Munk 
2901256805dSOphir Munk /**
2911256805dSOphir Munk  * DPDK callback to retrieve physical link information.
2921256805dSOphir Munk  *
2931256805dSOphir Munk  * @param dev
2941256805dSOphir Munk  *   Pointer to Ethernet device structure.
2951256805dSOphir Munk  * @param[out] link
2961256805dSOphir Munk  *   Storage for current link status.
2971256805dSOphir Munk  *
2981256805dSOphir Munk  * @return
2991256805dSOphir Munk  *   0 on success, a negative errno value otherwise and rte_errno is set.
3001256805dSOphir Munk  */
3011256805dSOphir Munk static int
3021256805dSOphir Munk mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev,
3031256805dSOphir Munk 			       struct rte_eth_link *link)
3041256805dSOphir Munk {
3051256805dSOphir Munk 	struct mlx5_priv *priv = dev->data->dev_private;
3061256805dSOphir Munk 	struct ethtool_cmd edata = {
3071256805dSOphir Munk 		.cmd = ETHTOOL_GSET /* Deprecated since Linux v4.5. */
3081256805dSOphir Munk 	};
3091256805dSOphir Munk 	struct ifreq ifr;
3101256805dSOphir Munk 	struct rte_eth_link dev_link;
3111256805dSOphir Munk 	int link_speed = 0;
3121256805dSOphir Munk 	int ret;
3131256805dSOphir Munk 
3141256805dSOphir Munk 	ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr);
3151256805dSOphir Munk 	if (ret) {
3161256805dSOphir Munk 		DRV_LOG(WARNING, "port %u ioctl(SIOCGIFFLAGS) failed: %s",
3171256805dSOphir Munk 			dev->data->port_id, strerror(rte_errno));
3181256805dSOphir Munk 		return ret;
3191256805dSOphir Munk 	}
3201256805dSOphir Munk 	dev_link = (struct rte_eth_link) {
3211256805dSOphir Munk 		.link_status = ((ifr.ifr_flags & IFF_UP) &&
3221256805dSOphir Munk 				(ifr.ifr_flags & IFF_RUNNING)),
3231256805dSOphir Munk 	};
3241256805dSOphir Munk 	ifr = (struct ifreq) {
3251256805dSOphir Munk 		.ifr_data = (void *)&edata,
3261256805dSOphir Munk 	};
3271256805dSOphir Munk 	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
3281256805dSOphir Munk 	if (ret) {
3291256805dSOphir Munk 		if (ret == -ENOTSUP && priv->representor) {
3301256805dSOphir Munk 			struct rte_eth_dev *master;
3311256805dSOphir Munk 
3321256805dSOphir Munk 			/*
3331256805dSOphir Munk 			 * For representors we can try to inherit link
3341256805dSOphir Munk 			 * settings from the master device. Actually
3351256805dSOphir Munk 			 * link settings do not make a lot of sense
3361256805dSOphir Munk 			 * for representors due to missing physical
3371256805dSOphir Munk 			 * link. The old kernel drivers supported
3381256805dSOphir Munk 			 * emulated settings query for representors;
3391256805dSOphir Munk 			 * the new ones do not, so we have to add
3401256805dSOphir Munk 			 * this code for compatibility.
3411256805dSOphir Munk 			 */
3421256805dSOphir Munk 			master = mlx5_find_master_dev(dev);
3431256805dSOphir Munk 			if (master) {
3441256805dSOphir Munk 				ifr = (struct ifreq) {
3451256805dSOphir Munk 					.ifr_data = (void *)&edata,
3461256805dSOphir Munk 				};
3471256805dSOphir Munk 				ret = mlx5_ifreq(master, SIOCETHTOOL, &ifr);
3481256805dSOphir Munk 			}
3491256805dSOphir Munk 		}
3501256805dSOphir Munk 		if (ret) {
3511256805dSOphir Munk 			DRV_LOG(WARNING,
3521256805dSOphir Munk 				"port %u ioctl(SIOCETHTOOL,"
3531256805dSOphir Munk 				" ETHTOOL_GSET) failed: %s",
3541256805dSOphir Munk 				dev->data->port_id, strerror(rte_errno));
3551256805dSOphir Munk 			return ret;
3561256805dSOphir Munk 		}
3571256805dSOphir Munk 	}
3581256805dSOphir Munk 	link_speed = ethtool_cmd_speed(&edata);
3591256805dSOphir Munk 	if (link_speed == -1)
360295968d1SFerruh Yigit 		dev_link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
3611256805dSOphir Munk 	else
3621256805dSOphir Munk 		dev_link.link_speed = link_speed;
3631256805dSOphir Munk 	dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
364295968d1SFerruh Yigit 				RTE_ETH_LINK_HALF_DUPLEX : RTE_ETH_LINK_FULL_DUPLEX);
3651256805dSOphir Munk 	dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
366295968d1SFerruh Yigit 			RTE_ETH_LINK_SPEED_FIXED);
3671256805dSOphir Munk 	*link = dev_link;
36816deeedfSThomas Monjalon 	priv->link_speed_capa = rte_eth_link_speed_gset(edata.supported);
3691256805dSOphir Munk 	return 0;
3701256805dSOphir Munk }
3711256805dSOphir Munk 
3721256805dSOphir Munk /**
3731256805dSOphir Munk  * Retrieve physical link information (unlocked version using new ioctl).
3741256805dSOphir Munk  *
3751256805dSOphir Munk  * @param dev
3761256805dSOphir Munk  *   Pointer to Ethernet device structure.
3771256805dSOphir Munk  * @param[out] link
3781256805dSOphir Munk  *   Storage for current link status.
3791256805dSOphir Munk  *
3801256805dSOphir Munk  * @return
3811256805dSOphir Munk  *   0 on success, a negative errno value otherwise and rte_errno is set.
3821256805dSOphir Munk  */
3831256805dSOphir Munk static int
3841256805dSOphir Munk mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev,
3851256805dSOphir Munk 			     struct rte_eth_link *link)
3861256805dSOphir Munk 
3871256805dSOphir Munk {
3881256805dSOphir Munk 	struct mlx5_priv *priv = dev->data->dev_private;
3891256805dSOphir Munk 	struct ethtool_link_settings gcmd = { .cmd = ETHTOOL_GLINKSETTINGS };
3901256805dSOphir Munk 	struct ifreq ifr;
3911256805dSOphir Munk 	struct rte_eth_link dev_link;
3921256805dSOphir Munk 	struct rte_eth_dev *master = NULL;
3931256805dSOphir Munk 	int ret;
3941256805dSOphir Munk 
3951256805dSOphir Munk 	ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr);
3961256805dSOphir Munk 	if (ret) {
3971256805dSOphir Munk 		DRV_LOG(WARNING, "port %u ioctl(SIOCGIFFLAGS) failed: %s",
3981256805dSOphir Munk 			dev->data->port_id, strerror(rte_errno));
3991256805dSOphir Munk 		return ret;
4001256805dSOphir Munk 	}
4011256805dSOphir Munk 	dev_link = (struct rte_eth_link) {
4021256805dSOphir Munk 		.link_status = ((ifr.ifr_flags & IFF_UP) &&
4031256805dSOphir Munk 				(ifr.ifr_flags & IFF_RUNNING)),
4041256805dSOphir Munk 	};
4051256805dSOphir Munk 	ifr = (struct ifreq) {
4061256805dSOphir Munk 		.ifr_data = (void *)&gcmd,
4071256805dSOphir Munk 	};
4081256805dSOphir Munk 	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
4091256805dSOphir Munk 	if (ret) {
4101256805dSOphir Munk 		if (ret == -ENOTSUP && priv->representor) {
4111256805dSOphir Munk 			/*
4121256805dSOphir Munk 			 * For representors we can try to inherit link
4131256805dSOphir Munk 			 * settings from the master device. Actually
4141256805dSOphir Munk 			 * link settings do not make a lot of sense
4151256805dSOphir Munk 			 * for representors due to missing physical
4161256805dSOphir Munk 			 * link. The old kernel drivers supported
4171256805dSOphir Munk 			 * emulated settings query for representors;
4181256805dSOphir Munk 			 * the new ones do not, so we have to add
4191256805dSOphir Munk 			 * this code for compatibility.
4201256805dSOphir Munk 			 */
4211256805dSOphir Munk 			master = mlx5_find_master_dev(dev);
4221256805dSOphir Munk 			if (master) {
4231256805dSOphir Munk 				ifr = (struct ifreq) {
4241256805dSOphir Munk 					.ifr_data = (void *)&gcmd,
4251256805dSOphir Munk 				};
4261256805dSOphir Munk 				ret = mlx5_ifreq(master, SIOCETHTOOL, &ifr);
4271256805dSOphir Munk 			}
4281256805dSOphir Munk 		}
4291256805dSOphir Munk 		if (ret) {
4301256805dSOphir Munk 			DRV_LOG(DEBUG,
4311256805dSOphir Munk 				"port %u ioctl(SIOCETHTOOL,"
4321256805dSOphir Munk 				" ETHTOOL_GLINKSETTINGS) failed: %s",
4331256805dSOphir Munk 				dev->data->port_id, strerror(rte_errno));
4341256805dSOphir Munk 			return ret;
4351256805dSOphir Munk 		}
4361256805dSOphir Munk 	}
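	/*
	 * ETHTOOL_GLINKSETTINGS handshake: since the request above was issued
	 * with link_mode_masks_nwords left at zero, the kernel answers with
	 * the negated number of 32-bit words needed per link mode bitmap.
	 * Flip the sign and repeat the request with room for the three
	 * bitmaps (supported, advertising, lp_advertising) appended below.
	 */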
4371256805dSOphir Munk 	gcmd.link_mode_masks_nwords = -gcmd.link_mode_masks_nwords;
4381256805dSOphir Munk 
4391256805dSOphir Munk 	alignas(struct ethtool_link_settings)
4401256805dSOphir Munk 	uint8_t data[offsetof(struct ethtool_link_settings, link_mode_masks) +
4411256805dSOphir Munk 		     sizeof(uint32_t) * gcmd.link_mode_masks_nwords * 3];
4421256805dSOphir Munk 	struct ethtool_link_settings *ecmd = (void *)data;
4431256805dSOphir Munk 
4441256805dSOphir Munk 	*ecmd = gcmd;
4451256805dSOphir Munk 	ifr.ifr_data = (void *)ecmd;
4461256805dSOphir Munk 	ret = mlx5_ifreq(master ? master : dev, SIOCETHTOOL, &ifr);
4471256805dSOphir Munk 	if (ret) {
4481256805dSOphir Munk 		DRV_LOG(DEBUG,
4491256805dSOphir Munk 			"port %u ioctl(SIOCETHTOOL,"
4501256805dSOphir Munk 			" ETHTOOL_GLINKSETTINGS) failed: %s",
4511256805dSOphir Munk 			dev->data->port_id, strerror(rte_errno));
4521256805dSOphir Munk 		return ret;
4531256805dSOphir Munk 	}
45416deeedfSThomas Monjalon 
4551688c580SBenoît Ganne 	dev_link.link_speed = (ecmd->speed == UINT32_MAX) ?
456295968d1SFerruh Yigit 				RTE_ETH_SPEED_NUM_UNKNOWN : ecmd->speed;
4571256805dSOphir Munk 	dev_link.link_duplex = ((ecmd->duplex == DUPLEX_HALF) ?
458295968d1SFerruh Yigit 				RTE_ETH_LINK_HALF_DUPLEX : RTE_ETH_LINK_FULL_DUPLEX);
4591256805dSOphir Munk 	dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
460295968d1SFerruh Yigit 				  RTE_ETH_LINK_SPEED_FIXED);
4611256805dSOphir Munk 	*link = dev_link;
46216deeedfSThomas Monjalon 
46316deeedfSThomas Monjalon 	priv->link_speed_capa = rte_eth_link_speed_glink(ecmd->link_mode_masks,
46416deeedfSThomas Monjalon 			ecmd->link_mode_masks_nwords);
46516deeedfSThomas Monjalon 
4661256805dSOphir Munk 	return 0;
4671256805dSOphir Munk }
4681256805dSOphir Munk 
4691256805dSOphir Munk /**
4701256805dSOphir Munk  * DPDK callback to retrieve physical link information.
4711256805dSOphir Munk  *
4721256805dSOphir Munk  * @param dev
4731256805dSOphir Munk  *   Pointer to Ethernet device structure.
4741256805dSOphir Munk  * @param wait_to_complete
4751256805dSOphir Munk  *   Wait for request completion.
4761256805dSOphir Munk  *
4771256805dSOphir Munk  * @return
4781256805dSOphir Munk  *   0 if link status was not updated, positive if it was, a negative errno
4791256805dSOphir Munk  *   value otherwise and rte_errno is set.
4801256805dSOphir Munk  */
4811256805dSOphir Munk int
4821256805dSOphir Munk mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
4831256805dSOphir Munk {
4841256805dSOphir Munk 	int ret;
4851256805dSOphir Munk 	struct rte_eth_link dev_link;
4861256805dSOphir Munk 	time_t start_time = time(NULL);
4871256805dSOphir Munk 	int retry = MLX5_GET_LINK_STATUS_RETRY_COUNT;
4881256805dSOphir Munk 
4891256805dSOphir Munk 	do {
4901256805dSOphir Munk 		ret = mlx5_link_update_unlocked_gs(dev, &dev_link);
4911256805dSOphir Munk 		if (ret == -ENOTSUP)
4921256805dSOphir Munk 			ret = mlx5_link_update_unlocked_gset(dev, &dev_link);
4931256805dSOphir Munk 		if (ret == 0)
4941256805dSOphir Munk 			break;
4951256805dSOphir Munk 		/* Handle wait to complete situation. */
4961256805dSOphir Munk 		if ((wait_to_complete || retry) && ret == -EAGAIN) {
4971256805dSOphir Munk 			if (abs((int)difftime(time(NULL), start_time)) <
4981256805dSOphir Munk 			    MLX5_LINK_STATUS_TIMEOUT) {
4991256805dSOphir Munk 				usleep(0);
5001256805dSOphir Munk 				continue;
5011256805dSOphir Munk 			} else {
5021256805dSOphir Munk 				rte_errno = EBUSY;
5031256805dSOphir Munk 				return -rte_errno;
5041256805dSOphir Munk 			}
5051256805dSOphir Munk 		} else if (ret < 0) {
5061256805dSOphir Munk 			return ret;
5071256805dSOphir Munk 		}
5081256805dSOphir Munk 	} while (wait_to_complete || retry-- > 0);
5091256805dSOphir Munk 	ret = !!memcmp(&dev->data->dev_link, &dev_link,
5101256805dSOphir Munk 		       sizeof(struct rte_eth_link));
5111256805dSOphir Munk 	dev->data->dev_link = dev_link;
5121256805dSOphir Munk 	return ret;
5131256805dSOphir Munk }
5141256805dSOphir Munk 
5151256805dSOphir Munk /**
5161256805dSOphir Munk  * DPDK callback to get flow control status.
5171256805dSOphir Munk  *
5181256805dSOphir Munk  * @param dev
5191256805dSOphir Munk  *   Pointer to Ethernet device structure.
5201256805dSOphir Munk  * @param[out] fc_conf
5211256805dSOphir Munk  *   Flow control output buffer.
5221256805dSOphir Munk  *
5231256805dSOphir Munk  * @return
5241256805dSOphir Munk  *   0 on success, a negative errno value otherwise and rte_errno is set.
5251256805dSOphir Munk  */
5261256805dSOphir Munk int
5271256805dSOphir Munk mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
5281256805dSOphir Munk {
5291256805dSOphir Munk 	struct ifreq ifr;
5301256805dSOphir Munk 	struct ethtool_pauseparam ethpause = {
5311256805dSOphir Munk 		.cmd = ETHTOOL_GPAUSEPARAM
5321256805dSOphir Munk 	};
5331256805dSOphir Munk 	int ret;
5341256805dSOphir Munk 
5351256805dSOphir Munk 	ifr.ifr_data = (void *)&ethpause;
5361256805dSOphir Munk 	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
5371256805dSOphir Munk 	if (ret) {
53884ba1440SEli Britstein 		DRV_LOG(DEBUG,
5391256805dSOphir Munk 			"port %u ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM) failed:"
5401256805dSOphir Munk 			" %s",
5411256805dSOphir Munk 			dev->data->port_id, strerror(rte_errno));
5421256805dSOphir Munk 		return ret;
5431256805dSOphir Munk 	}
5441256805dSOphir Munk 	fc_conf->autoneg = ethpause.autoneg;
5451256805dSOphir Munk 	if (ethpause.rx_pause && ethpause.tx_pause)
546295968d1SFerruh Yigit 		fc_conf->mode = RTE_ETH_FC_FULL;
5471256805dSOphir Munk 	else if (ethpause.rx_pause)
548295968d1SFerruh Yigit 		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
5491256805dSOphir Munk 	else if (ethpause.tx_pause)
550295968d1SFerruh Yigit 		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
5511256805dSOphir Munk 	else
552295968d1SFerruh Yigit 		fc_conf->mode = RTE_ETH_FC_NONE;
5531256805dSOphir Munk 	return 0;
5541256805dSOphir Munk }
5551256805dSOphir Munk 
5561256805dSOphir Munk /**
5571256805dSOphir Munk  * DPDK callback to modify flow control parameters.
5581256805dSOphir Munk  *
5591256805dSOphir Munk  * @param dev
5601256805dSOphir Munk  *   Pointer to Ethernet device structure.
5611256805dSOphir Munk  * @param[in] fc_conf
5621256805dSOphir Munk  *   Flow control parameters.
5631256805dSOphir Munk  *
5641256805dSOphir Munk  * @return
5651256805dSOphir Munk  *   0 on success, a negative errno value otherwise and rte_errno is set.
5661256805dSOphir Munk  */
5671256805dSOphir Munk int
5681256805dSOphir Munk mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
5691256805dSOphir Munk {
5701256805dSOphir Munk 	struct ifreq ifr;
5711256805dSOphir Munk 	struct ethtool_pauseparam ethpause = {
5721256805dSOphir Munk 		.cmd = ETHTOOL_SPAUSEPARAM
5731256805dSOphir Munk 	};
5741256805dSOphir Munk 	int ret;
5751256805dSOphir Munk 
5761256805dSOphir Munk 	ifr.ifr_data = (void *)&ethpause;
5771256805dSOphir Munk 	ethpause.autoneg = fc_conf->autoneg;
578295968d1SFerruh Yigit 	if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
579295968d1SFerruh Yigit 	    (fc_conf->mode & RTE_ETH_FC_RX_PAUSE))
5801256805dSOphir Munk 		ethpause.rx_pause = 1;
5811256805dSOphir Munk 	else
5821256805dSOphir Munk 		ethpause.rx_pause = 0;
5831256805dSOphir Munk 
584295968d1SFerruh Yigit 	if (((fc_conf->mode & RTE_ETH_FC_FULL) == RTE_ETH_FC_FULL) ||
585295968d1SFerruh Yigit 	    (fc_conf->mode & RTE_ETH_FC_TX_PAUSE))
5861256805dSOphir Munk 		ethpause.tx_pause = 1;
5871256805dSOphir Munk 	else
5881256805dSOphir Munk 		ethpause.tx_pause = 0;
5891256805dSOphir Munk 	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
5901256805dSOphir Munk 	if (ret) {
5911256805dSOphir Munk 		DRV_LOG(WARNING,
5921256805dSOphir Munk 			"port %u ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)"
5931256805dSOphir Munk 			" failed: %s",
5941256805dSOphir Munk 			dev->data->port_id, strerror(rte_errno));
5951256805dSOphir Munk 		return ret;
5961256805dSOphir Munk 	}
5971256805dSOphir Munk 	return 0;
5981256805dSOphir Munk }
5991256805dSOphir Munk 
6001256805dSOphir Munk /**
6011256805dSOphir Munk  * Handle asynchronous removal event for entire multiport device.
6021256805dSOphir Munk  *
6031256805dSOphir Munk  * @param sh
6041256805dSOphir Munk  *   Infiniband device shared context.
6051256805dSOphir Munk  */
6061256805dSOphir Munk static void
6071256805dSOphir Munk mlx5_dev_interrupt_device_fatal(struct mlx5_dev_ctx_shared *sh)
6081256805dSOphir Munk {
6091256805dSOphir Munk 	uint32_t i;
6101256805dSOphir Munk 
6111256805dSOphir Munk 	for (i = 0; i < sh->max_port; ++i) {
6121256805dSOphir Munk 		struct rte_eth_dev *dev;
61322dc56cfSViacheslav Ovsiienko 		struct mlx5_priv *priv;
6141256805dSOphir Munk 
6151256805dSOphir Munk 		if (sh->port[i].ih_port_id >= RTE_MAX_ETHPORTS) {
6161256805dSOphir Munk 			/*
6171256805dSOphir Munk 			 * Or not existing port either no
6181256805dSOphir Munk 			 * Either the port does not exist or no
6191256805dSOphir Munk 			 * handler is installed for this port.
6201256805dSOphir Munk 			continue;
6211256805dSOphir Munk 		}
6221256805dSOphir Munk 		dev = &rte_eth_devices[sh->port[i].ih_port_id];
6231256805dSOphir Munk 		MLX5_ASSERT(dev);
62422dc56cfSViacheslav Ovsiienko 		priv = dev->data->dev_private;
62522dc56cfSViacheslav Ovsiienko 		MLX5_ASSERT(priv);
62622dc56cfSViacheslav Ovsiienko 		if (!priv->rmv_notified && dev->data->dev_conf.intr_conf.rmv) {
62722dc56cfSViacheslav Ovsiienko 			/* Notify driver about removal only once. */
62822dc56cfSViacheslav Ovsiienko 			priv->rmv_notified = 1;
6295723fbedSFerruh Yigit 			rte_eth_dev_callback_process
6301256805dSOphir Munk 				(dev, RTE_ETH_EVENT_INTR_RMV, NULL);
6311256805dSOphir Munk 		}
6321256805dSOphir Munk 	}
63322dc56cfSViacheslav Ovsiienko }
6341256805dSOphir Munk 
63553d1e65cSHaifei Luo static bool
63653d1e65cSHaifei Luo mlx5_dev_nl_ifindex_verify(uint32_t if_index, struct mlx5_priv *priv)
63753d1e65cSHaifei Luo {
63853d1e65cSHaifei Luo 	struct mlx5_bond_info *bond = &priv->sh->bond;
63953d1e65cSHaifei Luo 	int i;
64053d1e65cSHaifei Luo 
64153d1e65cSHaifei Luo 	if (bond->n_port == 0)
64253d1e65cSHaifei Luo 		return (if_index == priv->if_index);
64353d1e65cSHaifei Luo 
64453d1e65cSHaifei Luo 	if (if_index == bond->ifindex)
64553d1e65cSHaifei Luo 		return true;
64653d1e65cSHaifei Luo 	for (i = 0; i < bond->n_port; i++) {
64753d1e65cSHaifei Luo 		if (i >= MLX5_BOND_MAX_PORTS)
64853d1e65cSHaifei Luo 			return false;
64953d1e65cSHaifei Luo 		if (if_index == bond->ports[i].ifindex)
65053d1e65cSHaifei Luo 			return true;
65153d1e65cSHaifei Luo 	}
65253d1e65cSHaifei Luo 
65353d1e65cSHaifei Luo 	return false;
65453d1e65cSHaifei Luo }
65553d1e65cSHaifei Luo 
65653d1e65cSHaifei Luo static void
65753d1e65cSHaifei Luo mlx5_link_update_bond(struct rte_eth_dev *dev)
65853d1e65cSHaifei Luo {
65953d1e65cSHaifei Luo 	struct mlx5_priv *priv = dev->data->dev_private;
66053d1e65cSHaifei Luo 	struct mlx5_bond_info *bond = &priv->sh->bond;
66153d1e65cSHaifei Luo 	struct ifreq ifr = (struct ifreq) {
66253d1e65cSHaifei Luo 		.ifr_flags = 0,
66353d1e65cSHaifei Luo 	};
66453d1e65cSHaifei Luo 	int ret;
66553d1e65cSHaifei Luo 
66653d1e65cSHaifei Luo 	ret = mlx5_ifreq_by_ifname(bond->ifname, SIOCGIFFLAGS, &ifr);
66753d1e65cSHaifei Luo 	if (ret) {
66853d1e65cSHaifei Luo 		DRV_LOG(WARNING, "ifname %s ioctl(SIOCGIFFLAGS) failed: %s",
66953d1e65cSHaifei Luo 			bond->ifname, strerror(rte_errno));
67053d1e65cSHaifei Luo 		return;
67153d1e65cSHaifei Luo 	}
67253d1e65cSHaifei Luo 	dev->data->dev_link.link_status =
67353d1e65cSHaifei Luo 		((ifr.ifr_flags & IFF_UP) && (ifr.ifr_flags & IFF_RUNNING));
67453d1e65cSHaifei Luo }
67553d1e65cSHaifei Luo 
67617f95513SDmitry Kozlyuk static void
67717f95513SDmitry Kozlyuk mlx5_dev_interrupt_nl_cb(struct nlmsghdr *hdr, void *cb_arg)
67817f95513SDmitry Kozlyuk {
67917f95513SDmitry Kozlyuk 	struct mlx5_dev_ctx_shared *sh = cb_arg;
68017f95513SDmitry Kozlyuk 	uint32_t i;
68117f95513SDmitry Kozlyuk 	uint32_t if_index;
68217f95513SDmitry Kozlyuk 
68317f95513SDmitry Kozlyuk 	if (mlx5_nl_parse_link_status_update(hdr, &if_index) < 0)
68417f95513SDmitry Kozlyuk 		return;
68517f95513SDmitry Kozlyuk 	for (i = 0; i < sh->max_port; i++) {
68617f95513SDmitry Kozlyuk 		struct mlx5_dev_shared_port *port = &sh->port[i];
68717f95513SDmitry Kozlyuk 		struct rte_eth_dev *dev;
68817f95513SDmitry Kozlyuk 		struct mlx5_priv *priv;
68917f95513SDmitry Kozlyuk 
69017f95513SDmitry Kozlyuk 		if (port->nl_ih_port_id >= RTE_MAX_ETHPORTS)
69117f95513SDmitry Kozlyuk 			continue;
69217f95513SDmitry Kozlyuk 		dev = &rte_eth_devices[port->nl_ih_port_id];
69317f95513SDmitry Kozlyuk 		/* Probing may initiate an LSC before configuration is done. */
69417f95513SDmitry Kozlyuk 		if (dev->data->dev_configured &&
69517f95513SDmitry Kozlyuk 		    !dev->data->dev_conf.intr_conf.lsc)
69617f95513SDmitry Kozlyuk 			break;
69717f95513SDmitry Kozlyuk 		priv = dev->data->dev_private;
69853d1e65cSHaifei Luo 		if (mlx5_dev_nl_ifindex_verify(if_index, priv)) {
69917f95513SDmitry Kozlyuk 			/* Block logical LSC events. */
70017f95513SDmitry Kozlyuk 			uint16_t prev_status = dev->data->dev_link.link_status;
70117f95513SDmitry Kozlyuk 
70253d1e65cSHaifei Luo 			if (mlx5_link_update(dev, 0) < 0) {
70317f95513SDmitry Kozlyuk 				DRV_LOG(ERR, "Failed to update link status: %s",
70417f95513SDmitry Kozlyuk 					rte_strerror(rte_errno));
70553d1e65cSHaifei Luo 			} else {
70653d1e65cSHaifei Luo 				if (priv->sh->bond.n_port)
70753d1e65cSHaifei Luo 					mlx5_link_update_bond(dev);
70853d1e65cSHaifei Luo 				if (prev_status != dev->data->dev_link.link_status)
70917f95513SDmitry Kozlyuk 					rte_eth_dev_callback_process
71017f95513SDmitry Kozlyuk 						(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
71153d1e65cSHaifei Luo 			}
71217f95513SDmitry Kozlyuk 			break;
71317f95513SDmitry Kozlyuk 		}
71417f95513SDmitry Kozlyuk 	}
71517f95513SDmitry Kozlyuk }
71617f95513SDmitry Kozlyuk 
71717f95513SDmitry Kozlyuk void
71817f95513SDmitry Kozlyuk mlx5_dev_interrupt_handler_nl(void *arg)
71917f95513SDmitry Kozlyuk {
72017f95513SDmitry Kozlyuk 	struct mlx5_dev_ctx_shared *sh = arg;
72117f95513SDmitry Kozlyuk 	int nlsk_fd = rte_intr_fd_get(sh->intr_handle_nl);
72217f95513SDmitry Kozlyuk 
72317f95513SDmitry Kozlyuk 	if (nlsk_fd < 0)
72417f95513SDmitry Kozlyuk 		return;
72517f95513SDmitry Kozlyuk 	if (mlx5_nl_read_events(nlsk_fd, mlx5_dev_interrupt_nl_cb, sh) < 0)
72617f95513SDmitry Kozlyuk 		DRV_LOG(ERR, "Failed to process Netlink events: %s",
72717f95513SDmitry Kozlyuk 			rte_strerror(rte_errno));
72817f95513SDmitry Kozlyuk }
72917f95513SDmitry Kozlyuk 
7301256805dSOphir Munk /**
7311256805dSOphir Munk  * Handle shared asynchronous events from the NIC (removal event
7321256805dSOphir Munk  * and link status change). Supports multiport IB devices.
7331256805dSOphir Munk  *
7341256805dSOphir Munk  * @param cb_arg
7351256805dSOphir Munk  *   Callback argument.
7361256805dSOphir Munk  */
7371256805dSOphir Munk void
7381256805dSOphir Munk mlx5_dev_interrupt_handler(void *cb_arg)
7391256805dSOphir Munk {
7401256805dSOphir Munk 	struct mlx5_dev_ctx_shared *sh = cb_arg;
7411256805dSOphir Munk 	struct ibv_async_event event;
7421256805dSOphir Munk 
7431256805dSOphir Munk 	/* Read all messages from the IB device and acknowledge them. */
7441256805dSOphir Munk 	for (;;) {
7451256805dSOphir Munk 		struct rte_eth_dev *dev;
7461256805dSOphir Munk 		uint32_t tmp;
7471256805dSOphir Munk 
74822dc56cfSViacheslav Ovsiienko 		if (mlx5_glue->get_async_event(sh->cdev->ctx, &event)) {
74922dc56cfSViacheslav Ovsiienko 			if (errno == EIO) {
75022dc56cfSViacheslav Ovsiienko 				DRV_LOG(DEBUG,
75122dc56cfSViacheslav Ovsiienko 					"IBV async event queue closed on: %s",
75222dc56cfSViacheslav Ovsiienko 					sh->ibdev_name);
7531256805dSOphir Munk 				mlx5_dev_interrupt_device_fatal(sh);
75422dc56cfSViacheslav Ovsiienko 			}
75522dc56cfSViacheslav Ovsiienko 			break;
75622dc56cfSViacheslav Ovsiienko 		}
75722dc56cfSViacheslav Ovsiienko 		if (event.event_type == IBV_EVENT_DEVICE_FATAL) {
75822dc56cfSViacheslav Ovsiienko 			/*
75922dc56cfSViacheslav Ovsiienko 			 * The DEVICE_FATAL event can be called by kernel
76022dc56cfSViacheslav Ovsiienko 			 * twice - from mlx5 and uverbs layers, and port
76122dc56cfSViacheslav Ovsiienko 			 * index is not applicable. We should notify all
76222dc56cfSViacheslav Ovsiienko 			 * existing ports.
76322dc56cfSViacheslav Ovsiienko 			 */
76422dc56cfSViacheslav Ovsiienko 			mlx5_dev_interrupt_device_fatal(sh);
76522dc56cfSViacheslav Ovsiienko 			mlx5_glue->ack_async_event(&event);
7661256805dSOphir Munk 			continue;
7671256805dSOphir Munk 		}
76822dc56cfSViacheslav Ovsiienko 		/* Retrieve and check IB port index. */
76922dc56cfSViacheslav Ovsiienko 		tmp = (uint32_t)event.element.port_num;
77022dc56cfSViacheslav Ovsiienko 		MLX5_ASSERT(tmp <= sh->max_port);
7711256805dSOphir Munk 		if (!tmp) {
7721256805dSOphir Munk 			/* Unsupported device level event. */
7731256805dSOphir Munk 			mlx5_glue->ack_async_event(&event);
7741256805dSOphir Munk 			DRV_LOG(DEBUG,
7751256805dSOphir Munk 				"unsupported common event (type %d)",
7761256805dSOphir Munk 				event.event_type);
7771256805dSOphir Munk 			continue;
7781256805dSOphir Munk 		}
7791256805dSOphir Munk 		if (tmp > sh->max_port) {
7801256805dSOphir Munk 			/* Invalid IB port index. */
7811256805dSOphir Munk 			mlx5_glue->ack_async_event(&event);
7821256805dSOphir Munk 			DRV_LOG(DEBUG,
7831256805dSOphir Munk 				"cannot handle an event (type %d)"
7841256805dSOphir Munk 				" due to invalid IB port index (%u)",
7851256805dSOphir Munk 				event.event_type, tmp);
7861256805dSOphir Munk 			continue;
7871256805dSOphir Munk 		}
7881256805dSOphir Munk 		if (sh->port[tmp - 1].ih_port_id >= RTE_MAX_ETHPORTS) {
7891256805dSOphir Munk 			/* No handler installed. */
7901256805dSOphir Munk 			mlx5_glue->ack_async_event(&event);
7911256805dSOphir Munk 			DRV_LOG(DEBUG,
7921256805dSOphir Munk 				"cannot handle an event (type %d)"
7931256805dSOphir Munk 				" due to no handler installed for port %u",
7941256805dSOphir Munk 				event.event_type, tmp);
7951256805dSOphir Munk 			continue;
7961256805dSOphir Munk 		}
7971256805dSOphir Munk 		/* Retrieve ethernet device descriptor. */
7981256805dSOphir Munk 		tmp = sh->port[tmp - 1].ih_port_id;
7991256805dSOphir Munk 		dev = &rte_eth_devices[tmp];
8001256805dSOphir Munk 		MLX5_ASSERT(dev);
8011256805dSOphir Munk 		DRV_LOG(DEBUG,
8021256805dSOphir Munk 			"port %u cannot handle an unknown event (type %d)",
8031256805dSOphir Munk 			dev->data->port_id, event.event_type);
8041256805dSOphir Munk 		mlx5_glue->ack_async_event(&event);
8051256805dSOphir Munk 	}
8061256805dSOphir Munk }
8071256805dSOphir Munk 
8081256805dSOphir Munk /**
8091256805dSOphir Munk  * Handle DEVX interrupts from the NIC.
8101256805dSOphir Munk  * This function is probably called from the DPDK host thread.
8111256805dSOphir Munk  *
8121256805dSOphir Munk  * @param cb_arg
8131256805dSOphir Munk  *   Callback argument.
8141256805dSOphir Munk  */
8151256805dSOphir Munk void
8161256805dSOphir Munk mlx5_dev_interrupt_handler_devx(void *cb_arg)
8171256805dSOphir Munk {
8181256805dSOphir Munk #ifndef HAVE_IBV_DEVX_ASYNC
8191256805dSOphir Munk 	(void)cb_arg;
8201256805dSOphir Munk 	return;
8211256805dSOphir Munk #else
8221256805dSOphir Munk 	struct mlx5_dev_ctx_shared *sh = cb_arg;
8231256805dSOphir Munk 	union {
8241256805dSOphir Munk 		struct mlx5dv_devx_async_cmd_hdr cmd_resp;
8251256805dSOphir Munk 		uint8_t buf[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
8261256805dSOphir Munk 			    MLX5_ST_SZ_BYTES(traffic_counter) +
8271256805dSOphir Munk 			    sizeof(struct mlx5dv_devx_async_cmd_hdr)];
8281256805dSOphir Munk 	} out;
8291256805dSOphir Munk 	uint8_t *buf = out.buf + sizeof(out.cmd_resp);
8301256805dSOphir Munk 
8311256805dSOphir Munk 	while (!mlx5_glue->devx_get_async_cmd_comp(sh->devx_comp,
8321256805dSOphir Munk 						   &out.cmd_resp,
8331256805dSOphir Munk 						   sizeof(out.buf)))
8341256805dSOphir Munk 		mlx5_flow_async_pool_query_handle
8351256805dSOphir Munk 			(sh, (uint64_t)out.cmd_resp.wr_id,
8361256805dSOphir Munk 			 mlx5_devx_get_out_command_status(buf));
8371256805dSOphir Munk #endif /* HAVE_IBV_DEVX_ASYNC */
8381256805dSOphir Munk }
8391256805dSOphir Munk 
8401256805dSOphir Munk /**
8411256805dSOphir Munk  * DPDK callback to bring the link DOWN.
8421256805dSOphir Munk  *
8431256805dSOphir Munk  * @param dev
8441256805dSOphir Munk  *   Pointer to Ethernet device structure.
8451256805dSOphir Munk  *
8461256805dSOphir Munk  * @return
8471256805dSOphir Munk  *   0 on success, a negative errno value otherwise and rte_errno is set.
8481256805dSOphir Munk  */
8491256805dSOphir Munk int
8501256805dSOphir Munk mlx5_set_link_down(struct rte_eth_dev *dev)
8511256805dSOphir Munk {
8521256805dSOphir Munk 	return mlx5_set_flags(dev, ~IFF_UP, ~IFF_UP);
8531256805dSOphir Munk }
8541256805dSOphir Munk 
8551256805dSOphir Munk /**
8561256805dSOphir Munk  * DPDK callback to bring the link UP.
8571256805dSOphir Munk  *
8581256805dSOphir Munk  * @param dev
8591256805dSOphir Munk  *   Pointer to Ethernet device structure.
8601256805dSOphir Munk  *
8611256805dSOphir Munk  * @return
8621256805dSOphir Munk  *   0 on success, a negative errno value otherwise and rte_errno is set.
8631256805dSOphir Munk  */
8641256805dSOphir Munk int
8651256805dSOphir Munk mlx5_set_link_up(struct rte_eth_dev *dev)
8661256805dSOphir Munk {
8671256805dSOphir Munk 	return mlx5_set_flags(dev, ~IFF_UP, IFF_UP);
8681256805dSOphir Munk }
8691256805dSOphir Munk 
8701256805dSOphir Munk /**
8711256805dSOphir Munk  * Check if mlx5 device was removed.
8721256805dSOphir Munk  *
8731256805dSOphir Munk  * @param dev
8741256805dSOphir Munk  *   Pointer to Ethernet device structure.
8751256805dSOphir Munk  *
8761256805dSOphir Munk  * @return
8771256805dSOphir Munk  *   1 when device is removed, otherwise 0.
8781256805dSOphir Munk  */
8791256805dSOphir Munk int
8801256805dSOphir Munk mlx5_is_removed(struct rte_eth_dev *dev)
8811256805dSOphir Munk {
8821256805dSOphir Munk 	struct ibv_device_attr device_attr;
8831256805dSOphir Munk 	struct mlx5_priv *priv = dev->data->dev_private;
8841256805dSOphir Munk 
885ca1418ceSMichael Baum 	if (mlx5_glue->query_device(priv->sh->cdev->ctx, &device_attr) == EIO)
8861256805dSOphir Munk 		return 1;
8871256805dSOphir Munk 	return 0;
8881256805dSOphir Munk }
8891256805dSOphir Munk 
8901256805dSOphir Munk /**
891ef9ee13fSOphir Munk  * Analyze gathered port parameters via sysfs to recognize master
892ef9ee13fSOphir Munk  * and representor devices for E-Switch configuration.
893ef9ee13fSOphir Munk  *
894ef9ee13fSOphir Munk  * @param[in] device_dir
895ef9ee13fSOphir Munk  *   Flag indicating presence of the "device" directory under the port device key.
896ef9ee13fSOphir Munk  * @param[inout] switch_info
897ef9ee13fSOphir Munk  *   Port information, including port name as a number and port name
898ef9ee13fSOphir Munk  *   type if recognized.
899ef9ee13fSOphir Munk  *
900ef9ee13fSOphir Munk  * @return
901ef9ee13fSOphir Munk  *   master and representor flags are set in switch_info according to
902ef9ee13fSOphir Munk  *   recognized parameters (if any).
903ef9ee13fSOphir Munk  */
904ef9ee13fSOphir Munk static void
905ef9ee13fSOphir Munk mlx5_sysfs_check_switch_info(bool device_dir,
906ef9ee13fSOphir Munk 			     struct mlx5_switch_info *switch_info)
907ef9ee13fSOphir Munk {
908ef9ee13fSOphir Munk 	switch (switch_info->name_type) {
909ef9ee13fSOphir Munk 	case MLX5_PHYS_PORT_NAME_TYPE_UNKNOWN:
910ef9ee13fSOphir Munk 		/*
911ef9ee13fSOphir Munk 		 * Name is not recognized, assume the master,
912ef9ee13fSOphir Munk 		 * check the device directory presence.
913ef9ee13fSOphir Munk 		 */
914ef9ee13fSOphir Munk 		switch_info->master = device_dir;
915ef9ee13fSOphir Munk 		break;
916ef9ee13fSOphir Munk 	case MLX5_PHYS_PORT_NAME_TYPE_NOTSET:
917ef9ee13fSOphir Munk 		/*
918ef9ee13fSOphir Munk 		 * Name is not set, this assumes the legacy naming
919ef9ee13fSOphir Munk 		 * schema for master, just check if there is
920ef9ee13fSOphir Munk 		 * a device directory.
921ef9ee13fSOphir Munk 		 */
922ef9ee13fSOphir Munk 		switch_info->master = device_dir;
923ef9ee13fSOphir Munk 		break;
924ef9ee13fSOphir Munk 	case MLX5_PHYS_PORT_NAME_TYPE_UPLINK:
925ef9ee13fSOphir Munk 		/* New uplink naming schema recognized. */
926ef9ee13fSOphir Munk 		switch_info->master = 1;
927ef9ee13fSOphir Munk 		break;
928ef9ee13fSOphir Munk 	case MLX5_PHYS_PORT_NAME_TYPE_LEGACY:
929ef9ee13fSOphir Munk 		/* Legacy representors naming schema. */
930ef9ee13fSOphir Munk 		switch_info->representor = !device_dir;
931ef9ee13fSOphir Munk 		break;
932ef9ee13fSOphir Munk 	case MLX5_PHYS_PORT_NAME_TYPE_PFHPF:
933ef9ee13fSOphir Munk 		/* Fallthrough */
934ef9ee13fSOphir Munk 	case MLX5_PHYS_PORT_NAME_TYPE_PFVF:
935cb95feefSXueming Li 		/* Fallthrough */
936cb95feefSXueming Li 	case MLX5_PHYS_PORT_NAME_TYPE_PFSF:
937ef9ee13fSOphir Munk 		/* New representors naming schema. */
938ef9ee13fSOphir Munk 		switch_info->representor = 1;
939ef9ee13fSOphir Munk 		break;
94059df97f1SXueming Li 	default:
94159df97f1SXueming Li 		switch_info->master = device_dir;
94259df97f1SXueming Li 		break;
943ef9ee13fSOphir Munk 	}
944ef9ee13fSOphir Munk }
945ef9ee13fSOphir Munk 
946ef9ee13fSOphir Munk /**
9471256805dSOphir Munk  * Get switch information associated with network interface.
9481256805dSOphir Munk  *
9491256805dSOphir Munk  * @param ifindex
9501256805dSOphir Munk  *   Network interface index.
9511256805dSOphir Munk  * @param[out] info
9521256805dSOphir Munk  *   Switch information object, populated in case of success.
9531256805dSOphir Munk  *
9541256805dSOphir Munk  * @return
9551256805dSOphir Munk  *   0 on success, a negative errno value otherwise and rte_errno is set.
9561256805dSOphir Munk  */
9571256805dSOphir Munk int
9581256805dSOphir Munk mlx5_sysfs_switch_info(unsigned int ifindex, struct mlx5_switch_info *info)
9591256805dSOphir Munk {
9601256805dSOphir Munk 	char ifname[IF_NAMESIZE];
961f8a226edSBing Zhao 	char *port_name = NULL;
962f8a226edSBing Zhao 	size_t port_name_size = 0;
9631256805dSOphir Munk 	FILE *file;
9641256805dSOphir Munk 	struct mlx5_switch_info data = {
9651256805dSOphir Munk 		.master = 0,
9661256805dSOphir Munk 		.representor = 0,
9671256805dSOphir Munk 		.name_type = MLX5_PHYS_PORT_NAME_TYPE_NOTSET,
9681256805dSOphir Munk 		.port_name = 0,
9691256805dSOphir Munk 		.switch_id = 0,
9701256805dSOphir Munk 	};
9711256805dSOphir Munk 	DIR *dir;
9721256805dSOphir Munk 	bool port_switch_id_set = false;
9731256805dSOphir Munk 	bool device_dir = false;
9741256805dSOphir Munk 	char c;
975f8a226edSBing Zhao 	ssize_t line_size;
9761256805dSOphir Munk 
9771256805dSOphir Munk 	if (!if_indextoname(ifindex, ifname)) {
9781256805dSOphir Munk 		rte_errno = errno;
9791256805dSOphir Munk 		return -rte_errno;
9801256805dSOphir Munk 	}
9811256805dSOphir Munk 
9821256805dSOphir Munk 	MKSTR(phys_port_name, "/sys/class/net/%s/phys_port_name",
9831256805dSOphir Munk 	      ifname);
9841256805dSOphir Munk 	MKSTR(phys_switch_id, "/sys/class/net/%s/phys_switch_id",
9851256805dSOphir Munk 	      ifname);
9861256805dSOphir Munk 	MKSTR(pci_device, "/sys/class/net/%s/device",
9871256805dSOphir Munk 	      ifname);
9881256805dSOphir Munk 
9891256805dSOphir Munk 	file = fopen(phys_port_name, "rb");
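	/*
	 * "phys_port_name" exposes the kernel port naming (e.g. "p0" or
	 * "pf0vf1") used below to classify uplink and representor ports,
	 * "phys_switch_id" is typically present only when the netdev is
	 * part of an E-Switch, and the "device" directory helps to tell a
	 * master PF netdev from a representor with legacy naming.
	 */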
9901256805dSOphir Munk 	if (file != NULL) {
991f8a226edSBing Zhao 		char *tail_nl;
992f8a226edSBing Zhao 
993f8a226edSBing Zhao 		line_size = getline(&port_name, &port_name_size, file);
994f8a226edSBing Zhao 		if (line_size < 0) {
9951296e8d0SDavid Marchand 			free(port_name);
996f8a226edSBing Zhao 			fclose(file);
997f8a226edSBing Zhao 			rte_errno = errno;
998f8a226edSBing Zhao 			return -rte_errno;
999f8a226edSBing Zhao 		} else if (line_size > 0) {
1000f8a226edSBing Zhao 			/* Remove trailing newline character. */
1001f8a226edSBing Zhao 			tail_nl = strchr(port_name, '\n');
1002f8a226edSBing Zhao 			if (tail_nl)
1003f8a226edSBing Zhao 				*tail_nl = '\0';
10041256805dSOphir Munk 			mlx5_translate_port_name(port_name, &data);
1005f8a226edSBing Zhao 		}
1006f8a226edSBing Zhao 		free(port_name);
1007bae645a2SAli Alnubani 		fclose(file);
10081256805dSOphir Munk 	}
10091256805dSOphir Munk 	file = fopen(phys_switch_id, "rb");
10101256805dSOphir Munk 	if (file == NULL) {
10111256805dSOphir Munk 		rte_errno = errno;
10121256805dSOphir Munk 		return -rte_errno;
10131256805dSOphir Munk 	}
10141256805dSOphir Munk 	port_switch_id_set =
10151256805dSOphir Munk 		fscanf(file, "%" SCNx64 "%c", &data.switch_id, &c) == 2 &&
10161256805dSOphir Munk 		c == '\n';
10171256805dSOphir Munk 	fclose(file);
10181256805dSOphir Munk 	dir = opendir(pci_device);
10191256805dSOphir Munk 	if (dir != NULL) {
10201256805dSOphir Munk 		closedir(dir);
10211256805dSOphir Munk 		device_dir = true;
10221256805dSOphir Munk 	}
10231256805dSOphir Munk 	if (port_switch_id_set) {
10241256805dSOphir Munk 		/* We have some E-Switch configuration. */
10251256805dSOphir Munk 		mlx5_sysfs_check_switch_info(device_dir, &data);
10261256805dSOphir Munk 	}
10271256805dSOphir Munk 	*info = data;
10281256805dSOphir Munk 	MLX5_ASSERT(!(data.master && data.representor));
10291256805dSOphir Munk 	if (data.master && data.representor) {
10301256805dSOphir Munk 		DRV_LOG(ERR, "ifindex %u device is recognized as master"
10311256805dSOphir Munk 			     " and as representor", ifindex);
10321256805dSOphir Munk 		rte_errno = ENODEV;
10331256805dSOphir Munk 		return -rte_errno;
10341256805dSOphir Munk 	}
10351256805dSOphir Munk 	return 0;
10361256805dSOphir Munk }
10371256805dSOphir Munk 
10381256805dSOphir Munk /**
1039c21e5facSXueming Li  * Get bond information associated with network interface.
1040c21e5facSXueming Li  *
1041c21e5facSXueming Li  * @param pf_ifindex
1042c21e5facSXueming Li  *   Network interface index of bond slave interface.
1043c21e5facSXueming Li  * @param[out] ifindex
1044c21e5facSXueming Li  *   Pointer to bond ifindex.
1045c21e5facSXueming Li  * @param[out] ifname
1046c21e5facSXueming Li  *   Pointer to bond ifname.
1047c21e5facSXueming Li  *
1048c21e5facSXueming Li  * @return
1049c21e5facSXueming Li  *   0 on success, a negative errno value otherwise and rte_errno is set.
1050c21e5facSXueming Li  */
1051c21e5facSXueming Li int
1052c21e5facSXueming Li mlx5_sysfs_bond_info(unsigned int pf_ifindex, unsigned int *ifindex,
1053c21e5facSXueming Li 		     char *ifname)
1054c21e5facSXueming Li {
1055c21e5facSXueming Li 	char name[IF_NAMESIZE];
1056c21e5facSXueming Li 	FILE *file;
1057c21e5facSXueming Li 	unsigned int index;
1058c21e5facSXueming Li 	int ret;
1059c21e5facSXueming Li 
1060c21e5facSXueming Li 	if (!if_indextoname(pf_ifindex, name) || !strlen(name)) {
1061c21e5facSXueming Li 		rte_errno = errno;
1062c21e5facSXueming Li 		return -rte_errno;
1063c21e5facSXueming Li 	}
1064c21e5facSXueming Li 	MKSTR(bond_if, "/sys/class/net/%s/master/ifindex", name);
1065c21e5facSXueming Li 	/* read bond ifindex */
1066c21e5facSXueming Li 	file = fopen(bond_if, "rb");
1067c21e5facSXueming Li 	if (file == NULL) {
1068c21e5facSXueming Li 		rte_errno = errno;
1069c21e5facSXueming Li 		return -rte_errno;
1070c21e5facSXueming Li 	}
1071c21e5facSXueming Li 	ret = fscanf(file, "%u", &index);
1072c21e5facSXueming Li 	fclose(file);
1073c21e5facSXueming Li 	if (ret <= 0) {
1074c21e5facSXueming Li 		rte_errno = errno;
1075c21e5facSXueming Li 		return -rte_errno;
1076c21e5facSXueming Li 	}
1077c21e5facSXueming Li 	if (ifindex)
1078c21e5facSXueming Li 		*ifindex = index;
1079c21e5facSXueming Li 
1080c21e5facSXueming Li 	/* read bond device name from symbolic link */
1081c21e5facSXueming Li 	if (ifname) {
1082c21e5facSXueming Li 		if (!if_indextoname(index, ifname)) {
1083c21e5facSXueming Li 			rte_errno = errno;
1084c21e5facSXueming Li 			return -rte_errno;
1085c21e5facSXueming Li 		}
1086c21e5facSXueming Li 	}
1087c21e5facSXueming Li 	return 0;
1088c21e5facSXueming Li }
1089c21e5facSXueming Li 
1090c21e5facSXueming Li /**
10911256805dSOphir Munk  * DPDK callback to retrieve plug-in module EEPROM information (type and size).
10921256805dSOphir Munk  *
10931256805dSOphir Munk  * @param dev
10941256805dSOphir Munk  *   Pointer to Ethernet device structure.
10951256805dSOphir Munk  * @param[out] modinfo
10961256805dSOphir Munk  *   Storage for plug-in module EEPROM information.
10971256805dSOphir Munk  *
10981256805dSOphir Munk  * @return
10991256805dSOphir Munk  *   0 on success, a negative errno value otherwise and rte_errno is set.
11001256805dSOphir Munk  */
11011256805dSOphir Munk int
11021256805dSOphir Munk mlx5_get_module_info(struct rte_eth_dev *dev,
11031256805dSOphir Munk 		     struct rte_eth_dev_module_info *modinfo)
11041256805dSOphir Munk {
11051256805dSOphir Munk 	struct ethtool_modinfo info = {
11061256805dSOphir Munk 		.cmd = ETHTOOL_GMODULEINFO,
11071256805dSOphir Munk 	};
11081256805dSOphir Munk 	struct ifreq ifr = (struct ifreq) {
11091256805dSOphir Munk 		.ifr_data = (void *)&info,
11101256805dSOphir Munk 	};
11111256805dSOphir Munk 	int ret = 0;
11121256805dSOphir Munk 
1113e2bd08d5SChengchang Tang 	if (!dev) {
11141256805dSOphir Munk 		DRV_LOG(WARNING, "missing argument, cannot get module info");
11151256805dSOphir Munk 		rte_errno = EINVAL;
11161256805dSOphir Munk 		return -rte_errno;
11171256805dSOphir Munk 	}
11181256805dSOphir Munk 	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
11191256805dSOphir Munk 	if (ret) {
11201256805dSOphir Munk 		DRV_LOG(WARNING, "port %u ioctl(SIOCETHTOOL) failed: %s",
11211256805dSOphir Munk 			dev->data->port_id, strerror(rte_errno));
11221256805dSOphir Munk 		return ret;
11231256805dSOphir Munk 	}
11241256805dSOphir Munk 	modinfo->type = info.type;
11251256805dSOphir Munk 	modinfo->eeprom_len = info.eeprom_len;
11261256805dSOphir Munk 	return ret;
11271256805dSOphir Munk }
11281256805dSOphir Munk 
11291256805dSOphir Munk /**
11301256805dSOphir Munk  * DPDK callback to retrieve plug-in module EEPROM data.
11311256805dSOphir Munk  *
11321256805dSOphir Munk  * @param dev
11331256805dSOphir Munk  *   Pointer to Ethernet device structure.
11341256805dSOphir Munk  * @param[out] info
11351256805dSOphir Munk  *   Storage for plug-in module EEPROM data.
11361256805dSOphir Munk  *
11371256805dSOphir Munk  * @return
11381256805dSOphir Munk  *   0 on success, a negative errno value otherwise and rte_errno is set.
11391256805dSOphir Munk  */
11401256805dSOphir Munk int mlx5_get_module_eeprom(struct rte_eth_dev *dev,
11411256805dSOphir Munk 			   struct rte_dev_eeprom_info *info)
11421256805dSOphir Munk {
11431256805dSOphir Munk 	struct ethtool_eeprom *eeprom;
11441256805dSOphir Munk 	struct ifreq ifr;
11451256805dSOphir Munk 	int ret = 0;
11461256805dSOphir Munk 
1147e2bd08d5SChengchang Tang 	if (!dev) {
11481256805dSOphir Munk 		DRV_LOG(WARNING, "missing argument, cannot get module eeprom");
11491256805dSOphir Munk 		rte_errno = EINVAL;
11501256805dSOphir Munk 		return -rte_errno;
11511256805dSOphir Munk 	}
11522175c4dcSSuanming Mou 	eeprom = mlx5_malloc(MLX5_MEM_ZERO,
11532175c4dcSSuanming Mou 			     (sizeof(struct ethtool_eeprom) + info->length), 0,
11542175c4dcSSuanming Mou 			     SOCKET_ID_ANY);
11551256805dSOphir Munk 	if (!eeprom) {
11561256805dSOphir Munk 		DRV_LOG(WARNING, "port %u cannot allocate memory for "
11571256805dSOphir Munk 			"eeprom data", dev->data->port_id);
11581256805dSOphir Munk 		rte_errno = ENOMEM;
11591256805dSOphir Munk 		return -rte_errno;
11601256805dSOphir Munk 	}
11611256805dSOphir Munk 	eeprom->cmd = ETHTOOL_GMODULEEEPROM;
11621256805dSOphir Munk 	eeprom->offset = info->offset;
11631256805dSOphir Munk 	eeprom->len = info->length;
11641256805dSOphir Munk 	ifr = (struct ifreq) {
11651256805dSOphir Munk 		.ifr_data = (void *)eeprom,
11661256805dSOphir Munk 	};
11671256805dSOphir Munk 	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
11681256805dSOphir Munk 	if (ret)
11691256805dSOphir Munk 		DRV_LOG(WARNING, "port %u ioctl(SIOCETHTOOL) failed: %s",
11701256805dSOphir Munk 			dev->data->port_id, strerror(rte_errno));
11711256805dSOphir Munk 	else
11721256805dSOphir Munk 		rte_memcpy(info->data, eeprom->data, info->length);
11732175c4dcSSuanming Mou 	mlx5_free(eeprom);
11741256805dSOphir Munk 	return ret;
11751256805dSOphir Munk }
117698c4b12aSOphir Munk 
117798c4b12aSOphir Munk /**
117898c4b12aSOphir Munk  * Read device counters table.
117998c4b12aSOphir Munk  *
118098c4b12aSOphir Munk  * @param dev
118198c4b12aSOphir Munk  *   Pointer to Ethernet device.
11827ed15acdSXueming Li  * @param[in] pf
11837ed15acdSXueming Li  *   PF index in case of bonding device, -1 otherwise
118498c4b12aSOphir Munk  * @param[out] stats
118598c4b12aSOphir Munk  *   Counters table output buffer.
118698c4b12aSOphir Munk  *
118798c4b12aSOphir Munk  * @return
118898c4b12aSOphir Munk  *   0 on success and stats is filled, negative errno value otherwise and
118998c4b12aSOphir Munk  *   rte_errno is set.
119098c4b12aSOphir Munk  */
11917ed15acdSXueming Li static int
11927ed15acdSXueming Li _mlx5_os_read_dev_counters(struct rte_eth_dev *dev, int pf, uint64_t *stats)
119398c4b12aSOphir Munk {
119498c4b12aSOphir Munk 	struct mlx5_priv *priv = dev->data->dev_private;
119598c4b12aSOphir Munk 	struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
119698c4b12aSOphir Munk 	unsigned int i;
119798c4b12aSOphir Munk 	struct ifreq ifr;
1198a687c3e6SBing Zhao 	unsigned int max_stats_n = RTE_MAX(xstats_ctrl->stats_n, xstats_ctrl->stats_n_2nd);
1199a687c3e6SBing Zhao 	unsigned int stats_sz = max_stats_n * sizeof(uint64_t);
120098c4b12aSOphir Munk 	unsigned char et_stat_buf[sizeof(struct ethtool_stats) + stats_sz];
120198c4b12aSOphir Munk 	struct ethtool_stats *et_stats = (struct ethtool_stats *)et_stat_buf;
120298c4b12aSOphir Munk 	int ret;
1203a687c3e6SBing Zhao 	uint16_t i_idx, o_idx;
120498c4b12aSOphir Munk 
120598c4b12aSOphir Munk 	et_stats->cmd = ETHTOOL_GSTATS;
1206a687c3e6SBing Zhao 	/* Pass the maximum value; the kernel driver may ignore it. */
1207a687c3e6SBing Zhao 	et_stats->n_stats = max_stats_n;
120898c4b12aSOphir Munk 	ifr.ifr_data = (caddr_t)et_stats;
12097ed15acdSXueming Li 	if (pf >= 0)
12107ed15acdSXueming Li 		ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[pf].ifname,
12117ed15acdSXueming Li 					   SIOCETHTOOL, &ifr);
12127ed15acdSXueming Li 	else
121398c4b12aSOphir Munk 		ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
121498c4b12aSOphir Munk 	if (ret) {
121598c4b12aSOphir Munk 		DRV_LOG(WARNING,
121698c4b12aSOphir Munk 			"port %u unable to read statistic values from device",
121798c4b12aSOphir Munk 			dev->data->port_id);
121898c4b12aSOphir Munk 		return ret;
121998c4b12aSOphir Munk 	}
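	/*
	 * Accumulate the counters according to the mapping built in
	 * mlx5_os_get_stats_strings(): the primary table serves the
	 * non-bonding case and the 1st bonding PF (pf <= 0), while the
	 * "_2nd" table serves the 2nd bonding PF, whose ethtool string
	 * set may differ.
	 */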
1220a687c3e6SBing Zhao 	if (pf <= 0) {
1221a687c3e6SBing Zhao 		for (i = 0; i != xstats_ctrl->mlx5_stats_n; i++) {
1222a687c3e6SBing Zhao 			i_idx = xstats_ctrl->dev_table_idx[i];
1223a687c3e6SBing Zhao 			if (i_idx == UINT16_MAX || xstats_ctrl->info[i].dev)
12247ed15acdSXueming Li 				continue;
1225a687c3e6SBing Zhao 			o_idx = xstats_ctrl->xstats_o_idx[i];
1226a687c3e6SBing Zhao 			stats[o_idx] += (uint64_t)et_stats->data[i_idx];
1227a687c3e6SBing Zhao 		}
1228a687c3e6SBing Zhao 	} else {
1229a687c3e6SBing Zhao 		for (i = 0; i != xstats_ctrl->mlx5_stats_n; i++) {
1230a687c3e6SBing Zhao 			i_idx = xstats_ctrl->dev_table_idx_2nd[i];
1231a687c3e6SBing Zhao 			if (i_idx == UINT16_MAX)
1232a687c3e6SBing Zhao 				continue;
1233a687c3e6SBing Zhao 			o_idx = xstats_ctrl->xstats_o_idx_2nd[i];
1234a687c3e6SBing Zhao 			stats[o_idx] += (uint64_t)et_stats->data[i_idx];
1235a687c3e6SBing Zhao 		}
12367ed15acdSXueming Li 	}
12377ed15acdSXueming Li 	return 0;
12387ed15acdSXueming Li }
12397ed15acdSXueming Li 
1240a687c3e6SBing Zhao /*
12417ed15acdSXueming Li  * Read device counters.
12427ed15acdSXueming Li  *
12437ed15acdSXueming Li  * @param dev
12447ed15acdSXueming Li  *   Pointer to Ethernet device.
1245a687c3e6SBing Zhao  * @param bond_master
1246a687c3e6SBing Zhao  *   Indicate if the device is a bond master.
1247a687c3e6SBing Zhao  * @param stats
12487ed15acdSXueming Li  *   Counters table output buffer.
12497ed15acdSXueming Li  *
12507ed15acdSXueming Li  * @return
12517ed15acdSXueming Li  *   0 on success and stats is filled, negative errno value otherwise and
12527ed15acdSXueming Li  *   rte_errno is set.
12537ed15acdSXueming Li  */
12547ed15acdSXueming Li int
1255a687c3e6SBing Zhao mlx5_os_read_dev_counters(struct rte_eth_dev *dev, bool bond_master, uint64_t *stats)
12567ed15acdSXueming Li {
12577ed15acdSXueming Li 	struct mlx5_priv *priv = dev->data->dev_private;
12587ed15acdSXueming Li 	struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
12597ed15acdSXueming Li 	int ret = 0, i;
12607ed15acdSXueming Li 
12617ed15acdSXueming Li 	memset(stats, 0, sizeof(*stats) * xstats_ctrl->mlx5_stats_n);
12627ed15acdSXueming Li 	/* Read ifreq counters. */
1263a687c3e6SBing Zhao 	if (bond_master) {
12647ed15acdSXueming Li 		/* Sum xstats from bonding device member ports. */
12657ed15acdSXueming Li 		for (i = 0; i < priv->sh->bond.n_port; i++) {
12667ed15acdSXueming Li 			ret = _mlx5_os_read_dev_counters(dev, i, stats);
12677ed15acdSXueming Li 			if (ret)
12687ed15acdSXueming Li 				return ret;
12697ed15acdSXueming Li 		}
12707ed15acdSXueming Li 	} else {
12717ed15acdSXueming Li 		ret = _mlx5_os_read_dev_counters(dev, -1, stats);
1272eadc35dfSGeoffrey Le Gourriérec 		if (ret)
1273eadc35dfSGeoffrey Le Gourriérec 			return ret;
12747ed15acdSXueming Li 	}
1275a687c3e6SBing Zhao 	/*
1276a687c3e6SBing Zhao 	 * Read IB counters.
1277a687c3e6SBing Zhao 	 * The counters are unique per IB device but not per net IF.
1278a687c3e6SBing Zhao 	 * In bonding mode, getting the stats name only from 1 port is enough.
1279a687c3e6SBing Zhao 	 */
1280a687c3e6SBing Zhao 	for (i = 0; i != xstats_ctrl->mlx5_stats_n; i++) {
12817ed15acdSXueming Li 		if (!xstats_ctrl->info[i].dev)
12827ed15acdSXueming Li 			continue;
128398c4b12aSOphir Munk 		/* Return the last cached xstats value if the read fails. */
1284eadc35dfSGeoffrey Le Gourriérec 		if (mlx5_os_read_dev_stat(priv, xstats_ctrl->info[i].ctr_name,
1285eadc35dfSGeoffrey Le Gourriérec 					  &stats[i]) == 0)
128698c4b12aSOphir Munk 			xstats_ctrl->xstats[i] = stats[i];
128798c4b12aSOphir Munk 		else
128898c4b12aSOphir Munk 			stats[i] = xstats_ctrl->xstats[i];
128998c4b12aSOphir Munk 	}
12907ed15acdSXueming Li 	return ret;
129198c4b12aSOphir Munk }
129298c4b12aSOphir Munk 
1293a687c3e6SBing Zhao /*
129498c4b12aSOphir Munk  * Query the number of statistics provided by ETHTOOL.
129598c4b12aSOphir Munk  *
129698c4b12aSOphir Munk  * @param dev
129798c4b12aSOphir Munk  *   Pointer to Ethernet device.
1298a687c3e6SBing Zhao  * @param bond_master
1299a687c3e6SBing Zhao  *   Indicate if the device is a bond master.
1300a687c3e6SBing Zhao  * @param n_stats
1301a687c3e6SBing Zhao  *   Pointer to number of stats to store.
1302a687c3e6SBing Zhao  * @param n_stats_sec
1303a687c3e6SBing Zhao  *   Pointer to number of stats to store for the 2nd port of the bond.
130498c4b12aSOphir Munk  *
130598c4b12aSOphir Munk  * @return
1306a687c3e6SBing Zhao  *   0 on success, negative errno value otherwise and rte_errno is set.
130798c4b12aSOphir Munk  */
130898c4b12aSOphir Munk int
1309a687c3e6SBing Zhao mlx5_os_get_stats_n(struct rte_eth_dev *dev, bool bond_master,
1310a687c3e6SBing Zhao 		    uint16_t *n_stats, uint16_t *n_stats_sec)
131198c4b12aSOphir Munk {
13127ed15acdSXueming Li 	struct mlx5_priv *priv = dev->data->dev_private;
131398c4b12aSOphir Munk 	struct ethtool_drvinfo drvinfo;
131498c4b12aSOphir Munk 	struct ifreq ifr;
131598c4b12aSOphir Munk 	int ret;
131698c4b12aSOphir Munk 
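	/* ETHTOOL_GDRVINFO reports the number of ETH_SS_STATS strings in n_stats. */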
131798c4b12aSOphir Munk 	drvinfo.cmd = ETHTOOL_GDRVINFO;
131898c4b12aSOphir Munk 	ifr.ifr_data = (caddr_t)&drvinfo;
1319a687c3e6SBing Zhao 	/* Bonding PFs. */
1320a687c3e6SBing Zhao 	if (bond_master) {
13217ed15acdSXueming Li 		ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[0].ifname,
13227ed15acdSXueming Li 					   SIOCETHTOOL, &ifr);
1323a687c3e6SBing Zhao 		if (ret) {
1324a687c3e6SBing Zhao 			DRV_LOG(WARNING, "bonding port %u unable to query number of"
1325a687c3e6SBing Zhao 				" statistics for the 1st slave, %d", PORT_ID(priv), ret);
1326a687c3e6SBing Zhao 			return ret;
1327a687c3e6SBing Zhao 		}
1328a687c3e6SBing Zhao 		*n_stats = drvinfo.n_stats;
1329a687c3e6SBing Zhao 		ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[1].ifname,
1330a687c3e6SBing Zhao 					   SIOCETHTOOL, &ifr);
1331a687c3e6SBing Zhao 		if (ret) {
1332a687c3e6SBing Zhao 			DRV_LOG(WARNING, "bonding port %u unable to query number of"
1333a687c3e6SBing Zhao 				" statistics for the 2nd slave, %d", PORT_ID(priv), ret);
1334a687c3e6SBing Zhao 			return ret;
1335a687c3e6SBing Zhao 		}
1336a687c3e6SBing Zhao 		*n_stats_sec = drvinfo.n_stats;
1337a687c3e6SBing Zhao 	} else {
133898c4b12aSOphir Munk 		ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
133998c4b12aSOphir Munk 		if (ret) {
134098c4b12aSOphir Munk 			DRV_LOG(WARNING, "port %u unable to query number of statistics",
1341a687c3e6SBing Zhao 				PORT_ID(priv));
134298c4b12aSOphir Munk 			return ret;
134398c4b12aSOphir Munk 		}
1344a687c3e6SBing Zhao 		*n_stats = drvinfo.n_stats;
1345a687c3e6SBing Zhao 	}
1346a687c3e6SBing Zhao 	return 0;
134798c4b12aSOphir Munk }
134898c4b12aSOphir Munk 
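/*
 * Mapping between the xstats names exposed by DPDK (dpdk_name) and the
 * underlying counter names (ctr_name). Entries with .dev set are read
 * per IB device via mlx5_os_read_dev_stat() rather than via ETHTOOL_GSTATS.
 */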
134998c4b12aSOphir Munk static const struct mlx5_counter_ctrl mlx5_counters_init[] = {
135098c4b12aSOphir Munk 	{
13511101809bSViacheslav Ovsiienko 		.dpdk_name = "rx_unicast_bytes",
135298c4b12aSOphir Munk 		.ctr_name = "rx_vport_unicast_bytes",
135398c4b12aSOphir Munk 	},
135498c4b12aSOphir Munk 	{
13551101809bSViacheslav Ovsiienko 		.dpdk_name = "rx_multicast_bytes",
135698c4b12aSOphir Munk 		.ctr_name = "rx_vport_multicast_bytes",
135798c4b12aSOphir Munk 	},
135898c4b12aSOphir Munk 	{
13591101809bSViacheslav Ovsiienko 		.dpdk_name = "rx_broadcast_bytes",
136098c4b12aSOphir Munk 		.ctr_name = "rx_vport_broadcast_bytes",
136198c4b12aSOphir Munk 	},
136298c4b12aSOphir Munk 	{
13631101809bSViacheslav Ovsiienko 		.dpdk_name = "rx_unicast_packets",
136498c4b12aSOphir Munk 		.ctr_name = "rx_vport_unicast_packets",
136598c4b12aSOphir Munk 	},
136698c4b12aSOphir Munk 	{
13671101809bSViacheslav Ovsiienko 		.dpdk_name = "rx_multicast_packets",
136898c4b12aSOphir Munk 		.ctr_name = "rx_vport_multicast_packets",
136998c4b12aSOphir Munk 	},
137098c4b12aSOphir Munk 	{
13711101809bSViacheslav Ovsiienko 		.dpdk_name = "rx_broadcast_packets",
137298c4b12aSOphir Munk 		.ctr_name = "rx_vport_broadcast_packets",
137398c4b12aSOphir Munk 	},
137498c4b12aSOphir Munk 	{
13751101809bSViacheslav Ovsiienko 		.dpdk_name = "tx_unicast_bytes",
137698c4b12aSOphir Munk 		.ctr_name = "tx_vport_unicast_bytes",
137798c4b12aSOphir Munk 	},
137898c4b12aSOphir Munk 	{
13791101809bSViacheslav Ovsiienko 		.dpdk_name = "tx_multicast_bytes",
138098c4b12aSOphir Munk 		.ctr_name = "tx_vport_multicast_bytes",
138198c4b12aSOphir Munk 	},
138298c4b12aSOphir Munk 	{
13831101809bSViacheslav Ovsiienko 		.dpdk_name = "tx_broadcast_bytes",
138498c4b12aSOphir Munk 		.ctr_name = "tx_vport_broadcast_bytes",
138598c4b12aSOphir Munk 	},
138698c4b12aSOphir Munk 	{
13871101809bSViacheslav Ovsiienko 		.dpdk_name = "tx_unicast_packets",
138898c4b12aSOphir Munk 		.ctr_name = "tx_vport_unicast_packets",
138998c4b12aSOphir Munk 	},
139098c4b12aSOphir Munk 	{
13911101809bSViacheslav Ovsiienko 		.dpdk_name = "tx_multicast_packets",
139298c4b12aSOphir Munk 		.ctr_name = "tx_vport_multicast_packets",
139398c4b12aSOphir Munk 	},
139498c4b12aSOphir Munk 	{
13951101809bSViacheslav Ovsiienko 		.dpdk_name = "tx_broadcast_packets",
139698c4b12aSOphir Munk 		.ctr_name = "tx_vport_broadcast_packets",
139798c4b12aSOphir Munk 	},
139898c4b12aSOphir Munk 	{
13991101809bSViacheslav Ovsiienko 		.dpdk_name = "rx_wqe_errors",
140098c4b12aSOphir Munk 		.ctr_name = "rx_wqe_err",
140198c4b12aSOphir Munk 	},
140298c4b12aSOphir Munk 	{
14031101809bSViacheslav Ovsiienko 		.dpdk_name = "rx_phy_crc_errors",
140498c4b12aSOphir Munk 		.ctr_name = "rx_crc_errors_phy",
140598c4b12aSOphir Munk 	},
140698c4b12aSOphir Munk 	{
14071101809bSViacheslav Ovsiienko 		.dpdk_name = "rx_phy_in_range_len_errors",
140898c4b12aSOphir Munk 		.ctr_name = "rx_in_range_len_errors_phy",
140998c4b12aSOphir Munk 	},
141098c4b12aSOphir Munk 	{
14111101809bSViacheslav Ovsiienko 		.dpdk_name = "rx_phy_symbol_errors",
141298c4b12aSOphir Munk 		.ctr_name = "rx_symbol_err_phy",
141398c4b12aSOphir Munk 	},
141498c4b12aSOphir Munk 	{
14151101809bSViacheslav Ovsiienko 		.dpdk_name = "tx_phy_errors",
141698c4b12aSOphir Munk 		.ctr_name = "tx_errors_phy",
141798c4b12aSOphir Munk 	},
141898c4b12aSOphir Munk 	{
141998c4b12aSOphir Munk 		.dpdk_name = "rx_out_of_buffer",
142098c4b12aSOphir Munk 		.ctr_name = "out_of_buffer",
142198c4b12aSOphir Munk 		.dev = 1,
142298c4b12aSOphir Munk 	},
142398c4b12aSOphir Munk 	{
1424*d0f858a6SShani Peretz 		.dpdk_name = "dev_internal_queue_oob",
1425*d0f858a6SShani Peretz 		.ctr_name = "dev_internal_queue_oob",
1426*d0f858a6SShani Peretz 	},
1427*d0f858a6SShani Peretz 	{
14281101809bSViacheslav Ovsiienko 		.dpdk_name = "tx_phy_packets",
142998c4b12aSOphir Munk 		.ctr_name = "tx_packets_phy",
143098c4b12aSOphir Munk 	},
143198c4b12aSOphir Munk 	{
14321101809bSViacheslav Ovsiienko 		.dpdk_name = "rx_phy_packets",
143398c4b12aSOphir Munk 		.ctr_name = "rx_packets_phy",
143498c4b12aSOphir Munk 	},
143598c4b12aSOphir Munk 	{
14361101809bSViacheslav Ovsiienko 		.dpdk_name = "tx_phy_discard_packets",
143798c4b12aSOphir Munk 		.ctr_name = "tx_discards_phy",
143898c4b12aSOphir Munk 	},
143998c4b12aSOphir Munk 	{
14401101809bSViacheslav Ovsiienko 		.dpdk_name = "rx_phy_discard_packets",
144198c4b12aSOphir Munk 		.ctr_name = "rx_discards_phy",
144298c4b12aSOphir Munk 	},
144398c4b12aSOphir Munk 	{
14442bd03a43SRongwei Liu 		.dpdk_name = "rx_prio0_buf_discard_packets",
14452bd03a43SRongwei Liu 		.ctr_name = "rx_prio0_buf_discard",
14462bd03a43SRongwei Liu 	},
14472bd03a43SRongwei Liu 	{
14482bd03a43SRongwei Liu 		.dpdk_name = "rx_prio1_buf_discard_packets",
14492bd03a43SRongwei Liu 		.ctr_name = "rx_prio1_buf_discard",
14502bd03a43SRongwei Liu 	},
14512bd03a43SRongwei Liu 	{
14522bd03a43SRongwei Liu 		.dpdk_name = "rx_prio2_buf_discard_packets",
14532bd03a43SRongwei Liu 		.ctr_name = "rx_prio2_buf_discard",
14542bd03a43SRongwei Liu 	},
14552bd03a43SRongwei Liu 	{
14562bd03a43SRongwei Liu 		.dpdk_name = "rx_prio3_buf_discard_packets",
14572bd03a43SRongwei Liu 		.ctr_name = "rx_prio3_buf_discard",
14582bd03a43SRongwei Liu 	},
14592bd03a43SRongwei Liu 	{
14602bd03a43SRongwei Liu 		.dpdk_name = "rx_prio4_buf_discard_packets",
14612bd03a43SRongwei Liu 		.ctr_name = "rx_prio4_buf_discard",
14622bd03a43SRongwei Liu 	},
14632bd03a43SRongwei Liu 	{
14642bd03a43SRongwei Liu 		.dpdk_name = "rx_prio5_buf_discard_packets",
14652bd03a43SRongwei Liu 		.ctr_name = "rx_prio5_buf_discard",
14662bd03a43SRongwei Liu 	},
14672bd03a43SRongwei Liu 	{
14682bd03a43SRongwei Liu 		.dpdk_name = "rx_prio6_buf_discard_packets",
14692bd03a43SRongwei Liu 		.ctr_name = "rx_prio6_buf_discard",
14702bd03a43SRongwei Liu 	},
14712bd03a43SRongwei Liu 	{
14722bd03a43SRongwei Liu 		.dpdk_name = "rx_prio7_buf_discard_packets",
14732bd03a43SRongwei Liu 		.ctr_name = "rx_prio7_buf_discard",
14742bd03a43SRongwei Liu 	},
14752bd03a43SRongwei Liu 	{
14762bd03a43SRongwei Liu 		.dpdk_name = "rx_prio0_cong_discard_packets",
14772bd03a43SRongwei Liu 		.ctr_name = "rx_prio0_cong_discard",
14782bd03a43SRongwei Liu 	},
14792bd03a43SRongwei Liu 	{
14802bd03a43SRongwei Liu 		.dpdk_name = "rx_prio1_cong_discard_packets",
14812bd03a43SRongwei Liu 		.ctr_name = "rx_prio1_cong_discard",
14822bd03a43SRongwei Liu 	},
14832bd03a43SRongwei Liu 	{
14842bd03a43SRongwei Liu 		.dpdk_name = "rx_prio2_cong_discard_packets",
14852bd03a43SRongwei Liu 		.ctr_name = "rx_prio2_cong_discard",
14862bd03a43SRongwei Liu 	},
14872bd03a43SRongwei Liu 	{
14882bd03a43SRongwei Liu 		.dpdk_name = "rx_prio3_cong_discard_packets",
14892bd03a43SRongwei Liu 		.ctr_name = "rx_prio3_cong_discard",
14902bd03a43SRongwei Liu 	},
14912bd03a43SRongwei Liu 	{
14922bd03a43SRongwei Liu 		.dpdk_name = "rx_prio4_cong_discard_packets",
14932bd03a43SRongwei Liu 		.ctr_name = "rx_prio4_cong_discard",
14942bd03a43SRongwei Liu 	},
14952bd03a43SRongwei Liu 	{
14962bd03a43SRongwei Liu 		.dpdk_name = "rx_prio5_cong_discard_packets",
14972bd03a43SRongwei Liu 		.ctr_name = "rx_prio5_cong_discard",
14982bd03a43SRongwei Liu 	},
14992bd03a43SRongwei Liu 	{
15002bd03a43SRongwei Liu 		.dpdk_name = "rx_prio6_cong_discard_packets",
15012bd03a43SRongwei Liu 		.ctr_name = "rx_prio6_cong_discard",
15022bd03a43SRongwei Liu 	},
15032bd03a43SRongwei Liu 	{
15042bd03a43SRongwei Liu 		.dpdk_name = "rx_prio7_cong_discard_packets",
15052bd03a43SRongwei Liu 		.ctr_name = "rx_prio7_cong_discard",
15062bd03a43SRongwei Liu 	},
15072bd03a43SRongwei Liu 	{
15081101809bSViacheslav Ovsiienko 		.dpdk_name = "tx_phy_bytes",
150998c4b12aSOphir Munk 		.ctr_name = "tx_bytes_phy",
151098c4b12aSOphir Munk 	},
151198c4b12aSOphir Munk 	{
15121101809bSViacheslav Ovsiienko 		.dpdk_name = "rx_phy_bytes",
151398c4b12aSOphir Munk 		.ctr_name = "rx_bytes_phy",
151498c4b12aSOphir Munk 	},
151598c4b12aSOphir Munk 	/* Representor only */
151698c4b12aSOphir Munk 	{
15171101809bSViacheslav Ovsiienko 		.dpdk_name = "rx_vport_packets",
151898c4b12aSOphir Munk 		.ctr_name = "vport_rx_packets",
151998c4b12aSOphir Munk 	},
152098c4b12aSOphir Munk 	{
15211101809bSViacheslav Ovsiienko 		.dpdk_name = "rx_vport_bytes",
152298c4b12aSOphir Munk 		.ctr_name = "vport_rx_bytes",
152398c4b12aSOphir Munk 	},
152498c4b12aSOphir Munk 	{
15251101809bSViacheslav Ovsiienko 		.dpdk_name = "tx_vport_packets",
152698c4b12aSOphir Munk 		.ctr_name = "vport_tx_packets",
152798c4b12aSOphir Munk 	},
152898c4b12aSOphir Munk 	{
15291101809bSViacheslav Ovsiienko 		.dpdk_name = "tx_vport_bytes",
153098c4b12aSOphir Munk 		.ctr_name = "vport_tx_bytes",
153198c4b12aSOphir Munk 	},
15328e7869f0SWathsala Vithanage 	/**
15338e7869f0SWathsala Vithanage 	 * Device counters: these counters apply to the
15348e7869f0SWathsala Vithanage 	 * entire PCI device (NIC) and are not counted
15358e7869f0SWathsala Vithanage 	 * on a per-port or per-queue basis.
15368e7869f0SWathsala Vithanage 	 */
15378e7869f0SWathsala Vithanage 	{
15388e7869f0SWathsala Vithanage 		.dpdk_name = "rx_pci_signal_integrity",
15398e7869f0SWathsala Vithanage 		.ctr_name = "rx_pci_signal_integrity",
15408e7869f0SWathsala Vithanage 	},
15418e7869f0SWathsala Vithanage 	{
15428e7869f0SWathsala Vithanage 		.dpdk_name = "tx_pci_signal_integrity",
15438e7869f0SWathsala Vithanage 		.ctr_name = "tx_pci_signal_integrity",
15448e7869f0SWathsala Vithanage 	},
15458e7869f0SWathsala Vithanage 	{
15468e7869f0SWathsala Vithanage 		.dpdk_name = "outbound_pci_buffer_overflow",
15478e7869f0SWathsala Vithanage 		.ctr_name = "outbound_pci_buffer_overflow",
15488e7869f0SWathsala Vithanage 	},
15498e7869f0SWathsala Vithanage 	{
15508e7869f0SWathsala Vithanage 		.dpdk_name = "outbound_pci_stalled_rd",
15518e7869f0SWathsala Vithanage 		.ctr_name = "outbound_pci_stalled_rd",
15528e7869f0SWathsala Vithanage 	},
15538e7869f0SWathsala Vithanage 	{
15548e7869f0SWathsala Vithanage 		.dpdk_name = "outbound_pci_stalled_wr",
15558e7869f0SWathsala Vithanage 		.ctr_name = "outbound_pci_stalled_wr",
15568e7869f0SWathsala Vithanage 	},
15578e7869f0SWathsala Vithanage 	{
15588e7869f0SWathsala Vithanage 		.dpdk_name = "outbound_pci_stalled_rd_events",
15598e7869f0SWathsala Vithanage 		.ctr_name = "outbound_pci_stalled_rd_events",
15608e7869f0SWathsala Vithanage 	},
15618e7869f0SWathsala Vithanage 	{
15628e7869f0SWathsala Vithanage 		.dpdk_name = "outbound_pci_stalled_wr_events",
15638e7869f0SWathsala Vithanage 		.ctr_name = "outbound_pci_stalled_wr_events",
15648e7869f0SWathsala Vithanage 	},
15658e7869f0SWathsala Vithanage 	{
15668e7869f0SWathsala Vithanage 		.dpdk_name = "dev_out_of_buffer",
15678e7869f0SWathsala Vithanage 		.ctr_name = "dev_out_of_buffer",
15688e7869f0SWathsala Vithanage 	},
156998c4b12aSOphir Munk };
157098c4b12aSOphir Munk 
157198c4b12aSOphir Munk static const unsigned int xstats_n = RTE_DIM(mlx5_counters_init);
157298c4b12aSOphir Munk 
1573a687c3e6SBing Zhao static int
1574a687c3e6SBing Zhao mlx5_os_get_stats_strings(struct rte_eth_dev *dev, bool bond_master,
1575a687c3e6SBing Zhao 			  struct ethtool_gstrings *strings,
1576a687c3e6SBing Zhao 			  uint32_t stats_n, uint32_t stats_n_2nd)
1577a687c3e6SBing Zhao {
1578a687c3e6SBing Zhao 	struct mlx5_priv *priv = dev->data->dev_private;
1579a687c3e6SBing Zhao 	struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
1580a687c3e6SBing Zhao 	struct ifreq ifr;
1581a687c3e6SBing Zhao 	int ret;
1582a687c3e6SBing Zhao 	uint32_t i, j, idx;
1583a687c3e6SBing Zhao 
1584a687c3e6SBing Zhao 	/* Ensure the counter list fits into the index arrays before accessing them. */
1585a687c3e6SBing Zhao 	MLX5_ASSERT(xstats_n <= MLX5_MAX_XSTATS);
1586a687c3e6SBing Zhao 	strings->cmd = ETHTOOL_GSTRINGS;
1587a687c3e6SBing Zhao 	strings->string_set = ETH_SS_STATS;
1588a687c3e6SBing Zhao 	strings->len = stats_n;
1589a687c3e6SBing Zhao 	ifr.ifr_data = (caddr_t)strings;
1590a687c3e6SBing Zhao 	if (bond_master)
1591a687c3e6SBing Zhao 		ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[0].ifname,
1592a687c3e6SBing Zhao 					   SIOCETHTOOL, &ifr);
1593a687c3e6SBing Zhao 	else
1594a687c3e6SBing Zhao 		ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
1595a687c3e6SBing Zhao 	if (ret) {
1596a687c3e6SBing Zhao 		DRV_LOG(WARNING, "port %u unable to get statistic names with %d",
1597a687c3e6SBing Zhao 			PORT_ID(priv), ret);
1598a687c3e6SBing Zhao 		return ret;
1599a687c3e6SBing Zhao 	}
1600a687c3e6SBing Zhao 	/* Build the counter-to-string index mapping once, so later reads need fewer iterations. */
1601a687c3e6SBing Zhao 	for (j = 0; j < xstats_n; j++) {
1602a687c3e6SBing Zhao 		xstats_ctrl->dev_table_idx[j] = UINT16_MAX;
1603a687c3e6SBing Zhao 		for (i = 0; i < stats_n; i++) {
1604a687c3e6SBing Zhao 			const char *curr_string =
1605a687c3e6SBing Zhao 				(const char *)&strings->data[i * ETH_GSTRING_LEN];
1606a687c3e6SBing Zhao 
1607a687c3e6SBing Zhao 			if (!strcmp(mlx5_counters_init[j].ctr_name, curr_string)) {
1608a687c3e6SBing Zhao 				idx = xstats_ctrl->mlx5_stats_n++;
1609a687c3e6SBing Zhao 				xstats_ctrl->dev_table_idx[j] = i;
1610a687c3e6SBing Zhao 				xstats_ctrl->xstats_o_idx[j] = idx;
1611a687c3e6SBing Zhao 				xstats_ctrl->info[idx] = mlx5_counters_init[j];
1612a687c3e6SBing Zhao 			}
1613a687c3e6SBing Zhao 		}
1614a687c3e6SBing Zhao 	}
1615a687c3e6SBing Zhao 	if (!bond_master) {
1616a687c3e6SBing Zhao 		/* Add dev counters, unique per IB device. */
1617a687c3e6SBing Zhao 		for (j = 0; j != xstats_n; j++) {
1618a687c3e6SBing Zhao 			if (mlx5_counters_init[j].dev) {
1619a687c3e6SBing Zhao 				idx = xstats_ctrl->mlx5_stats_n++;
1620a687c3e6SBing Zhao 				xstats_ctrl->info[idx] = mlx5_counters_init[j];
1621a687c3e6SBing Zhao 				xstats_ctrl->hw_stats[idx] = 0;
1622a687c3e6SBing Zhao 			}
1623a687c3e6SBing Zhao 		}
1624a687c3e6SBing Zhao 		return 0;
1625a687c3e6SBing Zhao 	}
1626a687c3e6SBing Zhao 
1627a687c3e6SBing Zhao 	strings->len = stats_n_2nd;
1628a687c3e6SBing Zhao 	ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[1].ifname,
1629a687c3e6SBing Zhao 				   SIOCETHTOOL, &ifr);
1630a687c3e6SBing Zhao 	if (ret) {
1631a687c3e6SBing Zhao 		DRV_LOG(WARNING, "port %u unable to get statistic names for 2nd slave with %d",
1632a687c3e6SBing Zhao 			PORT_ID(priv), ret);
1633a687c3e6SBing Zhao 		return ret;
1634a687c3e6SBing Zhao 	}
1635a687c3e6SBing Zhao 	/* The 2nd slave port may have a different string set, depending on the configuration. */
1636a687c3e6SBing Zhao 	for (j = 0; j != xstats_n; j++) {
1637a687c3e6SBing Zhao 		xstats_ctrl->dev_table_idx_2nd[j] = UINT16_MAX;
1638a687c3e6SBing Zhao 		for (i = 0; i != stats_n_2nd; i++) {
1639a687c3e6SBing Zhao 			const char *curr_string =
1640a687c3e6SBing Zhao 				(const char *)&strings->data[i * ETH_GSTRING_LEN];
1641a687c3e6SBing Zhao 
1642a687c3e6SBing Zhao 			if (!strcmp(mlx5_counters_init[j].ctr_name, curr_string)) {
1643a687c3e6SBing Zhao 				xstats_ctrl->dev_table_idx_2nd[j] = i;
1644a687c3e6SBing Zhao 				if (xstats_ctrl->dev_table_idx[j] != UINT16_MAX) {
1645a687c3e6SBing Zhao 					/* Already mapped in the 1st slave port. */
1646a687c3e6SBing Zhao 					idx = xstats_ctrl->xstats_o_idx[j];
1647a687c3e6SBing Zhao 					xstats_ctrl->xstats_o_idx_2nd[j] = idx;
1648a687c3e6SBing Zhao 				} else {
1649a687c3e6SBing Zhao 					/* Append the new items to the end of the map. */
1650a687c3e6SBing Zhao 					idx = xstats_ctrl->mlx5_stats_n++;
1651a687c3e6SBing Zhao 					xstats_ctrl->xstats_o_idx_2nd[j] = idx;
1652a687c3e6SBing Zhao 					xstats_ctrl->info[idx] = mlx5_counters_init[j];
1653a687c3e6SBing Zhao 				}
1654a687c3e6SBing Zhao 			}
1655a687c3e6SBing Zhao 		}
1656a687c3e6SBing Zhao 	}
1657a687c3e6SBing Zhao 	/* Device counters are always appended at the end of the table. */
1658a687c3e6SBing Zhao 	for (j = 0; j != xstats_n; j++) {
1659a687c3e6SBing Zhao 		if (mlx5_counters_init[j].dev) {
1660a687c3e6SBing Zhao 			idx = xstats_ctrl->mlx5_stats_n++;
1661a687c3e6SBing Zhao 			xstats_ctrl->info[idx] = mlx5_counters_init[j];
1662a687c3e6SBing Zhao 			xstats_ctrl->hw_stats[idx] = 0;
1663a687c3e6SBing Zhao 		}
1664a687c3e6SBing Zhao 	}
1665a687c3e6SBing Zhao 	return 0;
1666a687c3e6SBing Zhao }
1667a687c3e6SBing Zhao 
166898c4b12aSOphir Munk /**
166998c4b12aSOphir Munk  * Init the structures to read device counters.
167098c4b12aSOphir Munk  *
167198c4b12aSOphir Munk  * @param dev
167298c4b12aSOphir Munk  *   Pointer to Ethernet device.
167398c4b12aSOphir Munk  */
167498c4b12aSOphir Munk void
167598c4b12aSOphir Munk mlx5_os_stats_init(struct rte_eth_dev *dev)
167698c4b12aSOphir Munk {
167798c4b12aSOphir Munk 	struct mlx5_priv *priv = dev->data->dev_private;
167898c4b12aSOphir Munk 	struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
167998c4b12aSOphir Munk 	struct mlx5_stats_ctrl *stats_ctrl = &priv->stats_ctrl;
168098c4b12aSOphir Munk 	struct ethtool_gstrings *strings = NULL;
1681a687c3e6SBing Zhao 	uint16_t dev_stats_n = 0;
1682a687c3e6SBing Zhao 	uint16_t dev_stats_n_2nd = 0;
1683a687c3e6SBing Zhao 	unsigned int max_stats_n;
168498c4b12aSOphir Munk 	unsigned int str_sz;
168598c4b12aSOphir Munk 	int ret;
1686a687c3e6SBing Zhao 	bool bond_master = (priv->master && priv->pf_bond >= 0);
168798c4b12aSOphir Munk 
168898c4b12aSOphir Munk 	/* Reset the count so that values do not accumulate across re-initializations. */
168998c4b12aSOphir Munk 	xstats_ctrl->mlx5_stats_n = 0;
1690a687c3e6SBing Zhao 	ret = mlx5_os_get_stats_n(dev, bond_master, &dev_stats_n, &dev_stats_n_2nd);
169198c4b12aSOphir Munk 	if (ret < 0) {
169298c4b12aSOphir Munk 		DRV_LOG(WARNING, "port %u no extended statistics available",
169398c4b12aSOphir Munk 			dev->data->port_id);
169498c4b12aSOphir Munk 		return;
169598c4b12aSOphir Munk 	}
1696a687c3e6SBing Zhao 	max_stats_n = RTE_MAX(dev_stats_n, dev_stats_n_2nd);
169798c4b12aSOphir Munk 	/* Allocate memory to grab stat names and values. */
1698a687c3e6SBing Zhao 	str_sz = max_stats_n * ETH_GSTRING_LEN;
169998c4b12aSOphir Munk 	strings = (struct ethtool_gstrings *)
170098c4b12aSOphir Munk 		  mlx5_malloc(0, str_sz + sizeof(struct ethtool_gstrings), 0,
170198c4b12aSOphir Munk 			      SOCKET_ID_ANY);
170298c4b12aSOphir Munk 	if (!strings) {
170398c4b12aSOphir Munk 		DRV_LOG(WARNING, "port %u unable to allocate memory for xstats",
170498c4b12aSOphir Munk 			dev->data->port_id);
170598c4b12aSOphir Munk 		return;
170698c4b12aSOphir Munk 	}
1707a687c3e6SBing Zhao 	ret = mlx5_os_get_stats_strings(dev, bond_master, strings,
1708a687c3e6SBing Zhao 					dev_stats_n, dev_stats_n_2nd);
1709a687c3e6SBing Zhao 	if (ret < 0) {
1710a687c3e6SBing Zhao 		DRV_LOG(WARNING, "port %u failed to get the stats strings",
171198c4b12aSOphir Munk 			dev->data->port_id);
171298c4b12aSOphir Munk 		goto free;
171398c4b12aSOphir Munk 	}
171498c4b12aSOphir Munk 	xstats_ctrl->stats_n = dev_stats_n;
1715a687c3e6SBing Zhao 	xstats_ctrl->stats_n_2nd = dev_stats_n_2nd;
171698c4b12aSOphir Munk 	/* Copy to base at first time. */
1717a687c3e6SBing Zhao 	ret = mlx5_os_read_dev_counters(dev, bond_master, xstats_ctrl->base);
171898c4b12aSOphir Munk 	if (ret)
171998c4b12aSOphir Munk 		DRV_LOG(ERR, "port %u cannot read device counters: %s",
172098c4b12aSOphir Munk 			dev->data->port_id, strerror(rte_errno));
172198c4b12aSOphir Munk 	mlx5_os_read_dev_stat(priv, "out_of_buffer", &stats_ctrl->imissed_base);
172298c4b12aSOphir Munk 	stats_ctrl->imissed = 0;
172398c4b12aSOphir Munk free:
172498c4b12aSOphir Munk 	mlx5_free(strings);
172598c4b12aSOphir Munk }
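
/*
 * Illustrative note (not part of the driver): the xstats prepared here are
 * consumed by applications through the generic ethdev API. "port_id" is
 * assumed to be a valid, initialized port; error handling is simplified.
 *
 *	struct rte_eth_xstat_name names[MLX5_MAX_XSTATS];
 *	struct rte_eth_xstat values[MLX5_MAX_XSTATS];
 *	int n = rte_eth_xstats_get_names(port_id, names, RTE_DIM(names));
 *
 *	if (n > 0 && n <= (int)RTE_DIM(values))
 *		rte_eth_xstats_get(port_id, values, n);
 */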
172698c4b12aSOphir Munk 
172798c4b12aSOphir Munk /**
172898c4b12aSOphir Munk  * Get MAC address by querying netdevice.
172998c4b12aSOphir Munk  *
173098c4b12aSOphir Munk  * @param[in] dev
173198c4b12aSOphir Munk  *   Pointer to Ethernet device.
173298c4b12aSOphir Munk  * @param[out] mac
173398c4b12aSOphir Munk  *   MAC address output buffer.
173498c4b12aSOphir Munk  *
173598c4b12aSOphir Munk  * @return
173698c4b12aSOphir Munk  *   0 on success, a negative errno value otherwise and rte_errno is set.
173798c4b12aSOphir Munk  */
173898c4b12aSOphir Munk int
173998c4b12aSOphir Munk mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[RTE_ETHER_ADDR_LEN])
174098c4b12aSOphir Munk {
174198c4b12aSOphir Munk 	struct ifreq request;
174298c4b12aSOphir Munk 	int ret;
174398c4b12aSOphir Munk 
174498c4b12aSOphir Munk 	ret = mlx5_ifreq(dev, SIOCGIFHWADDR, &request);
174598c4b12aSOphir Munk 	if (ret)
174698c4b12aSOphir Munk 		return ret;
174798c4b12aSOphir Munk 	memcpy(mac, request.ifr_hwaddr.sa_data, RTE_ETHER_ADDR_LEN);
174898c4b12aSOphir Munk 	return 0;
174998c4b12aSOphir Munk }
1750e8482187SBing Zhao 
1751e8482187SBing Zhao /*
1752e8482187SBing Zhao  * Query dropless_rq private flag value provided by ETHTOOL.
1753e8482187SBing Zhao  *
1754e8482187SBing Zhao  * @param dev
1755e8482187SBing Zhao  *   Pointer to Ethernet device.
1756e8482187SBing Zhao  *
1757e8482187SBing Zhao  * @return
1758e8482187SBing Zhao  *   - 0 on success, flag is not set.
1759e8482187SBing Zhao  *   - 1 on success, flag is set.
1760e8482187SBing Zhao  *   - negative errno value otherwise and rte_errno is set.
1761e8482187SBing Zhao  */
1762e8482187SBing Zhao int mlx5_get_flag_dropless_rq(struct rte_eth_dev *dev)
1763e8482187SBing Zhao {
1764ccc6ea5dSBing Zhao 	struct ethtool_sset_info *sset_info = NULL;
1765e8482187SBing Zhao 	struct ethtool_drvinfo drvinfo;
1766e8482187SBing Zhao 	struct ifreq ifr;
1767e8482187SBing Zhao 	struct ethtool_gstrings *strings = NULL;
1768e8482187SBing Zhao 	struct ethtool_value flags;
1769e8482187SBing Zhao 	const int32_t flag_len = sizeof(flags.data) * CHAR_BIT;
1770e8482187SBing Zhao 	int32_t str_sz;
1771e8482187SBing Zhao 	int32_t len;
1772e8482187SBing Zhao 	int32_t i;
1773e8482187SBing Zhao 	int ret;
1774e8482187SBing Zhao 
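	/*
	 * Three ETHTOOL steps: query the number of private flags
	 * (ETHTOOL_GSSET_INFO, with ETHTOOL_GDRVINFO as a fallback),
	 * fetch the flag name strings (ETHTOOL_GSTRINGS) to locate
	 * "dropless_rq", then read the flag bitmap (ETHTOOL_GPFLAGS).
	 */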
1775ccc6ea5dSBing Zhao 	sset_info = mlx5_malloc(0, sizeof(struct ethtool_sset_info) +
1776ccc6ea5dSBing Zhao 			sizeof(uint32_t), 0, SOCKET_ID_ANY);
1777ccc6ea5dSBing Zhao 	if (sset_info == NULL) {
1778ccc6ea5dSBing Zhao 		rte_errno = ENOMEM;
1779ccc6ea5dSBing Zhao 		return -rte_errno;
1780ccc6ea5dSBing Zhao 	}
1781ccc6ea5dSBing Zhao 	sset_info->cmd = ETHTOOL_GSSET_INFO;
1782ccc6ea5dSBing Zhao 	sset_info->reserved = 0;
1783ccc6ea5dSBing Zhao 	sset_info->sset_mask = 1ULL << ETH_SS_PRIV_FLAGS;
1784e8482187SBing Zhao 	ifr.ifr_data = (caddr_t)sset_info;
1785e8482187SBing Zhao 	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
1786e8482187SBing Zhao 	if (!ret) {
1787ccc6ea5dSBing Zhao 		const uint32_t *sset_lengths = sset_info->data;
1788e8482187SBing Zhao 
1789ccc6ea5dSBing Zhao 		len = sset_info->sset_mask ? sset_lengths[0] : 0;
1790e8482187SBing Zhao 	} else if (ret == -EOPNOTSUPP) {
1791e8482187SBing Zhao 		drvinfo.cmd = ETHTOOL_GDRVINFO;
1792e8482187SBing Zhao 		ifr.ifr_data = (caddr_t)&drvinfo;
1793e8482187SBing Zhao 		ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
1794e8482187SBing Zhao 		if (ret) {
1795e8482187SBing Zhao 			DRV_LOG(WARNING, "port %u cannot get the driver info",
1796e8482187SBing Zhao 				dev->data->port_id);
1797e8482187SBing Zhao 			goto exit;
1798e8482187SBing Zhao 		}
1799e8482187SBing Zhao 		len = *(uint32_t *)((char *)&drvinfo +
1800e8482187SBing Zhao 			offsetof(struct ethtool_drvinfo, n_priv_flags));
1801e8482187SBing Zhao 	} else {
1802e8482187SBing Zhao 		DRV_LOG(WARNING, "port %u cannot get the sset info",
1803e8482187SBing Zhao 			dev->data->port_id);
1804e8482187SBing Zhao 		goto exit;
1805e8482187SBing Zhao 	}
1806e8482187SBing Zhao 	if (!len) {
1807e8482187SBing Zhao 		DRV_LOG(WARNING, "port %u does not have private flag",
1808e8482187SBing Zhao 			dev->data->port_id);
1809e8482187SBing Zhao 		rte_errno = EOPNOTSUPP;
1810e8482187SBing Zhao 		ret = -rte_errno;
1811e8482187SBing Zhao 		goto exit;
1812e8482187SBing Zhao 	} else if (len > flag_len) {
1813e8482187SBing Zhao 		DRV_LOG(WARNING, "port %u maximal private flags number is %d",
1814e8482187SBing Zhao 			dev->data->port_id, flag_len);
1815e8482187SBing Zhao 		len = flag_len;
1816e8482187SBing Zhao 	}
1817e8482187SBing Zhao 	str_sz = ETH_GSTRING_LEN * len;
1818e8482187SBing Zhao 	strings = (struct ethtool_gstrings *)
1819e8482187SBing Zhao 		  mlx5_malloc(0, str_sz + sizeof(struct ethtool_gstrings), 0,
1820e8482187SBing Zhao 			      SOCKET_ID_ANY);
1821e8482187SBing Zhao 	if (!strings) {
1822e8482187SBing Zhao 		DRV_LOG(WARNING, "port %u unable to allocate memory for"
1823e8482187SBing Zhao 			" private flags", dev->data->port_id);
1824e8482187SBing Zhao 		rte_errno = ENOMEM;
1825e8482187SBing Zhao 		ret = -rte_errno;
1826e8482187SBing Zhao 		goto exit;
1827e8482187SBing Zhao 	}
1828e8482187SBing Zhao 	strings->cmd = ETHTOOL_GSTRINGS;
1829e8482187SBing Zhao 	strings->string_set = ETH_SS_PRIV_FLAGS;
1830e8482187SBing Zhao 	strings->len = len;
1831e8482187SBing Zhao 	ifr.ifr_data = (caddr_t)strings;
1832e8482187SBing Zhao 	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
1833e8482187SBing Zhao 	if (ret) {
1834e8482187SBing Zhao 		DRV_LOG(WARNING, "port %u unable to get private flags strings",
1835e8482187SBing Zhao 			dev->data->port_id);
1836e8482187SBing Zhao 		goto exit;
1837e8482187SBing Zhao 	}
1838e8482187SBing Zhao 	for (i = 0; i < len; i++) {
1839e8482187SBing Zhao 		strings->data[(i + 1) * ETH_GSTRING_LEN - 1] = 0;
1840e8482187SBing Zhao 		if (!strcmp((const char *)strings->data + i * ETH_GSTRING_LEN,
1841e8482187SBing Zhao 			     "dropless_rq"))
1842e8482187SBing Zhao 			break;
1843e8482187SBing Zhao 	}
1844e8482187SBing Zhao 	if (i == len) {
1845e8482187SBing Zhao 		DRV_LOG(WARNING, "port %u does not support dropless_rq",
1846e8482187SBing Zhao 			dev->data->port_id);
1847e8482187SBing Zhao 		rte_errno = EOPNOTSUPP;
1848e8482187SBing Zhao 		ret = -rte_errno;
1849e8482187SBing Zhao 		goto exit;
1850e8482187SBing Zhao 	}
1851e8482187SBing Zhao 	flags.cmd = ETHTOOL_GPFLAGS;
1852e8482187SBing Zhao 	ifr.ifr_data = (caddr_t)&flags;
1853e8482187SBing Zhao 	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
1854e8482187SBing Zhao 	if (ret) {
1855e8482187SBing Zhao 		DRV_LOG(WARNING, "port %u unable to get private flags status",
1856e8482187SBing Zhao 			dev->data->port_id);
1857e8482187SBing Zhao 		goto exit;
1858e8482187SBing Zhao 	}
1859e8482187SBing Zhao 	ret = !!(flags.data & (1U << i));
1860e8482187SBing Zhao exit:
1861e8482187SBing Zhao 	mlx5_free(strings);
1862ccc6ea5dSBing Zhao 	mlx5_free(sset_info);
1863e8482187SBing Zhao 	return ret;
1864e8482187SBing Zhao }
18659b31fc90SViacheslav Ovsiienko 
18669b31fc90SViacheslav Ovsiienko /**
18679b31fc90SViacheslav Ovsiienko  * Unmaps HCA PCI BAR from the current process address space.
18689b31fc90SViacheslav Ovsiienko  *
18699b31fc90SViacheslav Ovsiienko  * @param dev
18709b31fc90SViacheslav Ovsiienko  *   Pointer to Ethernet device structure.
18719b31fc90SViacheslav Ovsiienko  */
18729b31fc90SViacheslav Ovsiienko void mlx5_txpp_unmap_hca_bar(struct rte_eth_dev *dev)
18739b31fc90SViacheslav Ovsiienko {
18749b31fc90SViacheslav Ovsiienko 	struct mlx5_proc_priv *ppriv = dev->process_private;
18759b31fc90SViacheslav Ovsiienko 
18769b31fc90SViacheslav Ovsiienko 	if (ppriv && ppriv->hca_bar) {
18779b31fc90SViacheslav Ovsiienko 		rte_mem_unmap(ppriv->hca_bar, MLX5_ST_SZ_BYTES(initial_seg));
18789b31fc90SViacheslav Ovsiienko 		ppriv->hca_bar = NULL;
18799b31fc90SViacheslav Ovsiienko 	}
18809b31fc90SViacheslav Ovsiienko }
18819b31fc90SViacheslav Ovsiienko 
18829b31fc90SViacheslav Ovsiienko /**
18839b31fc90SViacheslav Ovsiienko  * Maps HCA PCI BAR to the current process address space.
18849b31fc90SViacheslav Ovsiienko  * Stores the pointer in the process private structure, allowing
18859b31fc90SViacheslav Ovsiienko  * the internal and real time counters to be read directly from the HW.
18869b31fc90SViacheslav Ovsiienko  *
18879b31fc90SViacheslav Ovsiienko  * @param dev
18889b31fc90SViacheslav Ovsiienko  *   Pointer to Ethernet device structure.
18899b31fc90SViacheslav Ovsiienko  *
18909b31fc90SViacheslav Ovsiienko  * @return
18919b31fc90SViacheslav Ovsiienko  *   0 on success and a non-NULL pointer to the mapped area stored in the
18929b31fc90SViacheslav Ovsiienko  *   process private structure, a negative errno value otherwise and the pointer stays NULL.
18939b31fc90SViacheslav Ovsiienko  */
18949b31fc90SViacheslav Ovsiienko int mlx5_txpp_map_hca_bar(struct rte_eth_dev *dev)
18959b31fc90SViacheslav Ovsiienko {
18969b31fc90SViacheslav Ovsiienko 	struct mlx5_proc_priv *ppriv = dev->process_private;
18979b31fc90SViacheslav Ovsiienko 	char pci_addr[PCI_PRI_STR_SIZE] = { 0 };
18989b31fc90SViacheslav Ovsiienko 	void *base, *expected = NULL;
18999b31fc90SViacheslav Ovsiienko 	int fd, ret;
19009b31fc90SViacheslav Ovsiienko 
19019b31fc90SViacheslav Ovsiienko 	if (!ppriv) {
19029b31fc90SViacheslav Ovsiienko 		rte_errno = ENOMEM;
19039b31fc90SViacheslav Ovsiienko 		return -rte_errno;
19049b31fc90SViacheslav Ovsiienko 	}
19059b31fc90SViacheslav Ovsiienko 	if (ppriv->hca_bar)
19069b31fc90SViacheslav Ovsiienko 		return 0;
19079b31fc90SViacheslav Ovsiienko 	ret = mlx5_dev_to_pci_str(dev->device, pci_addr, sizeof(pci_addr));
19089b31fc90SViacheslav Ovsiienko 	if (ret < 0)
19099b31fc90SViacheslav Ovsiienko 		return -rte_errno;
19109b31fc90SViacheslav Ovsiienko 	/* Open PCI device resource 0 - HCA initialize segment */
19119b31fc90SViacheslav Ovsiienko 	MKSTR(name, "/sys/bus/pci/devices/%s/resource0", pci_addr);
19129b31fc90SViacheslav Ovsiienko 	fd = open(name, O_RDWR | O_SYNC);
19139b31fc90SViacheslav Ovsiienko 	if (fd == -1) {
19149b31fc90SViacheslav Ovsiienko 		rte_errno = ENOTSUP;
19159b31fc90SViacheslav Ovsiienko 		return -ENOTSUP;
19169b31fc90SViacheslav Ovsiienko 	}
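	/* Map the HCA initial segment read-only to access the internal/real time counters. */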
19179b31fc90SViacheslav Ovsiienko 	base = rte_mem_map(NULL, MLX5_ST_SZ_BYTES(initial_seg),
19189b31fc90SViacheslav Ovsiienko 			   RTE_PROT_READ, RTE_MAP_SHARED, fd, 0);
19199b31fc90SViacheslav Ovsiienko 	close(fd);
19209b31fc90SViacheslav Ovsiienko 	if (!base) {
19219b31fc90SViacheslav Ovsiienko 		rte_errno = ENOTSUP;
19229b31fc90SViacheslav Ovsiienko 		return -ENOTSUP;
19239b31fc90SViacheslav Ovsiienko 	}
19249b31fc90SViacheslav Ovsiienko 	/* Check there is no concurrent mapping in other thread. */
1925e12a0166STyler Retzlaff 	if (!rte_atomic_compare_exchange_strong_explicit(&ppriv->hca_bar, &expected,
1926e12a0166STyler Retzlaff 					 base,
1927e12a0166STyler Retzlaff 					 rte_memory_order_relaxed, rte_memory_order_relaxed))
19289b31fc90SViacheslav Ovsiienko 		rte_mem_unmap(base, MLX5_ST_SZ_BYTES(initial_seg));
19299b31fc90SViacheslav Ovsiienko 	return 0;
19309b31fc90SViacheslav Ovsiienko }
19319b31fc90SViacheslav Ovsiienko 
1932