xref: /dpdk/drivers/net/mlx5/mlx5_ethdev.c (revision 1aec68d134c4c36f14a5703b824c5c015da86296)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2015 6WIND S.A.
3  * Copyright 2015 Mellanox Technologies, Ltd
4  */
5 
6 #include <stddef.h>
7 #include <assert.h>
8 #include <inttypes.h>
9 #include <unistd.h>
10 #include <stdint.h>
11 #include <stdio.h>
12 #include <string.h>
13 #include <stdlib.h>
14 #include <errno.h>
15 #include <dirent.h>
16 #include <net/if.h>
17 #include <sys/ioctl.h>
18 #include <sys/socket.h>
19 #include <netinet/in.h>
20 #include <linux/ethtool.h>
21 #include <linux/sockios.h>
22 #include <fcntl.h>
23 #include <stdalign.h>
24 #include <sys/un.h>
25 #include <time.h>
26 
27 #include <rte_atomic.h>
28 #include <rte_ethdev_driver.h>
29 #include <rte_bus_pci.h>
30 #include <rte_mbuf.h>
31 #include <rte_common.h>
32 #include <rte_interrupts.h>
33 #include <rte_malloc.h>
34 #include <rte_string_fns.h>
35 #include <rte_rwlock.h>
36 
37 #include "mlx5.h"
38 #include "mlx5_glue.h"
39 #include "mlx5_rxtx.h"
40 #include "mlx5_utils.h"
41 
42 /* Supported speed values found in /usr/include/linux/ethtool.h */
43 #ifndef HAVE_SUPPORTED_40000baseKR4_Full
44 #define SUPPORTED_40000baseKR4_Full (1 << 23)
45 #endif
46 #ifndef HAVE_SUPPORTED_40000baseCR4_Full
47 #define SUPPORTED_40000baseCR4_Full (1 << 24)
48 #endif
49 #ifndef HAVE_SUPPORTED_40000baseSR4_Full
50 #define SUPPORTED_40000baseSR4_Full (1 << 25)
51 #endif
52 #ifndef HAVE_SUPPORTED_40000baseLR4_Full
53 #define SUPPORTED_40000baseLR4_Full (1 << 26)
54 #endif
55 #ifndef HAVE_SUPPORTED_56000baseKR4_Full
56 #define SUPPORTED_56000baseKR4_Full (1 << 27)
57 #endif
58 #ifndef HAVE_SUPPORTED_56000baseCR4_Full
59 #define SUPPORTED_56000baseCR4_Full (1 << 28)
60 #endif
61 #ifndef HAVE_SUPPORTED_56000baseSR4_Full
62 #define SUPPORTED_56000baseSR4_Full (1 << 29)
63 #endif
64 #ifndef HAVE_SUPPORTED_56000baseLR4_Full
65 #define SUPPORTED_56000baseLR4_Full (1 << 30)
66 #endif
67 
68 /* Add defines in case the running kernel is not the same as user headers. */
69 #ifndef ETHTOOL_GLINKSETTINGS
70 struct ethtool_link_settings {
71 	uint32_t cmd;
72 	uint32_t speed;
73 	uint8_t duplex;
74 	uint8_t port;
75 	uint8_t phy_address;
76 	uint8_t autoneg;
77 	uint8_t mdio_support;
78 	uint8_t eth_tp_mdix;
79 	uint8_t eth_tp_mdix_ctrl;
80 	int8_t link_mode_masks_nwords;
81 	uint32_t reserved[8];
82 	uint32_t link_mode_masks[];
83 };
84 
85 #define ETHTOOL_GLINKSETTINGS 0x0000004c
86 #define ETHTOOL_LINK_MODE_1000baseT_Full_BIT 5
87 #define ETHTOOL_LINK_MODE_Autoneg_BIT 6
88 #define ETHTOOL_LINK_MODE_1000baseKX_Full_BIT 17
89 #define ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT 18
90 #define ETHTOOL_LINK_MODE_10000baseKR_Full_BIT 19
91 #define ETHTOOL_LINK_MODE_10000baseR_FEC_BIT 20
92 #define ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT 21
93 #define ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT 22
94 #define ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT 23
95 #define ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT 24
96 #define ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT 25
97 #define ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT 26
98 #define ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT 27
99 #define ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT 28
100 #define ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT 29
101 #define ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT 30
102 #endif
103 #ifndef HAVE_ETHTOOL_LINK_MODE_25G
104 #define ETHTOOL_LINK_MODE_25000baseCR_Full_BIT 31
105 #define ETHTOOL_LINK_MODE_25000baseKR_Full_BIT 32
106 #define ETHTOOL_LINK_MODE_25000baseSR_Full_BIT 33
107 #endif
108 #ifndef HAVE_ETHTOOL_LINK_MODE_50G
109 #define ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT 34
110 #define ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT 35
111 #endif
112 #ifndef HAVE_ETHTOOL_LINK_MODE_100G
113 #define ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT 36
114 #define ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT 37
115 #define ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT 38
116 #define ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT 39
117 #endif
118 
119 /**
120  * Get master interface name from private structure.
121  *
122  * @param[in] dev
123  *   Pointer to Ethernet device.
124  * @param[out] ifname
125  *   Interface name output buffer.
126  *
127  * @return
128  *   0 on success, a negative errno value otherwise and rte_errno is set.
129  */
130 static int
131 mlx5_get_master_ifname(const struct rte_eth_dev *dev,
132 		       char (*ifname)[IF_NAMESIZE])
133 {
134 	struct mlx5_priv *priv = dev->data->dev_private;
135 	DIR *dir;
136 	struct dirent *dent;
137 	unsigned int dev_type = 0;
138 	unsigned int dev_port_prev = ~0u;
139 	char match[IF_NAMESIZE] = "";
140 
141 	assert(priv);
142 	assert(priv->sh);
143 	{
144 		MKSTR(path, "%s/device/net", priv->sh->ibdev_path);
145 
146 		dir = opendir(path);
147 		if (dir == NULL) {
148 			rte_errno = errno;
149 			return -rte_errno;
150 		}
151 	}
152 	while ((dent = readdir(dir)) != NULL) {
153 		char *name = dent->d_name;
154 		FILE *file;
155 		unsigned int dev_port;
156 		int r;
157 
158 		if ((name[0] == '.') &&
159 		    ((name[1] == '\0') ||
160 		     ((name[1] == '.') && (name[2] == '\0'))))
161 			continue;
162 
163 		MKSTR(path, "%s/device/net/%s/%s",
164 		      priv->sh->ibdev_path, name,
165 		      (dev_type ? "dev_id" : "dev_port"));
166 
167 		file = fopen(path, "rb");
168 		if (file == NULL) {
169 			if (errno != ENOENT)
170 				continue;
171 			/*
172 			 * Switch to dev_id when dev_port does not exist as
173 			 * is the case with Linux kernel versions < 3.15.
174 			 */
175 try_dev_id:
176 			match[0] = '\0';
177 			if (dev_type)
178 				break;
179 			dev_type = 1;
180 			dev_port_prev = ~0u;
181 			rewinddir(dir);
182 			continue;
183 		}
184 		r = fscanf(file, (dev_type ? "%x" : "%u"), &dev_port);
185 		fclose(file);
186 		if (r != 1)
187 			continue;
188 		/*
189 		 * Switch to dev_id when dev_port returns the same value for
190 		 * all ports. May happen when using a MOFED release older than
191 		 * 3.0 with a Linux kernel >= 3.15.
192 		 */
193 		if (dev_port == dev_port_prev)
194 			goto try_dev_id;
195 		dev_port_prev = dev_port;
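		/* The master interface reports dev_port (or dev_id) 0. */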
196 		if (dev_port == 0)
197 			strlcpy(match, name, sizeof(match));
198 	}
199 	closedir(dir);
200 	if (match[0] == '\0') {
201 		rte_errno = ENOENT;
202 		return -rte_errno;
203 	}
204 	strncpy(*ifname, match, sizeof(*ifname));
205 	return 0;
206 }
207 
208 /**
209  * Get interface name from private structure.
210  *
211  * This is a port representor-aware version of mlx5_get_master_ifname().
212  *
213  * @param[in] dev
214  *   Pointer to Ethernet device.
215  * @param[out] ifname
216  *   Interface name output buffer.
217  *
218  * @return
219  *   0 on success, a negative errno value otherwise and rte_errno is set.
220  */
221 int
222 mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE])
223 {
224 	struct mlx5_priv *priv = dev->data->dev_private;
225 	unsigned int ifindex =
226 		priv->nl_socket_rdma >= 0 ?
227 		mlx5_nl_ifindex(priv->nl_socket_rdma,
228 				priv->sh->ibdev_name,
229 				priv->ibv_port) : 0;
230 
231 	if (!ifindex) {
232 		if (!priv->representor)
233 			return mlx5_get_master_ifname(dev, ifname);
234 		rte_errno = ENXIO;
235 		return -rte_errno;
236 	}
237 	if (if_indextoname(ifindex, &(*ifname)[0]))
238 		return 0;
239 	rte_errno = errno;
240 	return -rte_errno;
241 }
242 
243 /**
244  * Get the interface index from device name.
245  *
246  * @param[in] dev
247  *   Pointer to Ethernet device.
248  *
249  * @return
250  *   Nonzero interface index on success, zero otherwise and rte_errno is set.
251  */
252 unsigned int
253 mlx5_ifindex(const struct rte_eth_dev *dev)
254 {
255 	char ifname[IF_NAMESIZE];
256 	unsigned int ifindex;
257 
258 	if (mlx5_get_ifname(dev, &ifname))
259 		return 0;
260 	ifindex = if_nametoindex(ifname);
261 	if (!ifindex)
262 		rte_errno = errno;
263 	return ifindex;
264 }
265 
266 /**
267  * Perform ifreq ioctl() on associated Ethernet device.
268  *
269  * @param[in] dev
270  *   Pointer to Ethernet device.
271  * @param req
272  *   Request number to pass to ioctl().
273  * @param[out] ifr
274  *   Interface request structure output buffer.
275  *
276  * @return
277  *   0 on success, a negative errno value otherwise and rte_errno is set.
278  */
279 int
280 mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr)
281 {
282 	int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
283 	int ret = 0;
284 
285 	if (sock == -1) {
286 		rte_errno = errno;
287 		return -rte_errno;
288 	}
289 	ret = mlx5_get_ifname(dev, &ifr->ifr_name);
290 	if (ret)
291 		goto error;
292 	ret = ioctl(sock, req, ifr);
293 	if (ret == -1) {
294 		rte_errno = errno;
295 		goto error;
296 	}
297 	close(sock);
298 	return 0;
299 error:
300 	close(sock);
301 	return -rte_errno;
302 }
303 
304 /**
305  * Get device MTU.
306  *
307  * @param dev
308  *   Pointer to Ethernet device.
309  * @param[out] mtu
310  *   MTU value output buffer.
311  *
312  * @return
313  *   0 on success, a negative errno value otherwise and rte_errno is set.
314  */
315 int
316 mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu)
317 {
318 	struct ifreq request;
319 	int ret = mlx5_ifreq(dev, SIOCGIFMTU, &request);
320 
321 	if (ret)
322 		return ret;
323 	*mtu = request.ifr_mtu;
324 	return 0;
325 }
326 
327 /**
328  * Set device MTU.
329  *
330  * @param dev
331  *   Pointer to Ethernet device.
332  * @param mtu
333  *   MTU value to set.
334  *
335  * @return
336  *   0 on success, a negative errno value otherwise and rte_errno is set.
337  */
338 static int
339 mlx5_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
340 {
341 	struct ifreq request = { .ifr_mtu = mtu, };
342 
343 	return mlx5_ifreq(dev, SIOCSIFMTU, &request);
344 }
345 
346 /**
347  * Set device flags.
348  *
349  * @param dev
350  *   Pointer to Ethernet device.
351  * @param keep
352  *   Bitmask for flags that must remain untouched.
353  * @param flags
354  *   Bitmask for flags to modify.
355  *
356  * @return
357  *   0 on success, a negative errno value otherwise and rte_errno is set.
358  */
359 int
360 mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep, unsigned int flags)
361 {
362 	struct ifreq request;
363 	int ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &request);
364 
365 	if (ret)
366 		return ret;
367 	request.ifr_flags &= keep;
368 	request.ifr_flags |= flags & ~keep;
369 	return mlx5_ifreq(dev, SIOCSIFFLAGS, &request);
370 }
371 
372 /**
373  * DPDK callback for Ethernet device configuration.
374  *
375  * @param dev
376  *   Pointer to Ethernet device structure.
377  *
378  * @return
379  *   0 on success, a negative errno value otherwise and rte_errno is set.
380  */
381 int
382 mlx5_dev_configure(struct rte_eth_dev *dev)
383 {
384 	struct mlx5_priv *priv = dev->data->dev_private;
385 	unsigned int rxqs_n = dev->data->nb_rx_queues;
386 	unsigned int txqs_n = dev->data->nb_tx_queues;
387 	unsigned int i;
388 	unsigned int j;
389 	unsigned int reta_idx_n;
390 	const uint8_t use_app_rss_key =
391 		!!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
392 	int ret = 0;
393 
394 	if (use_app_rss_key &&
395 	    (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len !=
396 	     MLX5_RSS_HASH_KEY_LEN)) {
397 		DRV_LOG(ERR, "port %u RSS key len must be %s bytes long",
398 			dev->data->port_id, RTE_STR(MLX5_RSS_HASH_KEY_LEN));
399 		rte_errno = EINVAL;
400 		return -rte_errno;
401 	}
402 	priv->rss_conf.rss_key =
403 		rte_realloc(priv->rss_conf.rss_key,
404 			    MLX5_RSS_HASH_KEY_LEN, 0);
405 	if (!priv->rss_conf.rss_key) {
406 		DRV_LOG(ERR, "port %u cannot allocate RSS hash key memory (%u)",
407 			dev->data->port_id, rxqs_n);
408 		rte_errno = ENOMEM;
409 		return -rte_errno;
410 	}
411 	memcpy(priv->rss_conf.rss_key,
412 	       use_app_rss_key ?
413 	       dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key :
414 	       rss_hash_default_key,
415 	       MLX5_RSS_HASH_KEY_LEN);
416 	priv->rss_conf.rss_key_len = MLX5_RSS_HASH_KEY_LEN;
417 	priv->rss_conf.rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
418 	priv->rxqs = (void *)dev->data->rx_queues;
419 	priv->txqs = (void *)dev->data->tx_queues;
420 	if (txqs_n != priv->txqs_n) {
421 		DRV_LOG(INFO, "port %u Tx queues number update: %u -> %u",
422 			dev->data->port_id, priv->txqs_n, txqs_n);
423 		priv->txqs_n = txqs_n;
424 	}
425 	if (rxqs_n > priv->config.ind_table_max_size) {
426 		DRV_LOG(ERR, "port %u cannot handle this many Rx queues (%u)",
427 			dev->data->port_id, rxqs_n);
428 		rte_errno = EINVAL;
429 		return -rte_errno;
430 	}
431 	if (rxqs_n == priv->rxqs_n)
432 		return 0;
433 	DRV_LOG(INFO, "port %u Rx queues number update: %u -> %u",
434 		dev->data->port_id, priv->rxqs_n, rxqs_n);
435 	priv->rxqs_n = rxqs_n;
436 	/* If the requested number of RX queues is not a power of two, use the
437 	 * maximum indirection table size for better balancing.
438 	 * The result is always rounded to the next power of two. */
439 	reta_idx_n = (1 << log2above((rxqs_n & (rxqs_n - 1)) ?
440 				     priv->config.ind_table_max_size :
441 				     rxqs_n));
442 	ret = mlx5_rss_reta_index_resize(dev, reta_idx_n);
443 	if (ret)
444 		return ret;
445 	/* When the number of RX queues is not a power of two, the remaining
446 	 * table entries are padded with reused WQs and hashes are not spread
447 	 * uniformly. */
448 	for (i = 0, j = 0; (i != reta_idx_n); ++i) {
449 		(*priv->reta_idx)[i] = j;
450 		if (++j == rxqs_n)
451 			j = 0;
452 	}
453 	return 0;
454 }
455 
456 /**
457  * Sets default tuning parameters.
458  *
459  * @param dev
460  *   Pointer to Ethernet device.
461  * @param[out] info
462  *   Info structure output buffer.
463  */
464 static void
465 mlx5_set_default_params(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
466 {
467 	struct mlx5_priv *priv = dev->data->dev_private;
468 
469 	/* Minimum CPU utilization. */
470 	info->default_rxportconf.ring_size = 256;
471 	info->default_txportconf.ring_size = 256;
472 	info->default_rxportconf.burst_size = 64;
473 	info->default_txportconf.burst_size = 64;
474 	if (priv->link_speed_capa & ETH_LINK_SPEED_100G) {
475 		info->default_rxportconf.nb_queues = 16;
476 		info->default_txportconf.nb_queues = 16;
477 		if (dev->data->nb_rx_queues > 2 ||
478 		    dev->data->nb_tx_queues > 2) {
479 			/* Max Throughput. */
480 			info->default_rxportconf.ring_size = 2048;
481 			info->default_txportconf.ring_size = 2048;
482 		}
483 	} else {
484 		info->default_rxportconf.nb_queues = 8;
485 		info->default_txportconf.nb_queues = 8;
486 		if (dev->data->nb_rx_queues > 2 ||
487 		    dev->data->nb_tx_queues > 2) {
488 			/* Max Throughput. */
489 			info->default_rxportconf.ring_size = 4096;
490 			info->default_txportconf.ring_size = 4096;
491 		}
492 	}
493 }
494 
495 /**
496  * DPDK callback to get information about the device.
497  *
498  * @param dev
499  *   Pointer to Ethernet device structure.
500  * @param[out] info
501  *   Info structure output buffer.
502  */
503 void
504 mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
505 {
506 	struct mlx5_priv *priv = dev->data->dev_private;
507 	struct mlx5_dev_config *config = &priv->config;
508 	unsigned int max;
509 	char ifname[IF_NAMESIZE];
510 
511 	/* FIXME: we should ask the device for these values. */
512 	info->min_rx_bufsize = 32;
513 	info->max_rx_pktlen = 65536;
514 	/*
515 	 * Since we need one CQ per QP, the limit is the lower of the
516 	 * two values.
517 	 */
518 	max = RTE_MIN(priv->sh->device_attr.orig_attr.max_cq,
519 		      priv->sh->device_attr.orig_attr.max_qp);
520 	/* max_rx_queues is uint16_t, so cap the value to avoid truncation. */
521 	if (max >= 65535)
522 		max = 65535;
523 	info->max_rx_queues = max;
524 	info->max_tx_queues = max;
525 	info->max_mac_addrs = MLX5_MAX_UC_MAC_ADDRESSES;
526 	info->rx_queue_offload_capa = mlx5_get_rx_queue_offloads(dev);
527 	info->rx_offload_capa = (mlx5_get_rx_port_offloads() |
528 				 info->rx_queue_offload_capa);
529 	info->tx_offload_capa = mlx5_get_tx_port_offloads(dev);
530 	if (mlx5_get_ifname(dev, &ifname) == 0)
531 		info->if_index = if_nametoindex(ifname);
532 	info->reta_size = priv->reta_idx_n ?
533 		priv->reta_idx_n : config->ind_table_max_size;
534 	info->hash_key_size = MLX5_RSS_HASH_KEY_LEN;
535 	info->speed_capa = priv->link_speed_capa;
536 	info->flow_type_rss_offloads = ~MLX5_RSS_HF_MASK;
537 	mlx5_set_default_params(dev, info);
538 	info->switch_info.name = dev->data->name;
539 	info->switch_info.domain_id = priv->domain_id;
540 	info->switch_info.port_id = priv->representor_id;
541 	if (priv->representor) {
542 		unsigned int i = mlx5_dev_to_port_id(dev->device, NULL, 0);
543 		uint16_t port_id[i];
544 
545 		i = RTE_MIN(mlx5_dev_to_port_id(dev->device, port_id, i), i);
546 		while (i--) {
547 			struct mlx5_priv *opriv =
548 				rte_eth_devices[port_id[i]].data->dev_private;
549 
550 			if (!opriv ||
551 			    opriv->representor ||
552 			    opriv->domain_id != priv->domain_id)
553 				continue;
554 			/*
555 			 * Override switch name with that of the master
556 			 * device.
557 			 */
558 			info->switch_info.name = opriv->dev_data->name;
559 			break;
560 		}
561 	}
562 }
563 
564 /**
565  * Get firmware version of a device.
566  *
567  * @param dev
568  *   Ethernet device port.
569  * @param fw_ver
570  *   String output allocated by caller.
571  * @param fw_size
572  *   Size of the output string, including terminating null byte.
573  *
574  * @return
575  *   0 on success, or the size of the untruncated string if the buffer is
576  *   too small.
576  */
577 int mlx5_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size)
578 {
579 	struct mlx5_priv *priv = dev->data->dev_private;
580 	struct ibv_device_attr *attr = &priv->sh->device_attr.orig_attr;
581 	size_t size = strnlen(attr->fw_ver, sizeof(attr->fw_ver)) + 1;
582 
583 	if (fw_size < size)
584 		return size;
585 	if (fw_ver != NULL)
586 		strlcpy(fw_ver, attr->fw_ver, fw_size);
587 	return 0;
588 }
589 
590 /**
591  * Get supported packet types.
592  *
593  * @param dev
594  *   Pointer to Ethernet device structure.
595  *
596  * @return
597  *   A pointer to the supported Packet types array.
598  */
599 const uint32_t *
600 mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev)
601 {
602 	static const uint32_t ptypes[] = {
603 		/* refers to rxq_cq_to_pkt_type() */
604 		RTE_PTYPE_L2_ETHER,
605 		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
606 		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
607 		RTE_PTYPE_L4_NONFRAG,
608 		RTE_PTYPE_L4_FRAG,
609 		RTE_PTYPE_L4_TCP,
610 		RTE_PTYPE_L4_UDP,
611 		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
612 		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
613 		RTE_PTYPE_INNER_L4_NONFRAG,
614 		RTE_PTYPE_INNER_L4_FRAG,
615 		RTE_PTYPE_INNER_L4_TCP,
616 		RTE_PTYPE_INNER_L4_UDP,
617 		RTE_PTYPE_UNKNOWN
618 	};
619 
620 	if (dev->rx_pkt_burst == mlx5_rx_burst ||
621 	    dev->rx_pkt_burst == mlx5_rx_burst_mprq ||
622 	    dev->rx_pkt_burst == mlx5_rx_burst_vec)
623 		return ptypes;
624 	return NULL;
625 }
626 
627 /**
628  * Retrieve physical link information (unlocked version using legacy ioctl).
629  *
630  * @param dev
631  *   Pointer to Ethernet device structure.
632  * @param[out] link
633  *   Storage for current link status.
634  *
635  * @return
636  *   0 on success, a negative errno value otherwise and rte_errno is set.
637  */
638 static int
639 mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev,
640 			       struct rte_eth_link *link)
641 {
642 	struct mlx5_priv *priv = dev->data->dev_private;
643 	struct ethtool_cmd edata = {
644 		.cmd = ETHTOOL_GSET /* Deprecated since Linux v4.5. */
645 	};
646 	struct ifreq ifr;
647 	struct rte_eth_link dev_link;
648 	int link_speed = 0;
649 	int ret;
650 
651 	ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr);
652 	if (ret) {
653 		DRV_LOG(WARNING, "port %u ioctl(SIOCGIFFLAGS) failed: %s",
654 			dev->data->port_id, strerror(rte_errno));
655 		return ret;
656 	}
657 	dev_link = (struct rte_eth_link) {
658 		.link_status = ((ifr.ifr_flags & IFF_UP) &&
659 				(ifr.ifr_flags & IFF_RUNNING)),
660 	};
661 	ifr = (struct ifreq) {
662 		.ifr_data = (void *)&edata,
663 	};
664 	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
665 	if (ret) {
666 		DRV_LOG(WARNING,
667 			"port %u ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s",
668 			dev->data->port_id, strerror(rte_errno));
669 		return ret;
670 	}
671 	link_speed = ethtool_cmd_speed(&edata);
672 	if (link_speed == -1)
673 		dev_link.link_speed = ETH_SPEED_NUM_NONE;
674 	else
675 		dev_link.link_speed = link_speed;
676 	priv->link_speed_capa = 0;
677 	if (edata.supported & SUPPORTED_Autoneg)
678 		priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG;
679 	if (edata.supported & (SUPPORTED_1000baseT_Full |
680 			       SUPPORTED_1000baseKX_Full))
681 		priv->link_speed_capa |= ETH_LINK_SPEED_1G;
682 	if (edata.supported & SUPPORTED_10000baseKR_Full)
683 		priv->link_speed_capa |= ETH_LINK_SPEED_10G;
684 	if (edata.supported & (SUPPORTED_40000baseKR4_Full |
685 			       SUPPORTED_40000baseCR4_Full |
686 			       SUPPORTED_40000baseSR4_Full |
687 			       SUPPORTED_40000baseLR4_Full))
688 		priv->link_speed_capa |= ETH_LINK_SPEED_40G;
689 	dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
690 				ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
691 	dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
692 			ETH_LINK_SPEED_FIXED);
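	/*
	 * Speed and status are inconsistent, e.g. the link is still
	 * coming up; report EAGAIN so the caller can retry the query.
	 */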
693 	if (((dev_link.link_speed && !dev_link.link_status) ||
694 	     (!dev_link.link_speed && dev_link.link_status))) {
695 		rte_errno = EAGAIN;
696 		return -rte_errno;
697 	}
698 	*link = dev_link;
699 	return 0;
700 }
701 
702 /**
703  * Retrieve physical link information (unlocked version using new ioctl).
704  *
705  * @param dev
706  *   Pointer to Ethernet device structure.
707  * @param[out] link
708  *   Storage for current link status.
709  *
710  * @return
711  *   0 on success, a negative errno value otherwise and rte_errno is set.
712  */
713 static int
714 mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev,
715 			     struct rte_eth_link *link)
716 
717 {
718 	struct mlx5_priv *priv = dev->data->dev_private;
719 	struct ethtool_link_settings gcmd = { .cmd = ETHTOOL_GLINKSETTINGS };
720 	struct ifreq ifr;
721 	struct rte_eth_link dev_link;
722 	uint64_t sc;
723 	int ret;
724 
725 	ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr);
726 	if (ret) {
727 		DRV_LOG(WARNING, "port %u ioctl(SIOCGIFFLAGS) failed: %s",
728 			dev->data->port_id, strerror(rte_errno));
729 		return ret;
730 	}
731 	dev_link = (struct rte_eth_link) {
732 		.link_status = ((ifr.ifr_flags & IFF_UP) &&
733 				(ifr.ifr_flags & IFF_RUNNING)),
734 	};
735 	ifr = (struct ifreq) {
736 		.ifr_data = (void *)&gcmd,
737 	};
738 	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
739 	if (ret) {
740 		DRV_LOG(DEBUG,
741 			"port %u ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS)"
742 			" failed: %s",
743 			dev->data->port_id, strerror(rte_errno));
744 		return ret;
745 	}
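	/*
	 * The request above was sent with link_mode_masks_nwords set
	 * to 0; the kernel replies with the required number of 32-bit
	 * words as a negative value, so negate it to size the real
	 * request below.
	 */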
746 	gcmd.link_mode_masks_nwords = -gcmd.link_mode_masks_nwords;
747 
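	/*
	 * Buffer sized for the header plus the three link mode bitmaps
	 * (supported, advertising, lp_advertising), nwords words each.
	 */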
748 	alignas(struct ethtool_link_settings)
749 	uint8_t data[offsetof(struct ethtool_link_settings, link_mode_masks) +
750 		     sizeof(uint32_t) * gcmd.link_mode_masks_nwords * 3];
751 	struct ethtool_link_settings *ecmd = (void *)data;
752 
753 	*ecmd = gcmd;
754 	ifr.ifr_data = (void *)ecmd;
755 	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
756 	if (ret) {
757 		DRV_LOG(DEBUG,
758 			"port %u ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS)"
759 			" failed: %s",
760 			dev->data->port_id, strerror(rte_errno));
761 		return ret;
762 	}
763 	dev_link.link_speed = ecmd->speed;
764 	sc = ecmd->link_mode_masks[0] |
765 		((uint64_t)ecmd->link_mode_masks[1] << 32);
766 	priv->link_speed_capa = 0;
767 	if (sc & MLX5_BITSHIFT(ETHTOOL_LINK_MODE_Autoneg_BIT))
768 		priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG;
769 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseT_Full_BIT) |
770 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT)))
771 		priv->link_speed_capa |= ETH_LINK_SPEED_1G;
772 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT) |
773 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT) |
774 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT)))
775 		priv->link_speed_capa |= ETH_LINK_SPEED_10G;
776 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT) |
777 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT)))
778 		priv->link_speed_capa |= ETH_LINK_SPEED_20G;
779 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT) |
780 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT) |
781 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT) |
782 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT)))
783 		priv->link_speed_capa |= ETH_LINK_SPEED_40G;
784 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT) |
785 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT) |
786 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT) |
787 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT)))
788 		priv->link_speed_capa |= ETH_LINK_SPEED_56G;
789 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT) |
790 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT) |
791 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT)))
792 		priv->link_speed_capa |= ETH_LINK_SPEED_25G;
793 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT) |
794 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT)))
795 		priv->link_speed_capa |= ETH_LINK_SPEED_50G;
796 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT) |
797 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT) |
798 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT) |
799 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT)))
800 		priv->link_speed_capa |= ETH_LINK_SPEED_100G;
801 	dev_link.link_duplex = ((ecmd->duplex == DUPLEX_HALF) ?
802 				ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
803 	dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
804 				  ETH_LINK_SPEED_FIXED);
805 	if (((dev_link.link_speed && !dev_link.link_status) ||
806 	     (!dev_link.link_speed && dev_link.link_status))) {
807 		rte_errno = EAGAIN;
808 		return -rte_errno;
809 	}
810 	*link = dev_link;
811 	return 0;
812 }
813 
814 /**
815  * DPDK callback to retrieve physical link information.
816  *
817  * @param dev
818  *   Pointer to Ethernet device structure.
819  * @param wait_to_complete
820  *   Wait for request completion.
821  *
822  * @return
823  *   0 if link status was not updated, positive if it was, a negative errno
824  *   value otherwise and rte_errno is set.
825  */
826 int
827 mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
828 {
829 	int ret;
830 	struct rte_eth_link dev_link;
831 	time_t start_time = time(NULL);
832 
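	/*
	 * Query via ETHTOOL_GLINKSETTINGS first and fall back to the
	 * legacy ETHTOOL_GSET if that fails. EAGAIN means the link
	 * state is not consistent yet; with wait_to_complete set, keep
	 * retrying until MLX5_LINK_STATUS_TIMEOUT expires.
	 */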
833 	do {
834 		ret = mlx5_link_update_unlocked_gs(dev, &dev_link);
835 		if (ret)
836 			ret = mlx5_link_update_unlocked_gset(dev, &dev_link);
837 		if (ret == 0)
838 			break;
839 		/* Handle wait to complete situation. */
840 		if (wait_to_complete && ret == -EAGAIN) {
841 			if (abs((int)difftime(time(NULL), start_time)) <
842 			    MLX5_LINK_STATUS_TIMEOUT) {
843 				usleep(0);
844 				continue;
845 			} else {
846 				rte_errno = EBUSY;
847 				return -rte_errno;
848 			}
849 		} else if (ret < 0) {
850 			return ret;
851 		}
852 	} while (wait_to_complete);
853 	ret = !!memcmp(&dev->data->dev_link, &dev_link,
854 		       sizeof(struct rte_eth_link));
855 	dev->data->dev_link = dev_link;
856 	return ret;
857 }
858 
859 /**
860  * DPDK callback to change the MTU.
861  *
862  * @param dev
863  *   Pointer to Ethernet device structure.
864  * @param in_mtu
865  *   New MTU.
866  *
867  * @return
868  *   0 on success, a negative errno value otherwise and rte_errno is set.
869  */
870 int
871 mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
872 {
873 	struct mlx5_priv *priv = dev->data->dev_private;
874 	uint16_t kern_mtu = 0;
875 	int ret;
876 
877 	ret = mlx5_get_mtu(dev, &kern_mtu);
878 	if (ret)
879 		return ret;
880 	/* Set kernel interface MTU first. */
881 	ret = mlx5_set_mtu(dev, mtu);
882 	if (ret)
883 		return ret;
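	/* Read the MTU back to check the kernel accepted the new value. */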
884 	ret = mlx5_get_mtu(dev, &kern_mtu);
885 	if (ret)
886 		return ret;
887 	if (kern_mtu == mtu) {
888 		priv->mtu = mtu;
889 		DRV_LOG(DEBUG, "port %u adapter MTU set to %u",
890 			dev->data->port_id, mtu);
891 		return 0;
892 	}
893 	rte_errno = EAGAIN;
894 	return -rte_errno;
895 }
896 
897 /**
898  * DPDK callback to get flow control status.
899  *
900  * @param dev
901  *   Pointer to Ethernet device structure.
902  * @param[out] fc_conf
903  *   Flow control output buffer.
904  *
905  * @return
906  *   0 on success, a negative errno value otherwise and rte_errno is set.
907  */
908 int
909 mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
910 {
911 	struct ifreq ifr;
912 	struct ethtool_pauseparam ethpause = {
913 		.cmd = ETHTOOL_GPAUSEPARAM
914 	};
915 	int ret;
916 
917 	ifr.ifr_data = (void *)&ethpause;
918 	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
919 	if (ret) {
920 		DRV_LOG(WARNING,
921 			"port %u ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM) failed:"
922 			" %s",
923 			dev->data->port_id, strerror(rte_errno));
924 		return ret;
925 	}
926 	fc_conf->autoneg = ethpause.autoneg;
927 	if (ethpause.rx_pause && ethpause.tx_pause)
928 		fc_conf->mode = RTE_FC_FULL;
929 	else if (ethpause.rx_pause)
930 		fc_conf->mode = RTE_FC_RX_PAUSE;
931 	else if (ethpause.tx_pause)
932 		fc_conf->mode = RTE_FC_TX_PAUSE;
933 	else
934 		fc_conf->mode = RTE_FC_NONE;
935 	return 0;
936 }
937 
938 /**
939  * DPDK callback to modify flow control parameters.
940  *
941  * @param dev
942  *   Pointer to Ethernet device structure.
943  * @param[in] fc_conf
944  *   Flow control parameters.
945  *
946  * @return
947  *   0 on success, a negative errno value otherwise and rte_errno is set.
948  */
949 int
950 mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
951 {
952 	struct ifreq ifr;
953 	struct ethtool_pauseparam ethpause = {
954 		.cmd = ETHTOOL_SPAUSEPARAM
955 	};
956 	int ret;
957 
958 	ifr.ifr_data = (void *)&ethpause;
959 	ethpause.autoneg = fc_conf->autoneg;
960 	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
961 	    (fc_conf->mode & RTE_FC_RX_PAUSE))
962 		ethpause.rx_pause = 1;
963 	else
964 		ethpause.rx_pause = 0;
965 
966 	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
967 	    (fc_conf->mode & RTE_FC_TX_PAUSE))
968 		ethpause.tx_pause = 1;
969 	else
970 		ethpause.tx_pause = 0;
971 	ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
972 	if (ret) {
973 		DRV_LOG(WARNING,
974 			"port %u ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)"
975 			" failed: %s",
976 			dev->data->port_id, strerror(rte_errno));
977 		return ret;
978 	}
979 	return 0;
980 }
981 
982 /**
983  * Get PCI information from struct ibv_device.
984  *
985  * @param device
986  *   Pointer to Ethernet device structure.
987  * @param[out] pci_addr
988  *   PCI bus address output buffer.
989  *
990  * @return
991  *   0 on success, a negative errno value otherwise and rte_errno is set.
992  */
993 int
994 mlx5_ibv_device_to_pci_addr(const struct ibv_device *device,
995 			    struct rte_pci_addr *pci_addr)
996 {
997 	FILE *file;
998 	char line[32];
999 	MKSTR(path, "%s/device/uevent", device->ibdev_path);
1000 
1001 	file = fopen(path, "rb");
1002 	if (file == NULL) {
1003 		rte_errno = errno;
1004 		return -rte_errno;
1005 	}
1006 	while (fgets(line, sizeof(line), file) == line) {
1007 		size_t len = strlen(line);
1008 		int ret;
1009 
1010 		/* Truncate long lines. */
1011 		if (len == (sizeof(line) - 1))
1012 			while (line[(len - 1)] != '\n') {
1013 				ret = fgetc(file);
1014 				if (ret == EOF)
1015 					break;
1016 				line[(len - 1)] = ret;
1017 			}
1018 		/* Extract information. */
1019 		if (sscanf(line,
1020 			   "PCI_SLOT_NAME="
1021 			   "%" SCNx32 ":%" SCNx8 ":%" SCNx8 ".%" SCNx8 "\n",
1022 			   &pci_addr->domain,
1023 			   &pci_addr->bus,
1024 			   &pci_addr->devid,
1025 			   &pci_addr->function) == 4) {
1026 			fclose(file);
1027 			return 0;
1028 		}
1029 	}
1030 	fclose(file);
1031 	rte_errno = ENOENT;
	return -rte_errno;
1032 }
1033 
1034 /**
1035  * Handle shared asynchronous events from the NIC (removal event
1036  * and link status change). Supports multiport IB devices.
1037  *
1038  * @param cb_arg
1039  *   Callback argument.
1040  */
1041 void
1042 mlx5_dev_interrupt_handler(void *cb_arg)
1043 {
1044 	struct mlx5_ibv_shared *sh = cb_arg;
1045 	struct ibv_async_event event;
1046 
1047 	/* Read all messages from the IB device and acknowledge them. */
1048 	for (;;) {
1049 		struct rte_eth_dev *dev;
1050 		uint32_t tmp;
1051 
1052 		if (mlx5_glue->get_async_event(sh->ctx, &event))
1053 			break;
1054 		/* Retrieve and check IB port index. */
1055 		tmp = (uint32_t)event.element.port_num;
1056 		assert(tmp && (tmp <= sh->max_port));
1057 		if (!tmp ||
1058 		    tmp > sh->max_port ||
1059 		    sh->port[tmp - 1].ih_port_id >= RTE_MAX_ETHPORTS) {
1060 			/*
1061 			 * Invalid IB port index or no handler
1062 			 * installed for this port.
1063 			 */
1064 			mlx5_glue->ack_async_event(&event);
1065 			continue;
1066 		}
1067 		/* Retrieve ethernet device descriptor. */
1068 		tmp = sh->port[tmp - 1].ih_port_id;
1069 		dev = &rte_eth_devices[tmp];
1070 		tmp = 0;
1071 		assert(dev);
1072 		if ((event.event_type == IBV_EVENT_PORT_ACTIVE ||
1073 		     event.event_type == IBV_EVENT_PORT_ERR) &&
1074 			dev->data->dev_conf.intr_conf.lsc) {
1075 			mlx5_glue->ack_async_event(&event);
1076 			if (mlx5_link_update(dev, 0) == -EAGAIN) {
1077 				usleep(0);
1078 				continue;
1079 			}
1080 			_rte_eth_dev_callback_process
1081 				(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
1082 			continue;
1083 		}
1084 		if (event.event_type == IBV_EVENT_DEVICE_FATAL &&
1085 		    dev->data->dev_conf.intr_conf.rmv) {
1086 			mlx5_glue->ack_async_event(&event);
1087 			_rte_eth_dev_callback_process
1088 				(dev, RTE_ETH_EVENT_INTR_RMV, NULL);
1089 			continue;
1090 		}
1091 		DRV_LOG(DEBUG,
1092 			"port %u event type %d not handled",
1093 			dev->data->port_id, event.event_type);
1094 		mlx5_glue->ack_async_event(&event);
1095 	}
1096 }
1097 
1098 /**
1099  * Uninstall shared asynchronous device events handler.
1100  * This function is implemented to support event sharing
1101  * between multiple ports of a single IB device.
1102  *
1103  * @param dev
1104  *   Pointer to Ethernet device.
1105  */
1106 static void
1107 mlx5_dev_shared_handler_uninstall(struct rte_eth_dev *dev)
1108 {
1109 	struct mlx5_priv *priv = dev->data->dev_private;
1110 	struct mlx5_ibv_shared *sh = priv->sh;
1111 
1112 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1113 		return;
1114 	pthread_mutex_lock(&sh->intr_mutex);
1115 	assert(priv->ibv_port);
1116 	assert(priv->ibv_port <= sh->max_port);
1117 	assert(dev->data->port_id < RTE_MAX_ETHPORTS);
1118 	if (sh->port[priv->ibv_port - 1].ih_port_id >= RTE_MAX_ETHPORTS)
1119 		goto exit;
1120 	assert(sh->port[priv->ibv_port - 1].ih_port_id ==
1121 					(uint32_t)dev->data->port_id);
1122 	assert(sh->intr_cnt);
1123 	sh->port[priv->ibv_port - 1].ih_port_id = RTE_MAX_ETHPORTS;
1124 	if (!sh->intr_cnt || --sh->intr_cnt)
1125 		goto exit;
1126 	rte_intr_callback_unregister(&sh->intr_handle,
1127 				     mlx5_dev_interrupt_handler, sh);
1128 	sh->intr_handle.fd = 0;
1129 	sh->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
1130 exit:
1131 	pthread_mutex_unlock(&sh->intr_mutex);
1132 }
1133 
1134 /**
1135  * Install shared asynchronous device events handler.
1136  * This function is implemented to support event sharing
1137  * between multiple ports of a single IB device.
1138  *
1139  * @param dev
1140  *   Pointer to Ethernet device.
1141  */
1142 static void
1143 mlx5_dev_shared_handler_install(struct rte_eth_dev *dev)
1144 {
1145 	struct mlx5_priv *priv = dev->data->dev_private;
1146 	struct mlx5_ibv_shared *sh = priv->sh;
1147 	int ret;
1148 	int flags;
1149 
1150 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1151 		return;
1152 	pthread_mutex_lock(&sh->intr_mutex);
1153 	assert(priv->ibv_port);
1154 	assert(priv->ibv_port <= sh->max_port);
1155 	assert(dev->data->port_id < RTE_MAX_ETHPORTS);
1156 	if (sh->port[priv->ibv_port - 1].ih_port_id < RTE_MAX_ETHPORTS) {
1157 		/* The handler is already installed for this port. */
1158 		assert(sh->intr_cnt);
1159 		goto exit;
1160 	}
1161 	sh->port[priv->ibv_port - 1].ih_port_id = (uint32_t)dev->data->port_id;
1162 	if (sh->intr_cnt) {
1163 		sh->intr_cnt++;
1164 		goto exit;
1165 	}
1166 	/* No shared handler installed. */
1167 	assert(sh->ctx->async_fd > 0);
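	/*
	 * Make the IB async event file descriptor non-blocking so the
	 * shared handler can drain all pending events without stalling.
	 */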
1168 	flags = fcntl(sh->ctx->async_fd, F_GETFL);
1169 	ret = fcntl(sh->ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
1170 	if (ret) {
1171 		DRV_LOG(INFO, "failed to set the async event queue file"
1172 			      " descriptor to non-blocking mode");
1173 		/* Indicate there will be no interrupts. */
1174 		dev->data->dev_conf.intr_conf.lsc = 0;
1175 		dev->data->dev_conf.intr_conf.rmv = 0;
1176 		sh->port[priv->ibv_port - 1].ih_port_id = RTE_MAX_ETHPORTS;
1177 		goto exit;
1178 	}
1179 	sh->intr_handle.fd = sh->ctx->async_fd;
1180 	sh->intr_handle.type = RTE_INTR_HANDLE_EXT;
1181 	rte_intr_callback_register(&sh->intr_handle,
1182 				   mlx5_dev_interrupt_handler, sh);
1183 	sh->intr_cnt++;
1184 exit:
1185 	pthread_mutex_unlock(&sh->intr_mutex);
1186 }
1187 
1188 /**
1189  * Uninstall interrupt handler.
1190  *
1191  * @param dev
1192  *   Pointer to Ethernet device.
1193  */
1194 void
1195 mlx5_dev_interrupt_handler_uninstall(struct rte_eth_dev *dev)
1196 {
1197 	mlx5_dev_shared_handler_uninstall(dev);
1198 }
1199 
1200 /**
1201  * Install interrupt handler.
1202  *
1203  * @param dev
1204  *   Pointer to Ethernet device.
1205  */
1206 void
1207 mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev)
1208 {
1209 	mlx5_dev_shared_handler_install(dev);
1210 }
1211 
1212 /**
1213  * DPDK callback to bring the link DOWN.
1214  *
1215  * @param dev
1216  *   Pointer to Ethernet device structure.
1217  *
1218  * @return
1219  *   0 on success, a negative errno value otherwise and rte_errno is set.
1220  */
1221 int
1222 mlx5_set_link_down(struct rte_eth_dev *dev)
1223 {
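	/* Clear IFF_UP while leaving all other interface flags untouched. */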
1224 	return mlx5_set_flags(dev, ~IFF_UP, ~IFF_UP);
1225 }
1226 
1227 /**
1228  * DPDK callback to bring the link UP.
1229  *
1230  * @param dev
1231  *   Pointer to Ethernet device structure.
1232  *
1233  * @return
1234  *   0 on success, a negative errno value otherwise and rte_errno is set.
1235  */
1236 int
1237 mlx5_set_link_up(struct rte_eth_dev *dev)
1238 {
1239 	return mlx5_set_flags(dev, ~IFF_UP, IFF_UP);
1240 }
1241 
1242 /**
1243  * Select the Tx function to use.
1244  *
1245  * @param dev
1246  *   Pointer to Ethernet device structure.
1247  *
1248  * @return
1249  *   Pointer to selected Tx burst function.
1250  */
1251 eth_tx_burst_t
1252 mlx5_select_tx_function(struct rte_eth_dev *dev)
1253 {
1254 	struct mlx5_priv *priv = dev->data->dev_private;
1255 	eth_tx_burst_t tx_pkt_burst = mlx5_tx_burst;
1256 	struct mlx5_dev_config *config = &priv->config;
1257 	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
1258 	int tso = !!(tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
1259 				    DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
1260 				    DEV_TX_OFFLOAD_GRE_TNL_TSO |
1261 				    DEV_TX_OFFLOAD_IP_TNL_TSO |
1262 				    DEV_TX_OFFLOAD_UDP_TNL_TSO));
1263 	int swp = !!(tx_offloads & (DEV_TX_OFFLOAD_IP_TNL_TSO |
1264 				    DEV_TX_OFFLOAD_UDP_TNL_TSO |
1265 				    DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM));
1266 	int vlan_insert = !!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT);
1267 
1268 	assert(priv != NULL);
1269 	/* Select appropriate TX function. */
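	/*
	 * TSO, software parser and VLAN insertion offloads are only
	 * handled by the default Tx burst routine, so the MPW and
	 * vectorized variants are skipped when any of them is enabled.
	 */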
1270 	if (vlan_insert || tso || swp)
1271 		return tx_pkt_burst;
1272 	if (config->mps == MLX5_MPW_ENHANCED) {
1273 		if (mlx5_check_vec_tx_support(dev) > 0) {
1274 			if (mlx5_check_raw_vec_tx_support(dev) > 0)
1275 				tx_pkt_burst = mlx5_tx_burst_raw_vec;
1276 			else
1277 				tx_pkt_burst = mlx5_tx_burst_vec;
1278 			DRV_LOG(DEBUG,
1279 				"port %u selected enhanced MPW Tx vectorized"
1280 				" function",
1281 				dev->data->port_id);
1282 		} else {
1283 			tx_pkt_burst = mlx5_tx_burst_empw;
1284 			DRV_LOG(DEBUG,
1285 				"port %u selected enhanced MPW Tx function",
1286 				dev->data->port_id);
1287 		}
1288 	} else if (config->mps && (config->txq_inline > 0)) {
1289 		tx_pkt_burst = mlx5_tx_burst_mpw_inline;
1290 		DRV_LOG(DEBUG, "port %u selected MPW inline Tx function",
1291 			dev->data->port_id);
1292 	} else if (config->mps) {
1293 		tx_pkt_burst = mlx5_tx_burst_mpw;
1294 		DRV_LOG(DEBUG, "port %u selected MPW Tx function",
1295 			dev->data->port_id);
1296 	}
1297 	return tx_pkt_burst;
1298 }
1299 
1300 /**
1301  * Select the Rx function to use.
1302  *
1303  * @param dev
1304  *   Pointer to Ethernet device structure.
1305  *
1306  * @return
1307  *   Pointer to selected Rx burst function.
1308  */
1309 eth_rx_burst_t
1310 mlx5_select_rx_function(struct rte_eth_dev *dev)
1311 {
1312 	eth_rx_burst_t rx_pkt_burst = mlx5_rx_burst;
1313 
1314 	assert(dev != NULL);
1315 	if (mlx5_check_vec_rx_support(dev) > 0) {
1316 		rx_pkt_burst = mlx5_rx_burst_vec;
1317 		DRV_LOG(DEBUG, "port %u selected Rx vectorized function",
1318 			dev->data->port_id);
1319 	} else if (mlx5_mprq_enabled(dev)) {
1320 		rx_pkt_burst = mlx5_rx_burst_mprq;
1321 	}
1322 	return rx_pkt_burst;
1323 }
1324 
1325 /**
1326  * Check if mlx5 device was removed.
1327  *
1328  * @param dev
1329  *   Pointer to Ethernet device structure.
1330  *
1331  * @return
1332  *   1 when device is removed, otherwise 0.
1333  */
1334 int
1335 mlx5_is_removed(struct rte_eth_dev *dev)
1336 {
1337 	struct ibv_device_attr device_attr;
1338 	struct mlx5_priv *priv = dev->data->dev_private;
1339 
1340 	if (mlx5_glue->query_device(priv->sh->ctx, &device_attr) == EIO)
1341 		return 1;
1342 	return 0;
1343 }
1344 
1345 /**
1346  * Get port ID list of mlx5 instances sharing a common device.
1347  *
1348  * @param[in] dev
1349  *   Device to look for.
1350  * @param[out] port_list
1351  *   Result buffer for collected port IDs.
1352  * @param port_list_n
1353  *   Maximum number of entries in result buffer. If 0, @p port_list can be
1354  *   NULL.
1355  *
1356  * @return
1357  *   Number of matching instances regardless of the @p port_list_n
1358  *   parameter, 0 if none were found.
1359  */
1360 unsigned int
1361 mlx5_dev_to_port_id(const struct rte_device *dev, uint16_t *port_list,
1362 		    unsigned int port_list_n)
1363 {
1364 	uint16_t id;
1365 	unsigned int n = 0;
1366 
1367 	RTE_ETH_FOREACH_DEV_OF(id, dev) {
1368 		if (n < port_list_n)
1369 			port_list[n] = id;
1370 		n++;
1371 	}
1372 	return n;
1373 }
1374 
1375 /**
1376  * Get switch information associated with network interface.
1377  *
1378  * @param ifindex
1379  *   Network interface index.
1380  * @param[out] info
1381  *   Switch information object, populated in case of success.
1382  *
1383  * @return
1384  *   0 on success, a negative errno value otherwise and rte_errno is set.
1385  */
1386 int
1387 mlx5_sysfs_switch_info(unsigned int ifindex, struct mlx5_switch_info *info)
1388 {
1389 	char ifname[IF_NAMESIZE];
1390 	char port_name[IF_NAMESIZE];
1391 	FILE *file;
1392 	struct mlx5_switch_info data = {
1393 		.master = 0,
1394 		.representor = 0,
1395 		.port_name_new = 0,
1396 		.port_name = 0,
1397 		.switch_id = 0,
1398 	};
1399 	DIR *dir;
1400 	bool port_name_set = false;
1401 	bool port_switch_id_set = false;
1402 	bool device_dir = false;
1403 	char c;
1404 	int ret;
1405 
1406 	if (!if_indextoname(ifindex, ifname)) {
1407 		rte_errno = errno;
1408 		return -rte_errno;
1409 	}
1410 
1411 	MKSTR(phys_port_name, "/sys/class/net/%s/phys_port_name",
1412 	      ifname);
1413 	MKSTR(phys_switch_id, "/sys/class/net/%s/phys_switch_id",
1414 	      ifname);
1415 	MKSTR(pci_device, "/sys/class/net/%s/device",
1416 	      ifname);
1417 
1418 	file = fopen(phys_port_name, "rb");
1419 	if (file != NULL) {
1420 		ret = fscanf(file, "%s", port_name);
1421 		fclose(file);
1422 		if (ret == 1)
1423 			port_name_set = mlx5_translate_port_name(port_name,
1424 								 &data);
1425 	}
1426 	file = fopen(phys_switch_id, "rb");
1427 	if (file == NULL) {
1428 		rte_errno = errno;
1429 		return -rte_errno;
1430 	}
1431 	port_switch_id_set =
1432 		fscanf(file, "%" SCNx64 "%c", &data.switch_id, &c) == 2 &&
1433 		c == '\n';
1434 	fclose(file);
1435 	dir = opendir(pci_device);
1436 	if (dir != NULL) {
1437 		closedir(dir);
1438 		device_dir = true;
1439 	}
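	/*
	 * A master interface has a switch ID and either no
	 * phys_port_name or a PCI device directory; a representor has
	 * both a switch ID and a port name but no device directory.
	 */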
1440 	data.master = port_switch_id_set && (!port_name_set || device_dir);
1441 	data.representor = port_switch_id_set && port_name_set && !device_dir;
1442 	*info = data;
1443 	assert(!(data.master && data.representor));
1444 	if (data.master && data.representor) {
1445 		DRV_LOG(ERR, "ifindex %u device is recognized as master"
1446 			     " and as representor", ifindex);
1447 		rte_errno = ENODEV;
1448 		return -rte_errno;
1449 	}
1450 	return 0;
1451 }
1452 
1453 /**
1454  * Extract port name, as a number, from sysfs or netlink information.
1455  *
1456  * @param[in] port_name_in
1457  *   String representing the port name.
1458  * @param[out] port_info_out
1459  *   Port information, including port name as a number.
1460  *
1461  * @return
1462  *   true on success, false otherwise.
1463  */
1464 bool
1465 mlx5_translate_port_name(const char *port_name_in,
1466 			 struct mlx5_switch_info *port_info_out)
1467 {
1468 	char pf_c1, pf_c2, vf_c1, vf_c2;
1469 	char *end;
1470 	int32_t pf_num;
1471 	bool port_name_set = false;
1472 
1473 	/*
1474 	 * Check for port-name as a string of the form pf0vf0
1475 	 * (supported on kernel ver >= 5.0).
1476 	 */
1477 	port_name_set =	(sscanf(port_name_in, "%c%c%d%c%c%d", &pf_c1, &pf_c2,
1478 				&pf_num, &vf_c1, &vf_c2,
1479 				&port_info_out->port_name) == 6);
1480 	if (port_name_set) {
1481 		port_info_out->port_name_new = 1;
1482 	} else {
1483 		/* Check for port-name as a number (kernel ver < 5.0). */
1484 		errno = 0;
1485 		port_info_out->port_name = strtol(port_name_in, &end, 0);
1486 		if (!errno &&
1487 		    (size_t)(end - port_name_in) == strlen(port_name_in))
1488 			port_name_set = true;
1489 	}
1490 	return port_name_set;
1491 }
1492