1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright 2015 6WIND S.A.
5  *   Copyright 2015 Mellanox.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of 6WIND S.A. nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #define _GNU_SOURCE
35 
36 #include <stddef.h>
37 #include <assert.h>
38 #include <unistd.h>
39 #include <stdint.h>
40 #include <stdio.h>
41 #include <string.h>
42 #include <stdlib.h>
43 #include <errno.h>
44 #include <dirent.h>
45 #include <net/if.h>
46 #include <sys/ioctl.h>
47 #include <sys/socket.h>
48 #include <sys/utsname.h>
49 #include <netinet/in.h>
50 #include <linux/ethtool.h>
51 #include <linux/sockios.h>
52 #include <linux/version.h>
53 #include <fcntl.h>
54 #include <stdalign.h>
55 #include <sys/un.h>
56 
57 #include <rte_atomic.h>
58 #include <rte_ethdev.h>
59 #include <rte_bus_pci.h>
60 #include <rte_mbuf.h>
61 #include <rte_common.h>
62 #include <rte_interrupts.h>
63 #include <rte_alarm.h>
64 #include <rte_malloc.h>
65 
66 #include "mlx5.h"
67 #include "mlx5_rxtx.h"
68 #include "mlx5_utils.h"
69 
/* Add defines in case the running kernel differs from the user-space headers. */
71 #ifndef ETHTOOL_GLINKSETTINGS
72 struct ethtool_link_settings {
73 	uint32_t cmd;
74 	uint32_t speed;
75 	uint8_t duplex;
76 	uint8_t port;
77 	uint8_t phy_address;
78 	uint8_t autoneg;
79 	uint8_t mdio_support;
	uint8_t eth_tp_mdix;
81 	uint8_t eth_tp_mdix_ctrl;
82 	int8_t link_mode_masks_nwords;
83 	uint32_t reserved[8];
84 	uint32_t link_mode_masks[];
85 };
86 
87 #define ETHTOOL_GLINKSETTINGS 0x0000004c
88 #define ETHTOOL_LINK_MODE_1000baseT_Full_BIT 5
89 #define ETHTOOL_LINK_MODE_Autoneg_BIT 6
90 #define ETHTOOL_LINK_MODE_1000baseKX_Full_BIT 17
91 #define ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT 18
92 #define ETHTOOL_LINK_MODE_10000baseKR_Full_BIT 19
93 #define ETHTOOL_LINK_MODE_10000baseR_FEC_BIT 20
94 #define ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT 21
95 #define ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT 22
96 #define ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT 23
97 #define ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT 24
98 #define ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT 25
99 #define ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT 26
100 #define ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT 27
101 #define ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT 28
102 #define ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT 29
103 #define ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT 30
104 #endif
105 #ifndef HAVE_ETHTOOL_LINK_MODE_25G
106 #define ETHTOOL_LINK_MODE_25000baseCR_Full_BIT 31
107 #define ETHTOOL_LINK_MODE_25000baseKR_Full_BIT 32
108 #define ETHTOOL_LINK_MODE_25000baseSR_Full_BIT 33
109 #endif
110 #ifndef HAVE_ETHTOOL_LINK_MODE_50G
111 #define ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT 34
112 #define ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT 35
113 #endif
114 #ifndef HAVE_ETHTOOL_LINK_MODE_100G
115 #define ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT 36
116 #define ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT 37
117 #define ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT 38
118 #define ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT 39
119 #endif
120 
121 /**
122  * Check if running as a secondary process.
123  *
124  * @return
125  *   Nonzero if running as a secondary process.
126  */
127 inline int
128 mlx5_is_secondary(void)
129 {
130 	return rte_eal_process_type() == RTE_PROC_SECONDARY;
131 }
132 
133 /**
134  * Get interface name from private structure.
135  *
136  * @param[in] priv
137  *   Pointer to private structure.
138  * @param[out] ifname
139  *   Interface name output buffer.
140  *
141  * @return
142  *   0 on success, -1 on failure and errno is set.
143  */
144 int
145 priv_get_ifname(const struct priv *priv, char (*ifname)[IF_NAMESIZE])
146 {
147 	DIR *dir;
148 	struct dirent *dent;
149 	unsigned int dev_type = 0;
150 	unsigned int dev_port_prev = ~0u;
151 	char match[IF_NAMESIZE] = "";
152 
153 	{
154 		MKSTR(path, "%s/device/net", priv->ibdev_path);
155 
156 		dir = opendir(path);
157 		if (dir == NULL)
158 			return -1;
159 	}
160 	while ((dent = readdir(dir)) != NULL) {
161 		char *name = dent->d_name;
162 		FILE *file;
163 		unsigned int dev_port;
164 		int r;
165 
166 		if ((name[0] == '.') &&
167 		    ((name[1] == '\0') ||
168 		     ((name[1] == '.') && (name[2] == '\0'))))
169 			continue;
170 
171 		MKSTR(path, "%s/device/net/%s/%s",
172 		      priv->ibdev_path, name,
173 		      (dev_type ? "dev_id" : "dev_port"));
174 
175 		file = fopen(path, "rb");
176 		if (file == NULL) {
177 			if (errno != ENOENT)
178 				continue;
179 			/*
180 			 * Switch to dev_id when dev_port does not exist as
181 			 * is the case with Linux kernel versions < 3.15.
182 			 */
183 try_dev_id:
184 			match[0] = '\0';
185 			if (dev_type)
186 				break;
187 			dev_type = 1;
188 			dev_port_prev = ~0u;
189 			rewinddir(dir);
190 			continue;
191 		}
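		/* dev_id entries are formatted in hexadecimal while dev_port
		 * entries are decimal, hence the two scan formats. */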
192 		r = fscanf(file, (dev_type ? "%x" : "%u"), &dev_port);
193 		fclose(file);
194 		if (r != 1)
195 			continue;
196 		/*
197 		 * Switch to dev_id when dev_port returns the same value for
198 		 * all ports. May happen when using a MOFED release older than
199 		 * 3.0 with a Linux kernel >= 3.15.
200 		 */
201 		if (dev_port == dev_port_prev)
202 			goto try_dev_id;
203 		dev_port_prev = dev_port;
204 		if (dev_port == (priv->port - 1u))
205 			snprintf(match, sizeof(match), "%s", name);
206 	}
207 	closedir(dir);
208 	if (match[0] == '\0')
209 		return -1;
210 	strncpy(*ifname, match, sizeof(*ifname));
211 	return 0;
212 }
213 
214 /**
 * Check if the counter is located in the ib counters file.
216  *
217  * @param[in] cntr
218  *   Counter name.
219  *
220  * @return
 *   1 if the counter is located in the ib counters file, 0 otherwise.
222  */
223 int
224 priv_is_ib_cntr(const char *cntr)
225 {
226 	if (!strcmp(cntr, "out_of_buffer"))
227 		return 1;
228 	return 0;
229 }
230 
231 /**
232  * Read from sysfs entry.
233  *
234  * @param[in] priv
235  *   Pointer to private structure.
236  * @param[in] entry
237  *   Entry name relative to sysfs path.
238  * @param[out] buf
239  *   Data output buffer.
240  * @param size
241  *   Buffer size.
242  *
243  * @return
 *   Number of bytes read on success (the requested size), -1 on failure
 *   and errno is set.
245  */
246 static int
247 priv_sysfs_read(const struct priv *priv, const char *entry,
248 		char *buf, size_t size)
249 {
250 	char ifname[IF_NAMESIZE];
251 	FILE *file;
252 	int ret;
253 	int err;
254 
255 	if (priv_get_ifname(priv, &ifname))
256 		return -1;
257 
258 	if (priv_is_ib_cntr(entry)) {
259 		MKSTR(path, "%s/ports/1/hw_counters/%s",
260 		      priv->ibdev_path, entry);
261 		file = fopen(path, "rb");
262 	} else {
263 		MKSTR(path, "%s/device/net/%s/%s",
264 		      priv->ibdev_path, ifname, entry);
265 		file = fopen(path, "rb");
266 	}
267 	if (file == NULL)
268 		return -1;
269 	ret = fread(buf, 1, size, file);
270 	err = errno;
271 	if (((size_t)ret < size) && (ferror(file)))
272 		ret = -1;
273 	else
274 		ret = size;
275 	fclose(file);
276 	errno = err;
277 	return ret;
278 }
279 
280 /**
281  * Write to sysfs entry.
282  *
283  * @param[in] priv
284  *   Pointer to private structure.
285  * @param[in] entry
286  *   Entry name relative to sysfs path.
287  * @param[in] buf
288  *   Data buffer.
289  * @param size
290  *   Buffer size.
291  *
292  * @return
 *   Number of bytes written on success (the requested size), -1 on failure
 *   and errno is set.
294  */
295 static int
296 priv_sysfs_write(const struct priv *priv, const char *entry,
297 		 char *buf, size_t size)
298 {
299 	char ifname[IF_NAMESIZE];
300 	FILE *file;
301 	int ret;
302 	int err;
303 
304 	if (priv_get_ifname(priv, &ifname))
305 		return -1;
306 
307 	MKSTR(path, "%s/device/net/%s/%s", priv->ibdev_path, ifname, entry);
308 
309 	file = fopen(path, "wb");
310 	if (file == NULL)
311 		return -1;
312 	ret = fwrite(buf, 1, size, file);
313 	err = errno;
314 	if (((size_t)ret < size) || (ferror(file)))
315 		ret = -1;
316 	else
317 		ret = size;
318 	fclose(file);
319 	errno = err;
320 	return ret;
321 }
322 
323 /**
324  * Get unsigned long sysfs property.
325  *
326  * @param priv
327  *   Pointer to private structure.
328  * @param[in] name
329  *   Entry name relative to sysfs path.
330  * @param[out] value
331  *   Value output buffer.
332  *
333  * @return
334  *   0 on success, -1 on failure and errno is set.
335  */
336 static int
337 priv_get_sysfs_ulong(struct priv *priv, const char *name, unsigned long *value)
338 {
339 	int ret;
340 	unsigned long value_ret;
341 	char value_str[32];
342 
343 	ret = priv_sysfs_read(priv, name, value_str, (sizeof(value_str) - 1));
344 	if (ret == -1) {
345 		DEBUG("cannot read %s value from sysfs: %s",
346 		      name, strerror(errno));
347 		return -1;
348 	}
349 	value_str[ret] = '\0';
350 	errno = 0;
351 	value_ret = strtoul(value_str, NULL, 0);
352 	if (errno) {
353 		DEBUG("invalid %s value `%s': %s", name, value_str,
354 		      strerror(errno));
355 		return -1;
356 	}
357 	*value = value_ret;
358 	return 0;
359 }
360 
361 /**
362  * Set unsigned long sysfs property.
363  *
364  * @param priv
365  *   Pointer to private structure.
366  * @param[in] name
367  *   Entry name relative to sysfs path.
368  * @param value
369  *   Value to set.
370  *
371  * @return
372  *   0 on success, -1 on failure and errno is set.
373  */
374 static int
375 priv_set_sysfs_ulong(struct priv *priv, const char *name, unsigned long value)
376 {
377 	int ret;
378 	MKSTR(value_str, "%lu", value);
379 
380 	ret = priv_sysfs_write(priv, name, value_str, (sizeof(value_str) - 1));
381 	if (ret == -1) {
382 		DEBUG("cannot write %s `%s' (%lu) to sysfs: %s",
383 		      name, value_str, value, strerror(errno));
384 		return -1;
385 	}
386 	return 0;
387 }
388 
389 /**
390  * Perform ifreq ioctl() on associated Ethernet device.
391  *
392  * @param[in] priv
393  *   Pointer to private structure.
394  * @param req
395  *   Request number to pass to ioctl().
396  * @param[out] ifr
397  *   Interface request structure output buffer.
398  *
399  * @return
400  *   0 on success, -1 on failure and errno is set.
401  */
402 int
403 priv_ifreq(const struct priv *priv, int req, struct ifreq *ifr)
404 {
405 	int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
406 	int ret = -1;
407 
408 	if (sock == -1)
409 		return ret;
410 	if (priv_get_ifname(priv, &ifr->ifr_name) == 0)
411 		ret = ioctl(sock, req, ifr);
412 	close(sock);
413 	return ret;
414 }
415 
416 /**
417  * Return the number of active VFs for the current device.
418  *
419  * @param[in] priv
420  *   Pointer to private structure.
421  * @param[out] num_vfs
422  *   Number of active VFs.
423  *
424  * @return
425  *   0 on success, -1 on failure and errno is set.
426  */
427 int
428 priv_get_num_vfs(struct priv *priv, uint16_t *num_vfs)
429 {
430 	/* The sysfs entry name depends on the operating system. */
431 	const char **name = (const char *[]){
432 		"device/sriov_numvfs",
433 		"device/mlx5_num_vfs",
434 		NULL,
435 	};
436 	int ret;
437 
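	/* Try each candidate entry in turn and stop at the first one that
	 * can be read successfully. */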
438 	do {
439 		unsigned long ulong_num_vfs;
440 
441 		ret = priv_get_sysfs_ulong(priv, *name, &ulong_num_vfs);
442 		if (!ret)
443 			*num_vfs = ulong_num_vfs;
444 	} while (*(++name) && ret);
445 	return ret;
446 }
447 
448 /**
449  * Get device MTU.
450  *
451  * @param priv
452  *   Pointer to private structure.
453  * @param[out] mtu
454  *   MTU value output buffer.
455  *
456  * @return
457  *   0 on success, -1 on failure and errno is set.
458  */
459 int
460 priv_get_mtu(struct priv *priv, uint16_t *mtu)
461 {
462 	unsigned long ulong_mtu;
463 
464 	if (priv_get_sysfs_ulong(priv, "mtu", &ulong_mtu) == -1)
465 		return -1;
466 	*mtu = ulong_mtu;
467 	return 0;
468 }
469 
470 /**
471  * Read device counter from sysfs.
472  *
473  * @param priv
474  *   Pointer to private structure.
475  * @param name
476  *   Counter name.
477  * @param[out] cntr
478  *   Counter output buffer.
479  *
480  * @return
481  *   0 on success, -1 on failure and errno is set.
482  */
483 int
484 priv_get_cntr_sysfs(struct priv *priv, const char *name, uint64_t *cntr)
485 {
486 	unsigned long ulong_ctr;
487 
488 	if (priv_get_sysfs_ulong(priv, name, &ulong_ctr) == -1)
489 		return -1;
490 	*cntr = ulong_ctr;
491 	return 0;
492 }
493 
494 /**
495  * Set device MTU.
496  *
497  * @param priv
498  *   Pointer to private structure.
499  * @param mtu
500  *   MTU value to set.
501  *
502  * @return
503  *   0 on success, -1 on failure and errno is set.
504  */
505 static int
506 priv_set_mtu(struct priv *priv, uint16_t mtu)
507 {
508 	uint16_t new_mtu;
509 
510 	if (priv_set_sysfs_ulong(priv, "mtu", mtu) ||
511 	    priv_get_mtu(priv, &new_mtu))
512 		return -1;
513 	if (new_mtu == mtu)
514 		return 0;
515 	errno = EINVAL;
516 	return -1;
517 }
518 
519 /**
520  * Set device flags.
521  *
522  * @param priv
523  *   Pointer to private structure.
524  * @param keep
525  *   Bitmask for flags that must remain untouched.
526  * @param flags
527  *   Bitmask for flags to modify.
528  *
529  * @return
530  *   0 on success, -1 on failure and errno is set.
531  */
532 int
533 priv_set_flags(struct priv *priv, unsigned int keep, unsigned int flags)
534 {
535 	unsigned long tmp;
536 
537 	if (priv_get_sysfs_ulong(priv, "flags", &tmp) == -1)
538 		return -1;
539 	tmp &= keep;
540 	tmp |= (flags & (~keep));
541 	return priv_set_sysfs_ulong(priv, "flags", tmp);
542 }
543 
544 /**
545  * Ethernet device configuration.
546  *
547  * Prepare the driver for a given number of TX and RX queues.
548  *
549  * @param dev
550  *   Pointer to Ethernet device structure.
551  *
552  * @return
553  *   0 on success, errno value on failure.
554  */
555 static int
556 dev_configure(struct rte_eth_dev *dev)
557 {
558 	struct priv *priv = dev->data->dev_private;
559 	unsigned int rxqs_n = dev->data->nb_rx_queues;
560 	unsigned int txqs_n = dev->data->nb_tx_queues;
561 	unsigned int i;
562 	unsigned int j;
563 	unsigned int reta_idx_n;
564 	const uint8_t use_app_rss_key =
565 		!!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len;
566 
567 	if (use_app_rss_key &&
568 	    (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len !=
569 	     rss_hash_default_key_len)) {
		/* MLX5 RSS only supports a 40-byte key. */
571 		return EINVAL;
572 	}
573 	priv->rss_conf.rss_key =
574 		rte_realloc(priv->rss_conf.rss_key,
575 			    rss_hash_default_key_len, 0);
576 	if (!priv->rss_conf.rss_key) {
577 		ERROR("cannot allocate RSS hash key memory (%u)", rxqs_n);
578 		return ENOMEM;
579 	}
580 	memcpy(priv->rss_conf.rss_key,
581 	       use_app_rss_key ?
582 	       dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key :
583 	       rss_hash_default_key,
584 	       rss_hash_default_key_len);
585 	priv->rss_conf.rss_key_len = rss_hash_default_key_len;
586 	priv->rss_conf.rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
587 	priv->rxqs = (void *)dev->data->rx_queues;
588 	priv->txqs = (void *)dev->data->tx_queues;
589 	if (txqs_n != priv->txqs_n) {
590 		INFO("%p: TX queues number update: %u -> %u",
591 		     (void *)dev, priv->txqs_n, txqs_n);
592 		priv->txqs_n = txqs_n;
593 	}
594 	if (rxqs_n > priv->ind_table_max_size) {
595 		ERROR("cannot handle this many RX queues (%u)", rxqs_n);
596 		return EINVAL;
597 	}
598 	if (rxqs_n == priv->rxqs_n)
599 		return 0;
600 	INFO("%p: RX queues number update: %u -> %u",
601 	     (void *)dev, priv->rxqs_n, rxqs_n);
602 	priv->rxqs_n = rxqs_n;
603 	/* If the requested number of RX queues is not a power of two, use the
604 	 * maximum indirection table size for better balancing.
605 	 * The result is always rounded to the next power of two. */
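	/* For instance, assuming the usual 512-entry maximum table, 8 RX
	 * queues yield an 8-entry indirection table while 6 RX queues expand
	 * to all 512 entries. */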
606 	reta_idx_n = (1 << log2above((rxqs_n & (rxqs_n - 1)) ?
607 				     priv->ind_table_max_size :
608 				     rxqs_n));
609 	if (priv_rss_reta_index_resize(priv, reta_idx_n))
610 		return ENOMEM;
611 	/* When the number of RX queues is not a power of two, the remaining
612 	 * table entries are padded with reused WQs and hashes are not spread
613 	 * uniformly. */
614 	for (i = 0, j = 0; (i != reta_idx_n); ++i) {
615 		(*priv->reta_idx)[i] = j;
616 		if (++j == rxqs_n)
617 			j = 0;
618 	}
619 	return 0;
620 }
621 
622 /**
623  * DPDK callback for Ethernet device configuration.
624  *
625  * @param dev
626  *   Pointer to Ethernet device structure.
627  *
628  * @return
629  *   0 on success, negative errno value on failure.
630  */
631 int
632 mlx5_dev_configure(struct rte_eth_dev *dev)
633 {
634 	struct priv *priv = dev->data->dev_private;
635 	int ret;
636 
637 	if (mlx5_is_secondary())
638 		return -E_RTE_SECONDARY;
639 
640 	priv_lock(priv);
641 	ret = dev_configure(dev);
642 	assert(ret >= 0);
643 	priv_unlock(priv);
644 	return -ret;
645 }
646 
647 /**
648  * DPDK callback to get information about the device.
649  *
650  * @param dev
651  *   Pointer to Ethernet device structure.
652  * @param[out] info
653  *   Info structure output buffer.
654  */
655 void
656 mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
657 {
658 	struct priv *priv = dev->data->dev_private;
659 	unsigned int max;
660 	char ifname[IF_NAMESIZE];
661 
662 	info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
663 
664 	priv_lock(priv);
665 	/* FIXME: we should ask the device for these values. */
666 	info->min_rx_bufsize = 32;
667 	info->max_rx_pktlen = 65536;
668 	/*
669 	 * Since we need one CQ per QP, the limit is the minimum number
670 	 * between the two values.
671 	 */
672 	max = RTE_MIN(priv->device_attr.orig_attr.max_cq,
673 		      priv->device_attr.orig_attr.max_qp);
	/* max_rx_queues and max_tx_queues are uint16_t, cap the value so it
	 * does not wrap around (65536 would become 0). */
675 	if (max >= 65535)
676 		max = 65535;
677 	info->max_rx_queues = max;
678 	info->max_tx_queues = max;
679 	info->max_mac_addrs = RTE_DIM(priv->mac);
680 	info->rx_offload_capa =
681 		(priv->hw_csum ?
682 		 (DEV_RX_OFFLOAD_IPV4_CKSUM |
683 		  DEV_RX_OFFLOAD_UDP_CKSUM |
684 		  DEV_RX_OFFLOAD_TCP_CKSUM) :
685 		 0) |
686 		(priv->hw_vlan_strip ? DEV_RX_OFFLOAD_VLAN_STRIP : 0) |
687 		DEV_RX_OFFLOAD_TIMESTAMP;
688 
689 	if (!priv->mps)
690 		info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
691 	if (priv->hw_csum)
692 		info->tx_offload_capa |=
693 			(DEV_TX_OFFLOAD_IPV4_CKSUM |
694 			 DEV_TX_OFFLOAD_UDP_CKSUM |
695 			 DEV_TX_OFFLOAD_TCP_CKSUM);
696 	if (priv->tso)
697 		info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
698 	if (priv->tunnel_en)
699 		info->tx_offload_capa |= (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
700 					  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
701 					  DEV_TX_OFFLOAD_GRE_TNL_TSO);
702 	if (priv_get_ifname(priv, &ifname) == 0)
703 		info->if_index = if_nametoindex(ifname);
704 	info->reta_size = priv->reta_idx_n ?
705 		priv->reta_idx_n : priv->ind_table_max_size;
706 	info->hash_key_size = priv->rss_conf.rss_key_len;
707 	info->speed_capa = priv->link_speed_capa;
708 	priv_unlock(priv);
709 }
710 
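/**
 * DPDK callback to get supported packet types.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   Pointer to the supported packet types array, NULL otherwise.
 */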
711 const uint32_t *
712 mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev)
713 {
714 	static const uint32_t ptypes[] = {
715 		/* refers to rxq_cq_to_pkt_type() */
716 		RTE_PTYPE_L2_ETHER,
717 		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
718 		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
719 		RTE_PTYPE_L4_NONFRAG,
720 		RTE_PTYPE_L4_FRAG,
721 		RTE_PTYPE_L4_TCP,
722 		RTE_PTYPE_L4_UDP,
723 		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
724 		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
725 		RTE_PTYPE_INNER_L4_NONFRAG,
726 		RTE_PTYPE_INNER_L4_FRAG,
727 		RTE_PTYPE_INNER_L4_TCP,
728 		RTE_PTYPE_INNER_L4_UDP,
729 		RTE_PTYPE_UNKNOWN
730 	};
731 
732 	if (dev->rx_pkt_burst == mlx5_rx_burst ||
733 	    dev->rx_pkt_burst == mlx5_rx_burst_vec)
734 		return ptypes;
735 	return NULL;
736 }
737 
738 /**
 * Retrieve physical link information (unlocked version using legacy ioctl).
740  *
741  * @param dev
742  *   Pointer to Ethernet device structure.
743  * @param wait_to_complete
744  *   Wait for request completion (ignored).
745  */
746 static int
747 mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, int wait_to_complete)
748 {
749 	struct priv *priv = dev->data->dev_private;
750 	struct ethtool_cmd edata = {
751 		.cmd = ETHTOOL_GSET /* Deprecated since Linux v4.5. */
752 	};
753 	struct ifreq ifr;
754 	struct rte_eth_link dev_link;
755 	int link_speed = 0;
756 
757 	/* priv_lock() is not taken to allow concurrent calls. */
758 
759 	(void)wait_to_complete;
760 	if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) {
761 		WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno));
762 		return -1;
763 	}
764 	memset(&dev_link, 0, sizeof(dev_link));
765 	dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
766 				(ifr.ifr_flags & IFF_RUNNING));
767 	ifr.ifr_data = (void *)&edata;
768 	if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
769 		WARN("ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s",
770 		     strerror(errno));
771 		return -1;
772 	}
773 	link_speed = ethtool_cmd_speed(&edata);
774 	if (link_speed == -1)
775 		dev_link.link_speed = 0;
776 	else
777 		dev_link.link_speed = link_speed;
778 	priv->link_speed_capa = 0;
779 	if (edata.supported & SUPPORTED_Autoneg)
780 		priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG;
781 	if (edata.supported & (SUPPORTED_1000baseT_Full |
782 			       SUPPORTED_1000baseKX_Full))
783 		priv->link_speed_capa |= ETH_LINK_SPEED_1G;
784 	if (edata.supported & SUPPORTED_10000baseKR_Full)
785 		priv->link_speed_capa |= ETH_LINK_SPEED_10G;
786 	if (edata.supported & (SUPPORTED_40000baseKR4_Full |
787 			       SUPPORTED_40000baseCR4_Full |
788 			       SUPPORTED_40000baseSR4_Full |
789 			       SUPPORTED_40000baseLR4_Full))
790 		priv->link_speed_capa |= ETH_LINK_SPEED_40G;
791 	dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
792 				ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
793 	dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
794 			ETH_LINK_SPEED_FIXED);
795 	if (memcmp(&dev_link, &dev->data->dev_link, sizeof(dev_link))) {
796 		/* Link status changed. */
797 		dev->data->dev_link = dev_link;
798 		return 0;
799 	}
800 	/* Link status is still the same. */
801 	return -1;
802 }
803 
804 /**
805  * Retrieve physical link information (unlocked version using new ioctl).
806  *
807  * @param dev
808  *   Pointer to Ethernet device structure.
809  * @param wait_to_complete
810  *   Wait for request completion (ignored).
811  */
812 static int
813 mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
814 {
815 	struct priv *priv = dev->data->dev_private;
816 	struct ethtool_link_settings gcmd = { .cmd = ETHTOOL_GLINKSETTINGS };
817 	struct ifreq ifr;
818 	struct rte_eth_link dev_link;
819 	uint64_t sc;
820 
821 	(void)wait_to_complete;
822 	if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) {
823 		WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno));
824 		return -1;
825 	}
826 	memset(&dev_link, 0, sizeof(dev_link));
827 	dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
828 				(ifr.ifr_flags & IFF_RUNNING));
829 	ifr.ifr_data = (void *)&gcmd;
830 	if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
831 		DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s",
832 		      strerror(errno));
833 		return -1;
834 	}
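	/*
	 * The request above was sent with link_mode_masks_nwords set to 0,
	 * so the kernel replied with the negated number of 32-bit words it
	 * needs per link-mode bitmap. Flip the sign and query again with
	 * room for the three bitmaps (supported, advertising and
	 * lp_advertising), hence the factor of 3 below.
	 */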
835 	gcmd.link_mode_masks_nwords = -gcmd.link_mode_masks_nwords;
836 
837 	alignas(struct ethtool_link_settings)
838 	uint8_t data[offsetof(struct ethtool_link_settings, link_mode_masks) +
839 		     sizeof(uint32_t) * gcmd.link_mode_masks_nwords * 3];
840 	struct ethtool_link_settings *ecmd = (void *)data;
841 
842 	*ecmd = gcmd;
843 	ifr.ifr_data = (void *)ecmd;
844 	if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
845 		DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s",
846 		      strerror(errno));
847 		return -1;
848 	}
849 	dev_link.link_speed = ecmd->speed;
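	/* Build a 64-bit view of the "supported" link-mode bitmap; every
	 * mode bit tested below (up to bit 39) fits in the first two words. */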
850 	sc = ecmd->link_mode_masks[0] |
851 		((uint64_t)ecmd->link_mode_masks[1] << 32);
852 	priv->link_speed_capa = 0;
853 	if (sc & MLX5_BITSHIFT(ETHTOOL_LINK_MODE_Autoneg_BIT))
854 		priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG;
855 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseT_Full_BIT) |
856 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT)))
857 		priv->link_speed_capa |= ETH_LINK_SPEED_1G;
858 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT) |
859 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT) |
860 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT)))
861 		priv->link_speed_capa |= ETH_LINK_SPEED_10G;
862 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT) |
863 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT)))
864 		priv->link_speed_capa |= ETH_LINK_SPEED_20G;
865 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT) |
866 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT) |
867 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT) |
868 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT)))
869 		priv->link_speed_capa |= ETH_LINK_SPEED_40G;
870 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT) |
871 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT) |
872 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT) |
873 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT)))
874 		priv->link_speed_capa |= ETH_LINK_SPEED_56G;
875 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT) |
876 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT) |
877 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT)))
878 		priv->link_speed_capa |= ETH_LINK_SPEED_25G;
879 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT) |
880 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT)))
881 		priv->link_speed_capa |= ETH_LINK_SPEED_50G;
882 	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT) |
883 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT) |
884 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT) |
885 		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT)))
886 		priv->link_speed_capa |= ETH_LINK_SPEED_100G;
887 	dev_link.link_duplex = ((ecmd->duplex == DUPLEX_HALF) ?
888 				ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
889 	dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
890 				  ETH_LINK_SPEED_FIXED);
891 	if (memcmp(&dev_link, &dev->data->dev_link, sizeof(dev_link))) {
892 		/* Link status changed. */
893 		dev->data->dev_link = dev_link;
894 		return 0;
895 	}
896 	/* Link status is still the same. */
897 	return -1;
898 }
899 
900 /**
901  * DPDK callback to retrieve physical link information.
902  *
903  * @param dev
904  *   Pointer to Ethernet device structure.
905  * @param wait_to_complete
906  *   Wait for request completion (ignored).
907  */
908 int
909 mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
910 {
911 	struct utsname utsname;
912 	int ver[3];
913 
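	/*
	 * ETHTOOL_GLINKSETTINGS is only trusted on kernels >= 4.9; on older
	 * kernels it may be missing or report unreliable link mode masks, so
	 * fall back to the legacy ETHTOOL_GSET ioctl there.
	 */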
914 	if (uname(&utsname) == -1 ||
915 	    sscanf(utsname.release, "%d.%d.%d",
916 		   &ver[0], &ver[1], &ver[2]) != 3 ||
917 	    KERNEL_VERSION(ver[0], ver[1], ver[2]) < KERNEL_VERSION(4, 9, 0))
918 		return mlx5_link_update_unlocked_gset(dev, wait_to_complete);
919 	return mlx5_link_update_unlocked_gs(dev, wait_to_complete);
920 }
921 
922 /**
923  * DPDK callback to change the MTU.
924  *
925  * @param dev
926  *   Pointer to Ethernet device structure.
 * @param mtu
928  *   New MTU.
929  *
930  * @return
931  *   0 on success, negative errno value on failure.
932  */
933 int
934 mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
935 {
936 	struct priv *priv = dev->data->dev_private;
937 	uint16_t kern_mtu;
938 	int ret = 0;
939 
940 	if (mlx5_is_secondary())
941 		return -E_RTE_SECONDARY;
942 
943 	priv_lock(priv);
944 	ret = priv_get_mtu(priv, &kern_mtu);
945 	if (ret)
946 		goto out;
947 	/* Set kernel interface MTU first. */
948 	ret = priv_set_mtu(priv, mtu);
949 	if (ret)
950 		goto out;
951 	ret = priv_get_mtu(priv, &kern_mtu);
952 	if (ret)
953 		goto out;
954 	if (kern_mtu == mtu) {
955 		priv->mtu = mtu;
956 		DEBUG("adapter port %u MTU set to %u", priv->port, mtu);
957 	}
958 	priv_unlock(priv);
959 	return 0;
960 out:
961 	ret = errno;
962 	WARN("cannot set port %u MTU to %u: %s", priv->port, mtu,
963 	     strerror(ret));
964 	priv_unlock(priv);
965 	assert(ret >= 0);
966 	return -ret;
967 }
968 
969 /**
970  * DPDK callback to get flow control status.
971  *
972  * @param dev
973  *   Pointer to Ethernet device structure.
974  * @param[out] fc_conf
975  *   Flow control output buffer.
976  *
977  * @return
978  *   0 on success, negative errno value on failure.
979  */
980 int
981 mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
982 {
983 	struct priv *priv = dev->data->dev_private;
984 	struct ifreq ifr;
985 	struct ethtool_pauseparam ethpause = {
986 		.cmd = ETHTOOL_GPAUSEPARAM
987 	};
988 	int ret;
989 
990 	if (mlx5_is_secondary())
991 		return -E_RTE_SECONDARY;
992 
993 	ifr.ifr_data = (void *)&ethpause;
994 	priv_lock(priv);
995 	if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
996 		ret = errno;
997 		WARN("ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM)"
998 		     " failed: %s",
999 		     strerror(ret));
1000 		goto out;
1001 	}
1002 
1003 	fc_conf->autoneg = ethpause.autoneg;
1004 	if (ethpause.rx_pause && ethpause.tx_pause)
1005 		fc_conf->mode = RTE_FC_FULL;
1006 	else if (ethpause.rx_pause)
1007 		fc_conf->mode = RTE_FC_RX_PAUSE;
1008 	else if (ethpause.tx_pause)
1009 		fc_conf->mode = RTE_FC_TX_PAUSE;
1010 	else
1011 		fc_conf->mode = RTE_FC_NONE;
1012 	ret = 0;
1013 
1014 out:
1015 	priv_unlock(priv);
1016 	assert(ret >= 0);
1017 	return -ret;
1018 }
1019 
1020 /**
1021  * DPDK callback to modify flow control parameters.
1022  *
1023  * @param dev
1024  *   Pointer to Ethernet device structure.
1025  * @param[in] fc_conf
1026  *   Flow control parameters.
1027  *
1028  * @return
1029  *   0 on success, negative errno value on failure.
1030  */
1031 int
1032 mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1033 {
1034 	struct priv *priv = dev->data->dev_private;
1035 	struct ifreq ifr;
1036 	struct ethtool_pauseparam ethpause = {
1037 		.cmd = ETHTOOL_SPAUSEPARAM
1038 	};
1039 	int ret;
1040 
1041 	if (mlx5_is_secondary())
1042 		return -E_RTE_SECONDARY;
1043 
1044 	ifr.ifr_data = (void *)&ethpause;
1045 	ethpause.autoneg = fc_conf->autoneg;
1046 	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
1047 	    (fc_conf->mode & RTE_FC_RX_PAUSE))
1048 		ethpause.rx_pause = 1;
1049 	else
1050 		ethpause.rx_pause = 0;
1051 
1052 	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
1053 	    (fc_conf->mode & RTE_FC_TX_PAUSE))
1054 		ethpause.tx_pause = 1;
1055 	else
1056 		ethpause.tx_pause = 0;
1057 
1058 	priv_lock(priv);
1059 	if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
1060 		ret = errno;
1061 		WARN("ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)"
1062 		     " failed: %s",
1063 		     strerror(ret));
1064 		goto out;
1065 	}
1066 	ret = 0;
1067 
1068 out:
1069 	priv_unlock(priv);
1070 	assert(ret >= 0);
1071 	return -ret;
1072 }
1073 
1074 /**
1075  * Get PCI information from struct ibv_device.
1076  *
1077  * @param device
 *   Pointer to the IB device structure.
1079  * @param[out] pci_addr
1080  *   PCI bus address output buffer.
1081  *
1082  * @return
1083  *   0 on success, -1 on failure and errno is set.
1084  */
1085 int
1086 mlx5_ibv_device_to_pci_addr(const struct ibv_device *device,
1087 			    struct rte_pci_addr *pci_addr)
1088 {
1089 	FILE *file;
1090 	char line[32];
1091 	MKSTR(path, "%s/device/uevent", device->ibdev_path);
1092 
1093 	file = fopen(path, "rb");
1094 	if (file == NULL)
1095 		return -1;
1096 	while (fgets(line, sizeof(line), file) == line) {
1097 		size_t len = strlen(line);
1098 		int ret;
1099 
1100 		/* Truncate long lines. */
1101 		if (len == (sizeof(line) - 1))
1102 			while (line[(len - 1)] != '\n') {
1103 				ret = fgetc(file);
1104 				if (ret == EOF)
1105 					break;
1106 				line[(len - 1)] = ret;
1107 			}
1108 		/* Extract information. */
1109 		if (sscanf(line,
1110 			   "PCI_SLOT_NAME="
1111 			   "%" SCNx32 ":%" SCNx8 ":%" SCNx8 ".%" SCNx8 "\n",
1112 			   &pci_addr->domain,
1113 			   &pci_addr->bus,
1114 			   &pci_addr->devid,
1115 			   &pci_addr->function) == 4) {
			fclose(file);
			return 0;
		}
	}
	fclose(file);
	/* The uevent file lacks a PCI_SLOT_NAME entry. */
	errno = ENOENT;
	return -1;
1122 }
1123 
1124 /**
1125  * Update the link status.
1126  *
1127  * @param priv
1128  *   Pointer to private structure.
1129  *
1130  * @return
1131  *   Zero if the callback process can be called immediately.
1132  */
1133 static int
1134 priv_link_status_update(struct priv *priv)
1135 {
1136 	struct rte_eth_link *link = &priv->dev->data->dev_link;
1137 
1138 	mlx5_link_update(priv->dev, 0);
1139 	if (((link->link_speed == 0) && link->link_status) ||
1140 		((link->link_speed != 0) && !link->link_status)) {
1141 		/*
1142 		 * Inconsistent status. Event likely occurred before the
1143 		 * kernel netdevice exposes the new status.
1144 		 */
1145 		if (!priv->pending_alarm) {
1146 			priv->pending_alarm = 1;
1147 			rte_eal_alarm_set(MLX5_ALARM_TIMEOUT_US,
1148 					  mlx5_dev_link_status_handler,
1149 					  priv->dev);
1150 		}
1151 		return 1;
1152 	} else if (unlikely(priv->pending_alarm)) {
1153 		/* Link interrupt occurred while alarm is already scheduled. */
1154 		priv->pending_alarm = 0;
1155 		rte_eal_alarm_cancel(mlx5_dev_link_status_handler, priv->dev);
1156 	}
1157 	return 0;
1158 }
1159 
1160 /**
1161  * Device status handler.
1162  *
1163  * @param priv
1164  *   Pointer to private structure.
 *
 * @return
 *   Bitmap of events for which the callback process can be invoked
 *   immediately.
1170  */
1171 static uint32_t
1172 priv_dev_status_handler(struct priv *priv)
1173 {
1174 	struct ibv_async_event event;
1175 	uint32_t ret = 0;
1176 
	/* Read all messages and acknowledge them. */
1178 	for (;;) {
1179 		if (ibv_get_async_event(priv->ctx, &event))
1180 			break;
1181 		if ((event.event_type == IBV_EVENT_PORT_ACTIVE ||
1182 			event.event_type == IBV_EVENT_PORT_ERR) &&
1183 			(priv->dev->data->dev_conf.intr_conf.lsc == 1))
1184 			ret |= (1 << RTE_ETH_EVENT_INTR_LSC);
1185 		else if (event.event_type == IBV_EVENT_DEVICE_FATAL &&
1186 			priv->dev->data->dev_conf.intr_conf.rmv == 1)
1187 			ret |= (1 << RTE_ETH_EVENT_INTR_RMV);
1188 		else
1189 			DEBUG("event type %d on port %d not handled",
1190 			      event.event_type, event.element.port_num);
1191 		ibv_ack_async_event(&event);
1192 	}
1193 	if (ret & (1 << RTE_ETH_EVENT_INTR_LSC))
1194 		if (priv_link_status_update(priv))
1195 			ret &= ~(1 << RTE_ETH_EVENT_INTR_LSC);
1196 	return ret;
1197 }
1198 
1199 /**
1200  * Handle delayed link status event.
1201  *
1202  * @param arg
1203  *   Registered argument.
1204  */
1205 void
1206 mlx5_dev_link_status_handler(void *arg)
1207 {
1208 	struct rte_eth_dev *dev = arg;
1209 	struct priv *priv = dev->data->dev_private;
1210 	int ret;
1211 
1212 	priv_lock(priv);
1213 	assert(priv->pending_alarm == 1);
1214 	priv->pending_alarm = 0;
1215 	ret = priv_link_status_update(priv);
1216 	priv_unlock(priv);
1217 	if (!ret)
1218 		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL,
1219 					      NULL);
1220 }
1221 
1222 /**
1223  * Handle interrupts from the NIC.
1224  *
1227  * @param cb_arg
1228  *   Callback argument.
1229  */
1230 void
1231 mlx5_dev_interrupt_handler(void *cb_arg)
1232 {
1233 	struct rte_eth_dev *dev = cb_arg;
1234 	struct priv *priv = dev->data->dev_private;
1235 	uint32_t events;
1236 
1237 	priv_lock(priv);
1238 	events = priv_dev_status_handler(priv);
1239 	priv_unlock(priv);
1240 	if (events & (1 << RTE_ETH_EVENT_INTR_LSC))
1241 		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL,
1242 					      NULL);
1243 	if (events & (1 << RTE_ETH_EVENT_INTR_RMV))
1244 		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RMV, NULL,
1245 					      NULL);
1246 }
1247 
1248 /**
1249  * Handle interrupts from the socket.
1250  *
1251  * @param cb_arg
1252  *   Callback argument.
1253  */
1254 static void
1255 mlx5_dev_handler_socket(void *cb_arg)
1256 {
1257 	struct rte_eth_dev *dev = cb_arg;
1258 	struct priv *priv = dev->data->dev_private;
1259 
1260 	priv_lock(priv);
1261 	priv_socket_handle(priv);
1262 	priv_unlock(priv);
1263 }
1264 
1265 /**
1266  * Uninstall interrupt handler.
1267  *
1268  * @param priv
1269  *   Pointer to private structure.
1270  * @param dev
1271  *   Pointer to the rte_eth_dev structure.
1272  */
1273 void
1274 priv_dev_interrupt_handler_uninstall(struct priv *priv, struct rte_eth_dev *dev)
1275 {
1276 	if (dev->data->dev_conf.intr_conf.lsc ||
1277 	    dev->data->dev_conf.intr_conf.rmv)
1278 		rte_intr_callback_unregister(&priv->intr_handle,
1279 					     mlx5_dev_interrupt_handler, dev);
1280 	if (priv->primary_socket)
1281 		rte_intr_callback_unregister(&priv->intr_handle_socket,
1282 					     mlx5_dev_handler_socket, dev);
1283 	if (priv->pending_alarm)
1284 		rte_eal_alarm_cancel(mlx5_dev_link_status_handler, dev);
1285 	priv->pending_alarm = 0;
1286 	priv->intr_handle.fd = 0;
1287 	priv->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
1288 	priv->intr_handle_socket.fd = 0;
1289 	priv->intr_handle_socket.type = RTE_INTR_HANDLE_UNKNOWN;
1290 }
1291 
1292 /**
1293  * Install interrupt handler.
1294  *
1295  * @param priv
1296  *   Pointer to private structure.
1297  * @param dev
1298  *   Pointer to the rte_eth_dev structure.
1299  */
1300 void
1301 priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev)
1302 {
1303 	int rc, flags;
1304 
1305 	assert(!mlx5_is_secondary());
1306 	assert(priv->ctx->async_fd > 0);
1307 	flags = fcntl(priv->ctx->async_fd, F_GETFL);
1308 	rc = fcntl(priv->ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
1309 	if (rc < 0) {
		INFO("failed to make the async event queue file descriptor"
		     " non-blocking");
1311 		dev->data->dev_conf.intr_conf.lsc = 0;
1312 		dev->data->dev_conf.intr_conf.rmv = 0;
1313 	}
1314 	if (dev->data->dev_conf.intr_conf.lsc ||
1315 	    dev->data->dev_conf.intr_conf.rmv) {
1316 		priv->intr_handle.fd = priv->ctx->async_fd;
1317 		priv->intr_handle.type = RTE_INTR_HANDLE_EXT;
1318 		rte_intr_callback_register(&priv->intr_handle,
1319 					   mlx5_dev_interrupt_handler, dev);
1320 	}
1321 
1322 	rc = priv_socket_init(priv);
1323 	if (!rc && priv->primary_socket) {
1324 		priv->intr_handle_socket.fd = priv->primary_socket;
1325 		priv->intr_handle_socket.type = RTE_INTR_HANDLE_EXT;
1326 		rte_intr_callback_register(&priv->intr_handle_socket,
1327 					   mlx5_dev_handler_socket, dev);
1328 	}
1329 }
1330 
1331 /**
1332  * Change the link state (UP / DOWN).
1333  *
1334  * @param priv
1335  *   Pointer to private data structure.
1336  * @param dev
1337  *   Pointer to rte_eth_dev structure.
1338  * @param up
1339  *   Nonzero for link up, otherwise link down.
1340  *
1341  * @return
1342  *   0 on success, errno value on failure.
1343  */
1344 static int
1345 priv_dev_set_link(struct priv *priv, struct rte_eth_dev *dev, int up)
1346 {
1347 	int err;
1348 
1349 	if (up) {
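		/* Raise IFF_UP and leave all other interface flags untouched. */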
1350 		err = priv_set_flags(priv, ~IFF_UP, IFF_UP);
1351 		if (err)
1352 			return err;
1353 		priv_dev_select_tx_function(priv, dev);
1354 		priv_dev_select_rx_function(priv, dev);
1355 	} else {
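		/* Clear IFF_UP and leave all other interface flags untouched. */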
1356 		err = priv_set_flags(priv, ~IFF_UP, ~IFF_UP);
1357 		if (err)
1358 			return err;
1359 		dev->rx_pkt_burst = removed_rx_burst;
1360 		dev->tx_pkt_burst = removed_tx_burst;
1361 	}
1362 	return 0;
1363 }
1364 
1365 /**
1366  * DPDK callback to bring the link DOWN.
1367  *
1368  * @param dev
1369  *   Pointer to Ethernet device structure.
1370  *
1371  * @return
1372  *   0 on success, errno value on failure.
1373  */
1374 int
1375 mlx5_set_link_down(struct rte_eth_dev *dev)
1376 {
1377 	struct priv *priv = dev->data->dev_private;
1378 	int err;
1379 
1380 	priv_lock(priv);
1381 	err = priv_dev_set_link(priv, dev, 0);
1382 	priv_unlock(priv);
1383 	return err;
1384 }
1385 
1386 /**
1387  * DPDK callback to bring the link UP.
1388  *
1389  * @param dev
1390  *   Pointer to Ethernet device structure.
1391  *
1392  * @return
1393  *   0 on success, errno value on failure.
1394  */
1395 int
1396 mlx5_set_link_up(struct rte_eth_dev *dev)
1397 {
1398 	struct priv *priv = dev->data->dev_private;
1399 	int err;
1400 
1401 	priv_lock(priv);
1402 	err = priv_dev_set_link(priv, dev, 1);
1403 	priv_unlock(priv);
1404 	return err;
1405 }
1406 
1407 /**
1408  * Configure the TX function to use.
1409  *
1410  * @param priv
1411  *   Pointer to private data structure.
1412  * @param dev
1413  *   Pointer to rte_eth_dev structure.
1414  */
1415 void
1416 priv_dev_select_tx_function(struct priv *priv, struct rte_eth_dev *dev)
1417 {
1418 	assert(priv != NULL);
1419 	assert(dev != NULL);
1420 	dev->tx_pkt_burst = mlx5_tx_burst;
1421 	/* Select appropriate TX function. */
1422 	if (priv->mps == MLX5_MPW_ENHANCED) {
1423 		if (priv_check_vec_tx_support(priv) > 0) {
1424 			if (priv_check_raw_vec_tx_support(priv) > 0)
1425 				dev->tx_pkt_burst = mlx5_tx_burst_raw_vec;
1426 			else
1427 				dev->tx_pkt_burst = mlx5_tx_burst_vec;
1428 			DEBUG("selected Enhanced MPW TX vectorized function");
1429 		} else {
1430 			dev->tx_pkt_burst = mlx5_tx_burst_empw;
1431 			DEBUG("selected Enhanced MPW TX function");
1432 		}
1433 	} else if (priv->mps && priv->txq_inline) {
1434 		dev->tx_pkt_burst = mlx5_tx_burst_mpw_inline;
1435 		DEBUG("selected MPW inline TX function");
1436 	} else if (priv->mps) {
1437 		dev->tx_pkt_burst = mlx5_tx_burst_mpw;
1438 		DEBUG("selected MPW TX function");
1439 	}
1440 }
1441 
1442 /**
1443  * Configure the RX function to use.
1444  *
1445  * @param priv
1446  *   Pointer to private data structure.
1447  * @param dev
1448  *   Pointer to rte_eth_dev structure.
1449  */
1450 void
1451 priv_dev_select_rx_function(struct priv *priv, struct rte_eth_dev *dev)
1452 {
1453 	assert(priv != NULL);
1454 	assert(dev != NULL);
1455 	if (priv_check_vec_rx_support(priv) > 0) {
1456 		dev->rx_pkt_burst = mlx5_rx_burst_vec;
1457 		DEBUG("selected RX vectorized function");
1458 	} else {
1459 		dev->rx_pkt_burst = mlx5_rx_burst;
1460 	}
1461 }
1462