/*-
 *   BSD LICENSE
 *
 *   Copyright 2015 6WIND S.A.
 *   Copyright 2015 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stddef.h>
#include <assert.h>
#include <unistd.h>
#include <stdint.h>
#include <inttypes.h> /* SCNx8/SCNx16 conversion specifiers used below. */
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#include <dirent.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/utsname.h>
#include <netinet/in.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <linux/version.h>
#include <fcntl.h>

/* DPDK headers don't like -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <rte_atomic.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_alarm.h>
#include <rte_malloc.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"

/* Add defines in case the running kernel does not match the user headers. */
#ifndef ETHTOOL_GLINKSETTINGS
struct ethtool_link_settings {
	uint32_t cmd;
	uint32_t speed;
	uint8_t duplex;
	uint8_t port;
	uint8_t phy_address;
	uint8_t autoneg;
	uint8_t mdio_support;
	uint8_t eth_tp_mdix;
	uint8_t eth_tp_mdix_ctrl;
	int8_t link_mode_masks_nwords;
	uint32_t reserved[8];
	uint32_t link_mode_masks[];
};

#define ETHTOOL_GLINKSETTINGS 0x0000004c
#define ETHTOOL_LINK_MODE_1000baseT_Full_BIT 5
#define ETHTOOL_LINK_MODE_Autoneg_BIT 6
#define ETHTOOL_LINK_MODE_1000baseKX_Full_BIT 17
#define ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT 18
#define ETHTOOL_LINK_MODE_10000baseKR_Full_BIT 19
#define ETHTOOL_LINK_MODE_10000baseR_FEC_BIT 20
#define ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT 21
#define ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT 22
#define ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT 23
#define ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT 24
#define ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT 25
#define ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT 26
#define ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT 27
#define ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT 28
#define ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT 29
#define ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT 30
#endif
#ifndef HAVE_ETHTOOL_LINK_MODE_25G
#define ETHTOOL_LINK_MODE_25000baseCR_Full_BIT 31
#define ETHTOOL_LINK_MODE_25000baseKR_Full_BIT 32
#define ETHTOOL_LINK_MODE_25000baseSR_Full_BIT 33
#endif
#ifndef HAVE_ETHTOOL_LINK_MODE_50G
#define ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT 34
#define ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT 35
#endif
#ifndef HAVE_ETHTOOL_LINK_MODE_100G
#define ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT 36
#define ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT 37
#define ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT 38
#define ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT 39
#endif

/* The ETHTOOL_LINK_MODE_*_BIT values above are bit positions, not masks;
 * convert a bit position to the corresponding 64-bit mask before testing
 * link_mode_masks words. */
#define MLX5_BITSHIFT(v) (UINT64_C(1) << (v))

/**
 * Return private structure associated with an Ethernet device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   Pointer to private structure.
 */
struct priv *
mlx5_get_priv(struct rte_eth_dev *dev)
{
	struct mlx5_secondary_data *sd;

	if (!mlx5_is_secondary())
		return dev->data->dev_private;
	sd = &mlx5_secondary_data[dev->data->port_id];
	return sd->data.dev_private;
}

/**
 * Check if running as a secondary process.
 *
 * @return
 *   Nonzero if running as a secondary process.
 */
inline int
mlx5_is_secondary(void)
{
	return rte_eal_process_type() != RTE_PROC_PRIMARY;
}

/**
 * Get interface name from private structure.
 *
 * @param[in] priv
 *   Pointer to private structure.
 * @param[out] ifname
 *   Interface name output buffer.
 *
 * @return
 *   0 on success, -1 on failure and errno is set.
 */
int
priv_get_ifname(const struct priv *priv, char (*ifname)[IF_NAMESIZE])
{
	DIR *dir;
	struct dirent *dent;
	unsigned int dev_type = 0;
	unsigned int dev_port_prev = ~0u;
	char match[IF_NAMESIZE] = "";

	{
		MKSTR(path, "%s/device/net", priv->ctx->device->ibdev_path);

		dir = opendir(path);
		if (dir == NULL)
			return -1;
	}
	while ((dent = readdir(dir)) != NULL) {
		char *name = dent->d_name;
		FILE *file;
		unsigned int dev_port;
		int r;

		if ((name[0] == '.') &&
		    ((name[1] == '\0') ||
		     ((name[1] == '.') && (name[2] == '\0'))))
			continue;

		MKSTR(path, "%s/device/net/%s/%s",
		      priv->ctx->device->ibdev_path, name,
		      (dev_type ? "dev_id" : "dev_port"));

		file = fopen(path, "rb");
		if (file == NULL) {
			if (errno != ENOENT)
				continue;
			/*
			 * Switch to dev_id when dev_port does not exist as
			 * is the case with Linux kernel versions < 3.15.
			 */
try_dev_id:
			match[0] = '\0';
			if (dev_type)
				break;
			dev_type = 1;
			dev_port_prev = ~0u;
			rewinddir(dir);
			continue;
		}
		r = fscanf(file, (dev_type ? "%x" : "%u"), &dev_port);
		fclose(file);
		if (r != 1)
			continue;
		/*
		 * Switch to dev_id when dev_port returns the same value for
		 * all ports. May happen when using a MOFED release older than
		 * 3.0 with a Linux kernel >= 3.15.
		 */
		if (dev_port == dev_port_prev)
			goto try_dev_id;
		dev_port_prev = dev_port;
		if (dev_port == (priv->port - 1u))
			snprintf(match, sizeof(match), "%s", name);
	}
	closedir(dir);
	if (match[0] == '\0')
		return -1;
	strncpy(*ifname, match, sizeof(*ifname));
	return 0;
}
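
/*
 * Illustrative sketch (not driver code, device names are assumptions):
 * priv_get_ifname() resolves the netdev behind an IB port by scanning
 * sysfs. For a hypothetical device "mlx5_0" whose port 1 is exposed as
 * "eth2", it matches
 *
 *   /sys/class/infiniband/mlx5_0/device/net/eth2/dev_port -> "0"
 *
 * (dev_port is zero-based) and copies "eth2" to the caller:
 *
 *   char ifname[IF_NAMESIZE];
 *
 *   if (priv_get_ifname(priv, &ifname) == 0)
 *           printf("port %u maps to %s\n", priv->port, ifname);
 */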

/**
 * Read from sysfs entry.
 *
 * @param[in] priv
 *   Pointer to private structure.
 * @param[in] entry
 *   Entry name relative to sysfs path.
 * @param[out] buf
 *   Data output buffer.
 * @param size
 *   Buffer size.
 *
 * @return
 *   The requested size on success, -1 on failure and errno is set.
 */
static int
priv_sysfs_read(const struct priv *priv, const char *entry,
		char *buf, size_t size)
{
	char ifname[IF_NAMESIZE];
	FILE *file;
	int ret;
	int err;

	if (priv_get_ifname(priv, &ifname))
		return -1;

	MKSTR(path, "%s/device/net/%s/%s", priv->ctx->device->ibdev_path,
	      ifname, entry);

	file = fopen(path, "rb");
	if (file == NULL)
		return -1;
	ret = fread(buf, 1, size, file);
	err = errno;
	if (((size_t)ret < size) && (ferror(file)))
		ret = -1;
	else
		ret = size;
	fclose(file);
	errno = err;
	return ret;
}

/**
 * Write to sysfs entry.
 *
 * @param[in] priv
 *   Pointer to private structure.
 * @param[in] entry
 *   Entry name relative to sysfs path.
 * @param[in] buf
 *   Data buffer.
 * @param size
 *   Buffer size.
 *
 * @return
 *   The requested size on success, -1 on failure and errno is set.
 */
static int
priv_sysfs_write(const struct priv *priv, const char *entry,
		 char *buf, size_t size)
{
	char ifname[IF_NAMESIZE];
	FILE *file;
	int ret;
	int err;

	if (priv_get_ifname(priv, &ifname))
		return -1;

	MKSTR(path, "%s/device/net/%s/%s", priv->ctx->device->ibdev_path,
	      ifname, entry);

	file = fopen(path, "wb");
	if (file == NULL)
		return -1;
	ret = fwrite(buf, 1, size, file);
	err = errno;
	if (((size_t)ret < size) || (ferror(file)))
		ret = -1;
	else
		ret = size;
	fclose(file);
	errno = err;
	return ret;
}

/**
 * Get unsigned long sysfs property.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[in] name
 *   Entry name relative to sysfs path.
 * @param[out] value
 *   Value output buffer.
 *
 * @return
 *   0 on success, -1 on failure and errno is set.
 */
static int
priv_get_sysfs_ulong(struct priv *priv, const char *name, unsigned long *value)
{
	int ret;
	unsigned long value_ret;
	char value_str[32];

	ret = priv_sysfs_read(priv, name, value_str, (sizeof(value_str) - 1));
	if (ret == -1) {
		DEBUG("cannot read %s value from sysfs: %s",
		      name, strerror(errno));
		return -1;
	}
	value_str[ret] = '\0';
	errno = 0;
	value_ret = strtoul(value_str, NULL, 0);
	if (errno) {
		DEBUG("invalid %s value `%s': %s", name, value_str,
		      strerror(errno));
		return -1;
	}
	*value = value_ret;
	return 0;
}

/**
 * Set unsigned long sysfs property.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[in] name
 *   Entry name relative to sysfs path.
 * @param value
 *   Value to set.
 *
 * @return
 *   0 on success, -1 on failure and errno is set.
 */
static int
priv_set_sysfs_ulong(struct priv *priv, const char *name, unsigned long value)
{
	int ret;
	MKSTR(value_str, "%lu", value);

	ret = priv_sysfs_write(priv, name, value_str, (sizeof(value_str) - 1));
	if (ret == -1) {
		DEBUG("cannot write %s `%s' (%lu) to sysfs: %s",
		      name, value_str, value, strerror(errno));
		return -1;
	}
	return 0;
}

/**
 * Perform ifreq ioctl() on associated Ethernet device.
 *
 * @param[in] priv
 *   Pointer to private structure.
 * @param req
 *   Request number to pass to ioctl().
 * @param[out] ifr
 *   Interface request structure output buffer.
 *
 * @return
 *   0 on success, -1 on failure and errno is set.
 */
int
priv_ifreq(const struct priv *priv, int req, struct ifreq *ifr)
{
	int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
	int ret = -1;

	if (sock == -1)
		return ret;
	if (priv_get_ifname(priv, &ifr->ifr_name) == 0)
		ret = ioctl(sock, req, ifr);
	close(sock);
	return ret;
}

/**
 * Return the number of active VFs for the current device.
 *
 * @param[in] priv
 *   Pointer to private structure.
 * @param[out] num_vfs
 *   Number of active VFs.
 *
 * @return
 *   0 on success, -1 on failure and errno is set.
 */
int
priv_get_num_vfs(struct priv *priv, uint16_t *num_vfs)
{
	/* The sysfs entry name depends on the operating system. */
	const char **name = (const char *[]){
		"device/sriov_numvfs",
		"device/mlx5_num_vfs",
		NULL,
	};
	int ret;

	do {
		unsigned long ulong_num_vfs;

		ret = priv_get_sysfs_ulong(priv, *name, &ulong_num_vfs);
		if (!ret)
			*num_vfs = ulong_num_vfs;
	} while (*(++name) && ret);
	return ret;
}
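
/*
 * Note on the fallback above (informational, not verified against every
 * kernel): "device/sriov_numvfs" is the standard Linux SR-IOV sysfs
 * entry, while "device/mlx5_num_vfs" is provided by some Mellanox OFED
 * kernels; the loop simply tries each name in turn until one is readable.
 */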

/**
 * Get device MTU.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[out] mtu
 *   MTU value output buffer.
 *
 * @return
 *   0 on success, -1 on failure and errno is set.
 */
int
priv_get_mtu(struct priv *priv, uint16_t *mtu)
{
	unsigned long ulong_mtu;

	if (priv_get_sysfs_ulong(priv, "mtu", &ulong_mtu) == -1)
		return -1;
	*mtu = ulong_mtu;
	return 0;
}

/**
 * Set device MTU.
 *
 * @param priv
 *   Pointer to private structure.
 * @param mtu
 *   MTU value to set.
 *
 * @return
 *   0 on success, -1 on failure and errno is set.
 */
static int
priv_set_mtu(struct priv *priv, uint16_t mtu)
{
	uint16_t new_mtu;

	if (priv_set_sysfs_ulong(priv, "mtu", mtu) ||
	    priv_get_mtu(priv, &new_mtu))
		return -1;
	if (new_mtu == mtu)
		return 0;
	errno = EINVAL;
	return -1;
}

/**
 * Set device flags.
 *
 * @param priv
 *   Pointer to private structure.
 * @param keep
 *   Bitmask for flags that must remain untouched.
 * @param flags
 *   Bitmask for flags to modify.
 *
 * @return
 *   0 on success, -1 on failure and errno is set.
 */
int
priv_set_flags(struct priv *priv, unsigned int keep, unsigned int flags)
{
	unsigned long tmp;

	if (priv_get_sysfs_ulong(priv, "flags", &tmp) == -1)
		return -1;
	tmp &= keep;
	tmp |= (flags & (~keep));
	return priv_set_sysfs_ulong(priv, "flags", tmp);
}
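
/*
 * Worked example: priv_set_flags() keeps the flag bits selected by "keep"
 * and merges in "flags & ~keep". Hence, as done by priv_set_link() below,
 *
 *   priv_set_flags(priv, ~IFF_UP, IFF_UP);   sets IFF_UP (link up)
 *   priv_set_flags(priv, ~IFF_UP, ~IFF_UP);  clears IFF_UP (link down)
 *
 * while all other interface flags are preserved.
 */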

/**
 * Ethernet device configuration.
 *
 * Prepare the driver for a given number of TX and RX queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
dev_configure(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	unsigned int rxqs_n = dev->data->nb_rx_queues;
	unsigned int txqs_n = dev->data->nb_tx_queues;
	unsigned int i;
	unsigned int j;
	unsigned int reta_idx_n;

	priv->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	priv->rxqs = (void *)dev->data->rx_queues;
	priv->txqs = (void *)dev->data->tx_queues;
	if (txqs_n != priv->txqs_n) {
		INFO("%p: TX queues number update: %u -> %u",
		     (void *)dev, priv->txqs_n, txqs_n);
		priv->txqs_n = txqs_n;
	}
	if (rxqs_n > priv->ind_table_max_size) {
		ERROR("cannot handle this many RX queues (%u)", rxqs_n);
		return EINVAL;
	}
	if (rxqs_n == priv->rxqs_n)
		return 0;
	INFO("%p: RX queues number update: %u -> %u",
	     (void *)dev, priv->rxqs_n, rxqs_n);
	priv->rxqs_n = rxqs_n;
	/* If the requested number of RX queues is not a power of two, use the
	 * maximum indirection table size for better balancing.
	 * The result is always rounded to the next power of two. */
	reta_idx_n = (1 << log2above((rxqs_n & (rxqs_n - 1)) ?
				     priv->ind_table_max_size :
				     rxqs_n));
	if (priv_rss_reta_index_resize(priv, reta_idx_n))
		return ENOMEM;
	/* When the number of RX queues is not a power of two, the remaining
	 * table entries are padded with reused WQs and hashes are not spread
	 * uniformly. */
	for (i = 0, j = 0; (i != reta_idx_n); ++i) {
		(*priv->reta_idx)[i] = j;
		if (++j == rxqs_n)
			j = 0;
	}
	return 0;
}
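
/*
 * RETA sizing example (values are assumptions): with rxqs_n = 6 (not a
 * power of two) and ind_table_max_size = 512, dev_configure() selects
 * reta_idx_n = 512 and fills the table by cycling through queue indices
 * 0,1,2,3,4,5,0,1,... so trailing entries reuse existing WQs. With
 * rxqs_n = 8, the table shrinks to exactly 8 entries, one per queue.
 */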

/**
 * DPDK callback for Ethernet device configuration.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
int
mlx5_dev_configure(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	int ret;

	if (mlx5_is_secondary())
		return -E_RTE_SECONDARY;

	priv_lock(priv);
	ret = dev_configure(dev);
	assert(ret >= 0);
	priv_unlock(priv);
	return -ret;
}

/**
 * DPDK callback to get information about the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] info
 *   Info structure output buffer.
 */
void
mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
{
	struct priv *priv = mlx5_get_priv(dev);
	unsigned int max;
	char ifname[IF_NAMESIZE];

	info->pci_dev = RTE_DEV_TO_PCI(dev->device);

	priv_lock(priv);
	/* FIXME: we should ask the device for these values. */
	info->min_rx_bufsize = 32;
	info->max_rx_pktlen = 65536;
	/*
	 * Since we need one CQ per QP, the limit is the minimum number
	 * between the two values.
	 */
	max = ((priv->device_attr.max_cq > priv->device_attr.max_qp) ?
	       priv->device_attr.max_qp : priv->device_attr.max_cq);
	/* Clamp to 65535: max_rx_queues is a uint16_t and larger values
	 * would be truncated. */
	if (max >= 65535)
		max = 65535;
	info->max_rx_queues = max;
	info->max_tx_queues = max;
	info->max_mac_addrs = RTE_DIM(priv->mac);
	info->rx_offload_capa =
		(priv->hw_csum ?
		 (DEV_RX_OFFLOAD_IPV4_CKSUM |
		  DEV_RX_OFFLOAD_UDP_CKSUM |
		  DEV_RX_OFFLOAD_TCP_CKSUM) :
		 0) |
		(priv->hw_vlan_strip ? DEV_RX_OFFLOAD_VLAN_STRIP : 0);
	if (!priv->mps)
		info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
	if (priv->hw_csum)
		info->tx_offload_capa |=
			(DEV_TX_OFFLOAD_IPV4_CKSUM |
			 DEV_TX_OFFLOAD_UDP_CKSUM |
			 DEV_TX_OFFLOAD_TCP_CKSUM);
	if (priv_get_ifname(priv, &ifname) == 0)
		info->if_index = if_nametoindex(ifname);
	/* FIXME: RETA update/query API expects the callee to know the size of
	 * the indirection table, for this PMD the size varies depending on
	 * the number of RX queues, it becomes impossible to find the correct
	 * size if it is not fixed.
	 * The API should be updated to solve this problem. */
	info->reta_size = priv->ind_table_max_size;
	info->hash_key_size = ((*priv->rss_conf) ?
			       (*priv->rss_conf)[0]->rss_key_len :
			       0);
	info->speed_capa = priv->link_speed_capa;
	priv_unlock(priv);
}

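/**
 * DPDK callback to get supported packet types.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   Pointer to an RTE_PTYPE_UNKNOWN-terminated array of supported packet
 *   types, or NULL when the current RX burst function does not report them.
 */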
const uint32_t *
mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/* refers to rxq_cq_to_pkt_type() */
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_INNER_L3_IPV4,
		RTE_PTYPE_INNER_L3_IPV6,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == mlx5_rx_burst)
		return ptypes;
	return NULL;
}
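
/*
 * Usage sketch (application side, not driver code): the array above is
 * consumed through the generic ethdev API, e.g.
 *
 *   uint32_t ptypes[8];
 *   int n = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_ALL_MASK,
 *                                            ptypes, 8);
 *
 * which on this PMD reports IPv4/IPv6 for both outer and inner layers.
 */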

/**
 * Retrieve physical link information (unlocked version using legacy ioctl).
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param wait_to_complete
 *   Wait for request completion (ignored).
 *
 * @return
 *   0 when the link status changed, -1 otherwise (including on failure).
 */
static int
mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, int wait_to_complete)
{
	struct priv *priv = mlx5_get_priv(dev);
	struct ethtool_cmd edata = {
		.cmd = ETHTOOL_GSET /* Deprecated since Linux v4.5. */
	};
	struct ifreq ifr;
	struct rte_eth_link dev_link;
	int link_speed = 0;

	/* priv_lock() is not taken to allow concurrent calls. */

	(void)wait_to_complete;
	if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) {
		WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno));
		return -1;
	}
	memset(&dev_link, 0, sizeof(dev_link));
	dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
				(ifr.ifr_flags & IFF_RUNNING));
	ifr.ifr_data = (void *)&edata;
	if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
		WARN("ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s",
		     strerror(errno));
		return -1;
	}
	link_speed = ethtool_cmd_speed(&edata);
	if (link_speed == -1)
		dev_link.link_speed = 0;
	else
		dev_link.link_speed = link_speed;
	priv->link_speed_capa = 0;
	if (edata.supported & SUPPORTED_Autoneg)
		priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG;
	if (edata.supported & (SUPPORTED_1000baseT_Full |
			       SUPPORTED_1000baseKX_Full))
		priv->link_speed_capa |= ETH_LINK_SPEED_1G;
	if (edata.supported & SUPPORTED_10000baseKR_Full)
		priv->link_speed_capa |= ETH_LINK_SPEED_10G;
	if (edata.supported & (SUPPORTED_40000baseKR4_Full |
			       SUPPORTED_40000baseCR4_Full |
			       SUPPORTED_40000baseSR4_Full |
			       SUPPORTED_40000baseLR4_Full))
		priv->link_speed_capa |= ETH_LINK_SPEED_40G;
	dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
				ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
	dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			ETH_LINK_SPEED_FIXED);
	if (memcmp(&dev_link, &dev->data->dev_link, sizeof(dev_link))) {
		/* Link status changed. */
		dev->data->dev_link = dev_link;
		return 0;
	}
	/* Link status is still the same. */
	return -1;
}

/**
 * Retrieve physical link information (unlocked version using new ioctl).
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param wait_to_complete
 *   Wait for request completion (ignored).
 *
 * @return
 *   0 when the link status changed, -1 otherwise (including on failure).
 */
static int
mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
{
	struct priv *priv = mlx5_get_priv(dev);
	struct ethtool_link_settings edata = {
		.cmd = ETHTOOL_GLINKSETTINGS,
	};
	struct ifreq ifr;
	struct rte_eth_link dev_link;
	uint64_t sc;

	(void)wait_to_complete;
	if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) {
		WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno));
		return -1;
	}
	memset(&dev_link, 0, sizeof(dev_link));
	dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
				(ifr.ifr_flags & IFF_RUNNING));
	ifr.ifr_data = (void *)&edata;
	if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
		DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s",
		      strerror(errno));
		return -1;
	}
	dev_link.link_speed = edata.speed;
	sc = edata.link_mode_masks[0] |
		((uint64_t)edata.link_mode_masks[1] << 32);
	priv->link_speed_capa = 0;
	/* Link modes are bit positions, test them as 64-bit masks. */
	if (sc & MLX5_BITSHIFT(ETHTOOL_LINK_MODE_Autoneg_BIT))
		priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseT_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_1G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_10G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_20G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_40G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_56G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_25G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_50G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_100G;
	dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
				ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
	dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
				  ETH_LINK_SPEED_FIXED);
	if (memcmp(&dev_link, &dev->data->dev_link, sizeof(dev_link))) {
		/* Link status changed. */
		dev->data->dev_link = dev_link;
		return 0;
	}
	/* Link status is still the same. */
	return -1;
}

/**
 * DPDK callback to retrieve physical link information.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param wait_to_complete
 *   Wait for request completion (ignored).
 *
 * @return
 *   0 when the link status changed, -1 otherwise (including on failure).
 */
int
mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	struct utsname utsname;
	int ver[3];

	if (uname(&utsname) == -1 ||
	    sscanf(utsname.release, "%d.%d.%d",
		   &ver[0], &ver[1], &ver[2]) != 3 ||
	    KERNEL_VERSION(ver[0], ver[1], ver[2]) < KERNEL_VERSION(4, 9, 0))
		return mlx5_link_update_unlocked_gset(dev, wait_to_complete);
	return mlx5_link_update_unlocked_gs(dev, wait_to_complete);
}
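
/*
 * Example (release string is illustrative): on a host whose uname(2)
 * release is "4.4.0-93-generic", sscanf() extracts (4, 4, 0);
 * KERNEL_VERSION(4, 4, 0) < KERNEL_VERSION(4, 9, 0), so the legacy
 * ETHTOOL_GSET path is taken. On a 4.9+ kernel, ETHTOOL_GLINKSETTINGS
 * is used instead.
 */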

/**
 * DPDK callback to change the MTU.
 *
 * Setting the MTU affects hardware MRU (packets larger than the MTU cannot be
 * received). Use this as a hint to enable/disable scattered packets support
 * and improve performance when not needed.
 * Since failure is not an option, reconfiguring queues on the fly is not
 * recommended.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param mtu
 *   New MTU.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
int
mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct priv *priv = dev->data->dev_private;
	int ret = 0;
	unsigned int i;
	uint16_t (*rx_func)(void *, struct rte_mbuf **, uint16_t) =
		mlx5_rx_burst;
	unsigned int max_frame_len;
	int rehash;
	int restart = priv->started;

	if (mlx5_is_secondary())
		return -E_RTE_SECONDARY;

	priv_lock(priv);
	/* Set kernel interface MTU first. */
	if (priv_set_mtu(priv, mtu)) {
		ret = errno;
		WARN("cannot set port %u MTU to %u: %s", priv->port, mtu,
		     strerror(ret));
		goto out;
	} else
		DEBUG("adapter port %u MTU set to %u", priv->port, mtu);
	/* Temporarily replace RX handler with a fake one, assuming it has not
	 * been copied elsewhere. */
	dev->rx_pkt_burst = removed_rx_burst;
	/* Make sure everyone has left mlx5_rx_burst() and uses
	 * removed_rx_burst() instead. */
	rte_wmb();
	usleep(1000);
	/* MTU does not include the Ethernet header and CRC. */
	max_frame_len = ETHER_HDR_LEN + mtu + ETHER_CRC_LEN;
	/* Check if at least one queue is going to need a SGE update. */
	for (i = 0; i != priv->rxqs_n; ++i) {
		struct rxq *rxq = (*priv->rxqs)[i];
		unsigned int mb_len;
		unsigned int size = RTE_PKTMBUF_HEADROOM + max_frame_len;
		unsigned int sges_n;

		if (rxq == NULL)
			continue;
		mb_len = rte_pktmbuf_data_room_size(rxq->mp);
		assert(mb_len >= RTE_PKTMBUF_HEADROOM);
		/*
		 * Determine the number of SGEs needed for a full packet
		 * and round it to the next power of two.
		 */
		sges_n = log2above((size / mb_len) + !!(size % mb_len));
		if (sges_n != rxq->sges_n)
			break;
	}
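
	/*
	 * Worked example for the SGE computation above (values are
	 * assumptions): with mtu = 9000, max_frame_len = 14 + 9000 + 4 =
	 * 9018; for 2048-byte mbufs, size = 128 (headroom) + 9018 = 9146.
	 * A full packet then needs 9146 / 2048 rounded up = 5 segments,
	 * and sges_n = log2above(5) = 3, i.e. 1 << 3 = 8 SGEs per packet.
	 */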
	/*
	 * If all queues have the right number of SGEs, a simple rehash
	 * of their buffers is enough, otherwise SGE information can only
	 * be updated in a queue by recreating it. All resources that depend
	 * on queues (flows, indirection tables) must be recreated as well in
	 * that case.
	 */
	rehash = (i == priv->rxqs_n);
	if (!rehash) {
		/* Clean up everything as with mlx5_dev_stop(). */
		priv_special_flow_disable_all(priv);
		priv_mac_addrs_disable(priv);
		priv_destroy_hash_rxqs(priv);
		priv_fdir_disable(priv);
		priv_dev_interrupt_handler_uninstall(priv, dev);
	}
recover:
	/* Reconfigure each RX queue. */
	for (i = 0; (i != priv->rxqs_n); ++i) {
		struct rxq *rxq = (*priv->rxqs)[i];
		struct rxq_ctrl *rxq_ctrl =
			container_of(rxq, struct rxq_ctrl, rxq);
		int sp;
		unsigned int mb_len;
		unsigned int tmp;

		if (rxq == NULL)
			continue;
		mb_len = rte_pktmbuf_data_room_size(rxq->mp);
		assert(mb_len >= RTE_PKTMBUF_HEADROOM);
		/* Toggle scattered support (sp) if necessary. */
		sp = (max_frame_len > (mb_len - RTE_PKTMBUF_HEADROOM));
		/* Provide new values to rxq_setup(). */
		dev->data->dev_conf.rxmode.jumbo_frame = sp;
		dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame_len;
		if (rehash)
			ret = rxq_rehash(dev, rxq_ctrl);
		else
			ret = rxq_ctrl_setup(dev, rxq_ctrl, 1 << rxq->elts_n,
					     rxq_ctrl->socket, NULL, rxq->mp);
		if (!ret)
			continue;
		/* Attempt to roll back in case of error. */
		tmp = (mb_len << rxq->sges_n) - RTE_PKTMBUF_HEADROOM;
		if (max_frame_len != tmp) {
			max_frame_len = tmp;
			goto recover;
		}
		/* Double fault, disable RX. */
		break;
	}
	/*
	 * Use a safe RX burst function in case of error, otherwise mimic
	 * mlx5_dev_start().
	 */
	if (ret) {
		ERROR("unable to reconfigure RX queues, RX disabled");
		rx_func = removed_rx_burst;
	} else if (restart &&
		 !rehash &&
		 !priv_create_hash_rxqs(priv) &&
		 !priv_rehash_flows(priv)) {
		if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_NONE)
			priv_fdir_enable(priv);
		priv_dev_interrupt_handler_install(priv, dev);
	}
	priv->mtu = mtu;
	/* Burst functions can now be called again. */
	rte_wmb();
	dev->rx_pkt_burst = rx_func;
out:
	priv_unlock(priv);
	assert(ret >= 0);
	return -ret;
}

/**
 * DPDK callback to get flow control status.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] fc_conf
 *   Flow control output buffer.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
int
mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct priv *priv = dev->data->dev_private;
	struct ifreq ifr;
	struct ethtool_pauseparam ethpause = {
		.cmd = ETHTOOL_GPAUSEPARAM
	};
	int ret;

	if (mlx5_is_secondary())
		return -E_RTE_SECONDARY;

	ifr.ifr_data = (void *)&ethpause;
	priv_lock(priv);
	if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
		ret = errno;
		WARN("ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM)"
		     " failed: %s",
		     strerror(ret));
		goto out;
	}

	fc_conf->autoneg = ethpause.autoneg;
	if (ethpause.rx_pause && ethpause.tx_pause)
		fc_conf->mode = RTE_FC_FULL;
	else if (ethpause.rx_pause)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (ethpause.tx_pause)
		fc_conf->mode = RTE_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_FC_NONE;
	ret = 0;

out:
	priv_unlock(priv);
	assert(ret >= 0);
	return -ret;
}

/**
 * DPDK callback to modify flow control parameters.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[in] fc_conf
 *   Flow control parameters.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
int
mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct priv *priv = dev->data->dev_private;
	struct ifreq ifr;
	struct ethtool_pauseparam ethpause = {
		.cmd = ETHTOOL_SPAUSEPARAM
	};
	int ret;

	if (mlx5_is_secondary())
		return -E_RTE_SECONDARY;

	ifr.ifr_data = (void *)&ethpause;
	ethpause.autoneg = fc_conf->autoneg;
	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
	    (fc_conf->mode & RTE_FC_RX_PAUSE))
		ethpause.rx_pause = 1;
	else
		ethpause.rx_pause = 0;

	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
	    (fc_conf->mode & RTE_FC_TX_PAUSE))
		ethpause.tx_pause = 1;
	else
		ethpause.tx_pause = 0;

	priv_lock(priv);
	if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
		ret = errno;
		WARN("ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)"
		     " failed: %s",
		     strerror(ret));
		goto out;
	}
	ret = 0;

out:
	priv_unlock(priv);
	assert(ret >= 0);
	return -ret;
}

/**
 * Get PCI information from struct ibv_device.
 *
 * @param device
 *   Pointer to Infiniband device structure.
 * @param[out] pci_addr
 *   PCI bus address output buffer.
 *
 * @return
 *   0 on success, -1 on failure and errno is set.
 */
int
mlx5_ibv_device_to_pci_addr(const struct ibv_device *device,
			    struct rte_pci_addr *pci_addr)
{
	FILE *file;
	char line[32];
	int rc = -1;
	MKSTR(path, "%s/device/uevent", device->ibdev_path);

	file = fopen(path, "rb");
	if (file == NULL)
		return -1;
	while (fgets(line, sizeof(line), file) == line) {
		size_t len = strlen(line);
		int ret;

		/* Truncate long lines. */
		if (len == (sizeof(line) - 1))
			while (line[(len - 1)] != '\n') {
				ret = fgetc(file);
				if (ret == EOF)
					break;
				line[(len - 1)] = ret;
			}
		/* Extract information. */
		if (sscanf(line,
			   "PCI_SLOT_NAME="
			   "%" SCNx16 ":%" SCNx8 ":%" SCNx8 ".%" SCNx8 "\n",
			   &pci_addr->domain,
			   &pci_addr->bus,
			   &pci_addr->devid,
			   &pci_addr->function) == 4) {
			rc = 0;
			break;
		}
	}
	fclose(file);
	/* Honor the documented contract: -1 when no PCI_SLOT_NAME was found. */
	return rc;
}
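
/*
 * Example uevent content (the address itself is illustrative): a line
 * such as
 *
 *   PCI_SLOT_NAME=0000:08:00.0
 *
 * yields domain 0x0000, bus 0x08, devid 0x00 and function 0x0.
 */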

/**
 * Link status handler.
 *
 * @param priv
 *   Pointer to private structure.
 * @param dev
 *   Pointer to the rte_eth_dev structure.
 *
 * @return
 *   Nonzero if the callback process can be called immediately.
 */
static int
priv_dev_link_status_handler(struct priv *priv, struct rte_eth_dev *dev)
{
	struct ibv_async_event event;
	struct rte_eth_link *link = &dev->data->dev_link;
	int ret = 0;

	/* Read all messages and acknowledge them. */
	for (;;) {
		if (ibv_get_async_event(priv->ctx, &event))
			break;

		if (event.event_type != IBV_EVENT_PORT_ACTIVE &&
		    event.event_type != IBV_EVENT_PORT_ERR)
			DEBUG("event type %d on port %d not handled",
			      event.event_type, event.element.port_num);
		ibv_ack_async_event(&event);
	}
	mlx5_link_update(dev, 0);
	if (((link->link_speed == 0) && link->link_status) ||
	    ((link->link_speed != 0) && !link->link_status)) {
		if (!priv->pending_alarm) {
			/* Inconsistent status, check again later. */
			priv->pending_alarm = 1;
			rte_eal_alarm_set(MLX5_ALARM_TIMEOUT_US,
					  mlx5_dev_link_status_handler,
					  dev);
		}
	} else {
		ret = 1;
	}
	return ret;
}

/**
 * Handle delayed link status event.
 *
 * @param arg
 *   Registered argument.
 */
void
mlx5_dev_link_status_handler(void *arg)
{
	struct rte_eth_dev *dev = arg;
	struct priv *priv = dev->data->dev_private;
	int ret;

	priv_lock(priv);
	assert(priv->pending_alarm == 1);
	priv->pending_alarm = 0;
	ret = priv_dev_link_status_handler(priv, dev);
	priv_unlock(priv);
	if (ret)
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}

/**
 * Handle interrupts from the NIC.
 *
 * @param[in] intr_handle
 *   Interrupt handler.
 * @param cb_arg
 *   Callback argument.
 */
void
mlx5_dev_interrupt_handler(struct rte_intr_handle *intr_handle, void *cb_arg)
{
	struct rte_eth_dev *dev = cb_arg;
	struct priv *priv = dev->data->dev_private;
	int ret;

	(void)intr_handle;
	priv_lock(priv);
	ret = priv_dev_link_status_handler(priv, dev);
	priv_unlock(priv);
	if (ret)
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}

/**
 * Uninstall interrupt handler.
 *
 * @param priv
 *   Pointer to private structure.
 * @param dev
 *   Pointer to the rte_eth_dev structure.
 */
void
priv_dev_interrupt_handler_uninstall(struct priv *priv, struct rte_eth_dev *dev)
{
	if (!dev->data->dev_conf.intr_conf.lsc)
		return;
	rte_intr_callback_unregister(&priv->intr_handle,
				     mlx5_dev_interrupt_handler,
				     dev);
	if (priv->pending_alarm)
		rte_eal_alarm_cancel(mlx5_dev_link_status_handler, dev);
	priv->pending_alarm = 0;
	priv->intr_handle.fd = 0;
	priv->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
}

/**
 * Install interrupt handler.
 *
 * @param priv
 *   Pointer to private structure.
 * @param dev
 *   Pointer to the rte_eth_dev structure.
 */
void
priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev)
{
	int rc, flags;

	if (!dev->data->dev_conf.intr_conf.lsc)
		return;
	assert(priv->ctx->async_fd > 0);
	flags = fcntl(priv->ctx->async_fd, F_GETFL);
	rc = fcntl(priv->ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
	if (rc < 0) {
		INFO("failed to set the async event queue file descriptor"
		     " as non-blocking");
		dev->data->dev_conf.intr_conf.lsc = 0;
	} else {
		priv->intr_handle.fd = priv->ctx->async_fd;
		priv->intr_handle.type = RTE_INTR_HANDLE_EXT;
		rte_intr_callback_register(&priv->intr_handle,
					   mlx5_dev_interrupt_handler,
					   dev);
	}
}

/**
 * Change the link state (UP / DOWN).
 *
 * @param priv
 *   Pointer to private structure.
 * @param up
 *   Nonzero for link up, otherwise link down.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
priv_set_link(struct priv *priv, int up)
{
	struct rte_eth_dev *dev = priv->dev;
	int err;

	if (up) {
		err = priv_set_flags(priv, ~IFF_UP, IFF_UP);
		if (err)
			return err;
		priv_select_tx_function(priv);
		priv_select_rx_function(priv);
	} else {
		err = priv_set_flags(priv, ~IFF_UP, ~IFF_UP);
		if (err)
			return err;
		dev->rx_pkt_burst = removed_rx_burst;
		dev->tx_pkt_burst = removed_tx_burst;
	}
	return 0;
}

/**
 * DPDK callback to bring the link DOWN.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, errno value on failure.
 */
int
mlx5_set_link_down(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	int err;

	priv_lock(priv);
	err = priv_set_link(priv, 0);
	priv_unlock(priv);
	return err;
}

/**
 * DPDK callback to bring the link UP.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, errno value on failure.
 */
int
mlx5_set_link_up(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	int err;

	priv_lock(priv);
	err = priv_set_link(priv, 1);
	priv_unlock(priv);
	return err;
}

/**
 * Configure secondary process queues from a private data pointer (primary
 * or secondary) and update burst callbacks. Can take place only once.
 *
 * All queues must have been previously created by the primary process to
 * avoid undefined behavior.
 *
 * @param priv
 *   Private data pointer from either primary or secondary process.
 *
 * @return
 *   Private data pointer from secondary process, NULL in case of error.
 */
struct priv *
mlx5_secondary_data_setup(struct priv *priv)
{
	unsigned int port_id = 0;
	struct mlx5_secondary_data *sd;
	void **tx_queues;
	void **rx_queues;
	unsigned int nb_tx_queues;
	unsigned int nb_rx_queues;
	unsigned int i;

	/* priv must be valid at this point. */
	assert(priv != NULL);
	/* priv->dev must also be valid but may point to local memory from
	 * another process, possibly with the same address, and must not
	 * be dereferenced yet. */
	assert(priv->dev != NULL);
	/* Determine port ID by finding out where priv comes from. */
	while (1) {
		sd = &mlx5_secondary_data[port_id];
		rte_spinlock_lock(&sd->lock);
		/* Primary process? */
		if (sd->primary_priv == priv)
			break;
		/* Secondary process? */
		if (sd->data.dev_private == priv)
			break;
		rte_spinlock_unlock(&sd->lock);
		if (++port_id == RTE_DIM(mlx5_secondary_data))
			port_id = 0;
	}
	/* Switch to secondary private structure. If private data has already
	 * been updated by another thread, there is nothing else to do. */
	priv = sd->data.dev_private;
	if (priv->dev->data == &sd->data)
		goto end;
	/* Sanity checks. Secondary private structure is supposed to point
	 * to local eth_dev, itself still pointing to the shared device data
	 * structure allocated by the primary process. */
	assert(sd->shared_dev_data != &sd->data);
	assert(sd->data.nb_tx_queues == 0);
	assert(sd->data.tx_queues == NULL);
	assert(sd->data.nb_rx_queues == 0);
	assert(sd->data.rx_queues == NULL);
	assert(priv != sd->primary_priv);
	assert(priv->dev->data == sd->shared_dev_data);
	assert(priv->txqs_n == 0);
	assert(priv->txqs == NULL);
	assert(priv->rxqs_n == 0);
	assert(priv->rxqs == NULL);
	nb_tx_queues = sd->shared_dev_data->nb_tx_queues;
	nb_rx_queues = sd->shared_dev_data->nb_rx_queues;
	/* Allocate local storage for queues. */
	tx_queues = rte_zmalloc("secondary ethdev->tx_queues",
				sizeof(sd->data.tx_queues[0]) * nb_tx_queues,
				RTE_CACHE_LINE_SIZE);
	rx_queues = rte_zmalloc("secondary ethdev->rx_queues",
				sizeof(sd->data.rx_queues[0]) * nb_rx_queues,
				RTE_CACHE_LINE_SIZE);
	if (tx_queues == NULL || rx_queues == NULL)
		goto error;
	/* Lock to prevent control operations during setup. */
	priv_lock(priv);
	/* TX queues. */
	for (i = 0; i != nb_tx_queues; ++i) {
		struct txq *primary_txq = (*sd->primary_priv->txqs)[i];
		struct txq_ctrl *primary_txq_ctrl;
		struct txq_ctrl *txq_ctrl;

		if (primary_txq == NULL)
			continue;
		primary_txq_ctrl = container_of(primary_txq,
						struct txq_ctrl, txq);
		txq_ctrl = rte_calloc_socket("TXQ", 1, sizeof(*txq_ctrl) +
					     (1 << primary_txq->elts_n) *
					     sizeof(struct rte_mbuf *), 0,
					     primary_txq_ctrl->socket);
		if (txq_ctrl != NULL) {
			if (txq_ctrl_setup(priv->dev,
					   txq_ctrl,
					   1 << primary_txq->elts_n,
					   primary_txq_ctrl->socket,
					   NULL) == 0) {
				txq_ctrl->txq.stats.idx =
					primary_txq->stats.idx;
				tx_queues[i] = &txq_ctrl->txq;
				continue;
			}
			rte_free(txq_ctrl);
		}
		while (i) {
			txq_ctrl = tx_queues[--i];
			txq_cleanup(txq_ctrl);
			rte_free(txq_ctrl);
		}
		goto error;
	}
	/* RX queues. */
	for (i = 0; i != nb_rx_queues; ++i) {
		struct rxq_ctrl *primary_rxq =
			container_of((*sd->primary_priv->rxqs)[i],
				     struct rxq_ctrl, rxq);

		if (primary_rxq == NULL)
			continue;
		/* Not supported yet. */
		rx_queues[i] = NULL;
	}
	/* Update everything. */
	priv->txqs = (void *)tx_queues;
	priv->txqs_n = nb_tx_queues;
	priv->rxqs = (void *)rx_queues;
	priv->rxqs_n = nb_rx_queues;
	sd->data.rx_queues = rx_queues;
	sd->data.tx_queues = tx_queues;
	sd->data.nb_rx_queues = nb_rx_queues;
	sd->data.nb_tx_queues = nb_tx_queues;
	sd->data.dev_link = sd->shared_dev_data->dev_link;
	sd->data.mtu = sd->shared_dev_data->mtu;
	memcpy(sd->data.rx_queue_state, sd->shared_dev_data->rx_queue_state,
	       sizeof(sd->data.rx_queue_state));
	memcpy(sd->data.tx_queue_state, sd->shared_dev_data->tx_queue_state,
	       sizeof(sd->data.tx_queue_state));
	sd->data.dev_flags = sd->shared_dev_data->dev_flags;
	/* Use local data from now on. */
	rte_mb();
	priv->dev->data = &sd->data;
	rte_mb();
	priv_select_tx_function(priv);
	priv_select_rx_function(priv);
	priv_unlock(priv);
end:
	/* More sanity checks. */
	assert(priv->dev->data == &sd->data);
	rte_spinlock_unlock(&sd->lock);
	return priv;
error:
	priv_unlock(priv);
	rte_free(tx_queues);
	rte_free(rx_queues);
	rte_spinlock_unlock(&sd->lock);
	return NULL;
}

/**
 * Configure the TX function to use.
 *
 * @param priv
 *   Pointer to private structure.
 */
void
priv_select_tx_function(struct priv *priv)
{
	priv->dev->tx_pkt_burst = mlx5_tx_burst;
	/* Select appropriate TX function. */
	if (priv->mps && priv->txq_inline) {
		priv->dev->tx_pkt_burst = mlx5_tx_burst_mpw_inline;
		DEBUG("selected MPW inline TX function");
	} else if (priv->mps) {
		priv->dev->tx_pkt_burst = mlx5_tx_burst_mpw;
		DEBUG("selected MPW TX function");
	}
}
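
/*
 * Selection summary (sketch): when the device supports MPS (multi-packet
 * send) and TX inlining was requested (e.g. through the txq_inline device
 * argument), mlx5_tx_burst_mpw_inline is used; with MPS alone,
 * mlx5_tx_burst_mpw; otherwise the default mlx5_tx_burst.
 */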

/**
 * Configure the RX function to use.
 *
 * @param priv
 *   Pointer to private structure.
 */
void
priv_select_rx_function(struct priv *priv)
{
	priv->dev->rx_pkt_burst = mlx5_rx_burst;
}