/* /dpdk/drivers/net/mlx5/mlx5_ethdev.c (revision b79e4c00af0e7cfb8601ab0208659d226b82bd10) */
/*-
 *   BSD LICENSE
 *
 *   Copyright 2015 6WIND S.A.
 *   Copyright 2015 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stddef.h>
#include <assert.h>
#include <inttypes.h>
#include <unistd.h>
#include <stdint.h>
#include <stdalign.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#include <dirent.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/utsname.h>
#include <netinet/in.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <linux/version.h>
#include <fcntl.h>

/* DPDK headers don't like -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <rte_atomic.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_alarm.h>
#include <rte_malloc.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"

/* Add defines in case the running kernel is not the same as user headers. */
#ifndef ETHTOOL_GLINKSETTINGS
struct ethtool_link_settings {
	uint32_t cmd;
	uint32_t speed;
	uint8_t duplex;
	uint8_t port;
	uint8_t phy_address;
	uint8_t autoneg;
	uint8_t mdio_support;
	uint8_t eth_tp_mdix;
	uint8_t eth_tp_mdix_ctrl;
	int8_t link_mode_masks_nwords;
	uint32_t reserved[8];
	uint32_t link_mode_masks[];
};

#define ETHTOOL_GLINKSETTINGS 0x0000004c
#define ETHTOOL_LINK_MODE_1000baseT_Full_BIT 5
#define ETHTOOL_LINK_MODE_Autoneg_BIT 6
#define ETHTOOL_LINK_MODE_1000baseKX_Full_BIT 17
#define ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT 18
#define ETHTOOL_LINK_MODE_10000baseKR_Full_BIT 19
#define ETHTOOL_LINK_MODE_10000baseR_FEC_BIT 20
#define ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT 21
#define ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT 22
#define ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT 23
#define ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT 24
#define ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT 25
#define ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT 26
#define ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT 27
#define ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT 28
#define ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT 29
#define ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT 30
#endif
#ifndef HAVE_ETHTOOL_LINK_MODE_25G
#define ETHTOOL_LINK_MODE_25000baseCR_Full_BIT 31
#define ETHTOOL_LINK_MODE_25000baseKR_Full_BIT 32
#define ETHTOOL_LINK_MODE_25000baseSR_Full_BIT 33
#endif
#ifndef HAVE_ETHTOOL_LINK_MODE_50G
#define ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT 34
#define ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT 35
#endif
#ifndef HAVE_ETHTOOL_LINK_MODE_100G
#define ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT 36
#define ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT 37
#define ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT 38
#define ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT 39
#endif

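/*
 * The ETHTOOL_LINK_MODE_*_BIT constants above are bit positions, not
 * bitmasks, so a helper is needed to test them against a mask word. For
 * example, MLX5_BITSHIFT(ETHTOOL_LINK_MODE_Autoneg_BIT) yields
 * UINT64_C(1) << 6 == 0x40.
 */
#define MLX5_BITSHIFT(v) (UINT64_C(1) << (v))
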
/**
 * Return private structure associated with an Ethernet device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   Pointer to private structure.
 */
struct priv *
mlx5_get_priv(struct rte_eth_dev *dev)
{
	struct mlx5_secondary_data *sd;

	if (!mlx5_is_secondary())
		return dev->data->dev_private;
	sd = &mlx5_secondary_data[dev->data->port_id];
	return sd->data.dev_private;
}

/**
 * Check if running as a secondary process.
 *
 * @return
 *   Nonzero if running as a secondary process.
 */
inline int
mlx5_is_secondary(void)
{
	return rte_eal_process_type() != RTE_PROC_PRIMARY;
}

/**
 * Get interface name from private structure.
 *
 * @param[in] priv
 *   Pointer to private structure.
 * @param[out] ifname
 *   Interface name output buffer.
 *
 * @return
 *   0 on success, -1 on failure and errno is set.
 */
int
priv_get_ifname(const struct priv *priv, char (*ifname)[IF_NAMESIZE])
{
	DIR *dir;
	struct dirent *dent;
	unsigned int dev_type = 0;
	unsigned int dev_port_prev = ~0u;
	char match[IF_NAMESIZE] = "";

	{
		MKSTR(path, "%s/device/net", priv->ctx->device->ibdev_path);

		dir = opendir(path);
		if (dir == NULL)
			return -1;
	}
	while ((dent = readdir(dir)) != NULL) {
		char *name = dent->d_name;
		FILE *file;
		unsigned int dev_port;
		int r;

		if ((name[0] == '.') &&
		    ((name[1] == '\0') ||
		     ((name[1] == '.') && (name[2] == '\0'))))
			continue;

		MKSTR(path, "%s/device/net/%s/%s",
		      priv->ctx->device->ibdev_path, name,
		      (dev_type ? "dev_id" : "dev_port"));

		file = fopen(path, "rb");
		if (file == NULL) {
			if (errno != ENOENT)
				continue;
			/*
			 * Switch to dev_id when dev_port does not exist as
			 * is the case with Linux kernel versions < 3.15.
			 */
try_dev_id:
			match[0] = '\0';
			if (dev_type)
				break;
			dev_type = 1;
			dev_port_prev = ~0u;
			rewinddir(dir);
			continue;
		}
		r = fscanf(file, (dev_type ? "%x" : "%u"), &dev_port);
		fclose(file);
		if (r != 1)
			continue;
		/*
		 * Switch to dev_id when dev_port returns the same value for
		 * all ports. May happen when using a MOFED release older than
		 * 3.0 with a Linux kernel >= 3.15.
		 */
		if (dev_port == dev_port_prev)
			goto try_dev_id;
		dev_port_prev = dev_port;
		if (dev_port == (priv->port - 1u))
			snprintf(match, sizeof(match), "%s", name);
	}
	closedir(dir);
	if (match[0] == '\0')
		return -1;
	strncpy(*ifname, match, sizeof(*ifname));
	return 0;
}

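/*
 * Note on MKSTR() (mlx5_utils.h), used above and throughout this file: it
 * declares an on-stack character array sized by a first snprintf() pass and
 * fills it with a second one, roughly equivalent to:
 *
 *	char name[snprintf(NULL, 0, fmt, ...) + 1];
 *	snprintf(name, sizeof(name), fmt, ...);
 */
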
/**
 * Check whether the counter is located in the ib counters file.
 *
 * @param[in] cntr
 *   Counter name.
 *
 * @return
 *   1 if the counter is located in the ib counters file, 0 otherwise.
 */
int
priv_is_ib_cntr(const char *cntr)
{
	if (!strcmp(cntr, "out_of_buffer"))
		return 1;
	return 0;
}

/**
 * Read from sysfs entry.
 *
 * @param[in] priv
 *   Pointer to private structure.
 * @param[in] entry
 *   Entry name relative to sysfs path.
 * @param[out] buf
 *   Data output buffer.
 * @param size
 *   Buffer size.
 *
 * @return
 *   Number of bytes read on success (may be lower than requested),
 *   -1 on failure and errno is set.
 */
static int
priv_sysfs_read(const struct priv *priv, const char *entry,
		char *buf, size_t size)
{
	char ifname[IF_NAMESIZE];
	FILE *file;
	int ret;
	int err;

	if (priv_get_ifname(priv, &ifname))
		return -1;

	if (priv_is_ib_cntr(entry)) {
		MKSTR(path, "%s/ports/1/hw_counters/%s",
		      priv->ctx->device->ibdev_path, entry);
		file = fopen(path, "rb");
	} else {
		MKSTR(path, "%s/device/net/%s/%s",
		      priv->ctx->device->ibdev_path, ifname, entry);
		file = fopen(path, "rb");
	}
	if (file == NULL)
		return -1;
	ret = fread(buf, 1, size, file);
	err = errno;
	/* A short read at EOF is fine; only report actual stream errors. */
	if (((size_t)ret < size) && (ferror(file)))
		ret = -1;
	fclose(file);
	errno = err;
	return ret;
}

/**
 * Write to sysfs entry.
 *
 * @param[in] priv
 *   Pointer to private structure.
 * @param[in] entry
 *   Entry name relative to sysfs path.
 * @param[in] buf
 *   Data buffer.
 * @param size
 *   Buffer size.
 *
 * @return
 *   Number of bytes written on success, -1 on failure and errno is set.
 */
static int
priv_sysfs_write(const struct priv *priv, const char *entry,
		 char *buf, size_t size)
{
	char ifname[IF_NAMESIZE];
	FILE *file;
	int ret;
	int err;

	if (priv_get_ifname(priv, &ifname))
		return -1;

	MKSTR(path, "%s/device/net/%s/%s", priv->ctx->device->ibdev_path,
	      ifname, entry);

	file = fopen(path, "wb");
	if (file == NULL)
		return -1;
	ret = fwrite(buf, 1, size, file);
	err = errno;
	/* A short write is an error for sysfs attributes. */
	if (((size_t)ret < size) || (ferror(file)))
		ret = -1;
	fclose(file);
	errno = err;
	return ret;
}

/**
 * Get unsigned long sysfs property.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[in] name
 *   Entry name relative to sysfs path.
 * @param[out] value
 *   Value output buffer.
 *
 * @return
 *   0 on success, -1 on failure and errno is set.
 */
static int
priv_get_sysfs_ulong(struct priv *priv, const char *name, unsigned long *value)
{
	int ret;
	unsigned long value_ret;
	char value_str[32];

	ret = priv_sysfs_read(priv, name, value_str, (sizeof(value_str) - 1));
	if (ret == -1) {
		DEBUG("cannot read %s value from sysfs: %s",
		      name, strerror(errno));
		return -1;
	}
	value_str[ret] = '\0';
	errno = 0;
	value_ret = strtoul(value_str, NULL, 0);
	if (errno) {
		DEBUG("invalid %s value `%s': %s", name, value_str,
		      strerror(errno));
		return -1;
	}
	*value = value_ret;
	return 0;
}

/**
 * Set unsigned long sysfs property.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[in] name
 *   Entry name relative to sysfs path.
 * @param value
 *   Value to set.
 *
 * @return
 *   0 on success, -1 on failure and errno is set.
 */
static int
priv_set_sysfs_ulong(struct priv *priv, const char *name, unsigned long value)
{
	int ret;
	MKSTR(value_str, "%lu", value);

	ret = priv_sysfs_write(priv, name, value_str, (sizeof(value_str) - 1));
	if (ret == -1) {
		DEBUG("cannot write %s `%s' (%lu) to sysfs: %s",
		      name, value_str, value, strerror(errno));
		return -1;
	}
	return 0;
}

/**
 * Perform ifreq ioctl() on associated Ethernet device.
 *
 * @param[in] priv
 *   Pointer to private structure.
 * @param req
 *   Request number to pass to ioctl().
 * @param[out] ifr
 *   Interface request structure output buffer.
 *
 * @return
 *   0 on success, -1 on failure and errno is set.
 */
int
priv_ifreq(const struct priv *priv, int req, struct ifreq *ifr)
{
	int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
	int ret = -1;

	if (sock == -1)
		return ret;
	if (priv_get_ifname(priv, &ifr->ifr_name) == 0)
		ret = ioctl(sock, req, ifr);
	close(sock);
	return ret;
}

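/*
 * Usage sketch (hypothetical caller, not part of this driver): reading the
 * interface MTU through the kernel instead of sysfs.
 *
 *	struct ifreq ifr;
 *
 *	if (priv_ifreq(priv, SIOCGIFMTU, &ifr) == 0)
 *		printf("MTU: %d\n", ifr.ifr_mtu);
 */
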
/**
 * Return the number of active VFs for the current device.
 *
 * @param[in] priv
 *   Pointer to private structure.
 * @param[out] num_vfs
 *   Number of active VFs.
 *
 * @return
 *   0 on success, -1 on failure and errno is set.
 */
int
priv_get_num_vfs(struct priv *priv, uint16_t *num_vfs)
{
	/* The sysfs entry name depends on the operating system. */
	const char **name = (const char *[]){
		"device/sriov_numvfs",
		"device/mlx5_num_vfs",
		NULL,
	};
	int ret;

	do {
		unsigned long ulong_num_vfs;

		ret = priv_get_sysfs_ulong(priv, *name, &ulong_num_vfs);
		if (!ret)
			*num_vfs = ulong_num_vfs;
	} while (*(++name) && ret);
	return ret;
}

/**
 * Get device MTU.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[out] mtu
 *   MTU value output buffer.
 *
 * @return
 *   0 on success, -1 on failure and errno is set.
 */
int
priv_get_mtu(struct priv *priv, uint16_t *mtu)
{
	unsigned long ulong_mtu;

	if (priv_get_sysfs_ulong(priv, "mtu", &ulong_mtu) == -1)
		return -1;
	*mtu = ulong_mtu;
	return 0;
}

/**
 * Read device counter from sysfs.
 *
 * @param priv
 *   Pointer to private structure.
 * @param name
 *   Counter name.
 * @param[out] cntr
 *   Counter output buffer.
 *
 * @return
 *   0 on success, -1 on failure and errno is set.
 */
int
priv_get_cntr_sysfs(struct priv *priv, const char *name, uint64_t *cntr)
{
	unsigned long ulong_ctr;

	if (priv_get_sysfs_ulong(priv, name, &ulong_ctr) == -1)
		return -1;
	*cntr = ulong_ctr;
	return 0;
}

/**
 * Set device MTU.
 *
 * @param priv
 *   Pointer to private structure.
 * @param mtu
 *   MTU value to set.
 *
 * @return
 *   0 on success, -1 on failure and errno is set.
 */
static int
priv_set_mtu(struct priv *priv, uint16_t mtu)
{
	uint16_t new_mtu;

	if (priv_set_sysfs_ulong(priv, "mtu", mtu) ||
	    priv_get_mtu(priv, &new_mtu))
		return -1;
	if (new_mtu == mtu)
		return 0;
	errno = EINVAL;
	return -1;
}

/**
 * Set device flags.
 *
 * @param priv
 *   Pointer to private structure.
 * @param keep
 *   Bitmask for flags that must remain untouched.
 * @param flags
 *   Bitmask for flags to modify.
 *
 * @return
 *   0 on success, -1 on failure and errno is set.
 */
int
priv_set_flags(struct priv *priv, unsigned int keep, unsigned int flags)
{
	unsigned long tmp;

	if (priv_get_sysfs_ulong(priv, "flags", &tmp) == -1)
		return -1;
	tmp &= keep;
	tmp |= (flags & (~keep));
	return priv_set_sysfs_ulong(priv, "flags", tmp);
}

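/*
 * Example (mirroring priv_set_link() further below): set IFF_UP while
 * leaving every other interface flag untouched:
 *
 *	priv_set_flags(priv, ~IFF_UP, IFF_UP);
 */
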
/**
 * Ethernet device configuration.
 *
 * Prepare the driver for a given number of TX and RX queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
dev_configure(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	unsigned int rxqs_n = dev->data->nb_rx_queues;
	unsigned int txqs_n = dev->data->nb_tx_queues;
	unsigned int i;
	unsigned int j;
	unsigned int reta_idx_n;

	priv->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	priv->rxqs = (void *)dev->data->rx_queues;
	priv->txqs = (void *)dev->data->tx_queues;
	if (txqs_n != priv->txqs_n) {
		INFO("%p: TX queues number update: %u -> %u",
		     (void *)dev, priv->txqs_n, txqs_n);
		priv->txqs_n = txqs_n;
	}
	if (rxqs_n > priv->ind_table_max_size) {
		ERROR("cannot handle this many RX queues (%u)", rxqs_n);
		return EINVAL;
	}
	if (rxqs_n == priv->rxqs_n)
		return 0;
	INFO("%p: RX queues number update: %u -> %u",
	     (void *)dev, priv->rxqs_n, rxqs_n);
	priv->rxqs_n = rxqs_n;
	/* If the requested number of RX queues is not a power of two, use the
	 * maximum indirection table size for better balancing.
	 * The result is always rounded to the next power of two. */
	reta_idx_n = (1 << log2above((rxqs_n & (rxqs_n - 1)) ?
				     priv->ind_table_max_size :
				     rxqs_n));
	if (priv_rss_reta_index_resize(priv, reta_idx_n))
		return ENOMEM;
	/* When the number of RX queues is not a power of two, the remaining
	 * table entries are padded with reused WQs and hashes are not spread
	 * uniformly. */
	for (i = 0, j = 0; (i != reta_idx_n); ++i) {
		(*priv->reta_idx)[i] = j;
		if (++j == rxqs_n)
			j = 0;
	}
	return 0;
}

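/*
 * Worked example for the padding above, assuming (for illustration only)
 * priv->ind_table_max_size = 8: with rxqs_n = 6, reta_idx_n becomes 8 and
 * the loop fills the table with {0, 1, 2, 3, 4, 5, 0, 1}. Queues 0 and 1
 * appear twice, so hashes do not spread evenly across queues.
 */
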
/**
 * DPDK callback for Ethernet device configuration.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
int
mlx5_dev_configure(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	int ret;

	if (mlx5_is_secondary())
		return -E_RTE_SECONDARY;

	priv_lock(priv);
	ret = dev_configure(dev);
	assert(ret >= 0);
	priv_unlock(priv);
	return -ret;
}

/**
 * DPDK callback to get information about the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] info
 *   Info structure output buffer.
 */
void
mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
{
	struct priv *priv = mlx5_get_priv(dev);
	unsigned int max;
	char ifname[IF_NAMESIZE];

	info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	priv_lock(priv);
	/* FIXME: we should ask the device for these values. */
	info->min_rx_bufsize = 32;
	info->max_rx_pktlen = 65536;
	/*
	 * Since we need one CQ per QP, the limit is the minimum number
	 * between the two values.
	 */
	max = ((priv->device_attr.max_cq > priv->device_attr.max_qp) ?
	       priv->device_attr.max_qp : priv->device_attr.max_cq);
	/* Clamp to 65535 since max_rx_queues is a uint16_t field. */
	if (max >= 65535)
		max = 65535;
	info->max_rx_queues = max;
	info->max_tx_queues = max;
	info->max_mac_addrs = RTE_DIM(priv->mac);
	info->rx_offload_capa =
		(priv->hw_csum ?
		 (DEV_RX_OFFLOAD_IPV4_CKSUM |
		  DEV_RX_OFFLOAD_UDP_CKSUM |
		  DEV_RX_OFFLOAD_TCP_CKSUM) :
		 0) |
		(priv->hw_vlan_strip ? DEV_RX_OFFLOAD_VLAN_STRIP : 0);
	if (!priv->mps)
		info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
	if (priv->hw_csum)
		info->tx_offload_capa |=
			(DEV_TX_OFFLOAD_IPV4_CKSUM |
			 DEV_TX_OFFLOAD_UDP_CKSUM |
			 DEV_TX_OFFLOAD_TCP_CKSUM);
	if (priv->tso)
		info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
	if (priv->tunnel_en)
		info->tx_offload_capa |= (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
					  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
					  DEV_TX_OFFLOAD_GRE_TNL_TSO);
	if (priv_get_ifname(priv, &ifname) == 0)
		info->if_index = if_nametoindex(ifname);
	info->reta_size = priv->reta_idx_n ?
		priv->reta_idx_n : priv->ind_table_max_size;
	info->hash_key_size = ((*priv->rss_conf) ?
			       (*priv->rss_conf)[0]->rss_key_len :
			       0);
	info->speed_capa = priv->link_speed_capa;
	priv_unlock(priv);
}

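/**
 * DPDK callback to get supported packet types.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   Pointer to the supported packet types array, NULL if the current RX
 *   burst function cannot report packet types.
 */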
const uint32_t *
mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/* refers to rxq_cq_to_pkt_type() */
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == mlx5_rx_burst ||
	    dev->rx_pkt_burst == mlx5_rx_burst_vec)
		return ptypes;
	return NULL;
}

/**
 * Retrieve physical link information (unlocked version using legacy ioctl).
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param wait_to_complete
 *   Wait for request completion (ignored).
 *
 * @return
 *   0 when the link status has changed, -1 otherwise (unchanged status or
 *   failure, in which case errno is set).
 */
static int
mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, int wait_to_complete)
{
	struct priv *priv = mlx5_get_priv(dev);
	struct ethtool_cmd edata = {
		.cmd = ETHTOOL_GSET /* Deprecated since Linux v4.5. */
	};
	struct ifreq ifr;
	struct rte_eth_link dev_link;
	int link_speed = 0;

	/* priv_lock() is not taken to allow concurrent calls. */

	(void)wait_to_complete;
	if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) {
		WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno));
		return -1;
	}
	memset(&dev_link, 0, sizeof(dev_link));
	dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
				(ifr.ifr_flags & IFF_RUNNING));
	ifr.ifr_data = (void *)&edata;
	if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
		WARN("ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s",
		     strerror(errno));
		return -1;
	}
	link_speed = ethtool_cmd_speed(&edata);
	if (link_speed == -1)
		dev_link.link_speed = 0;
	else
		dev_link.link_speed = link_speed;
	priv->link_speed_capa = 0;
	if (edata.supported & SUPPORTED_Autoneg)
		priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG;
	if (edata.supported & (SUPPORTED_1000baseT_Full |
			       SUPPORTED_1000baseKX_Full))
		priv->link_speed_capa |= ETH_LINK_SPEED_1G;
	if (edata.supported & SUPPORTED_10000baseKR_Full)
		priv->link_speed_capa |= ETH_LINK_SPEED_10G;
	if (edata.supported & (SUPPORTED_40000baseKR4_Full |
			       SUPPORTED_40000baseCR4_Full |
			       SUPPORTED_40000baseSR4_Full |
			       SUPPORTED_40000baseLR4_Full))
		priv->link_speed_capa |= ETH_LINK_SPEED_40G;
	dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
				ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
	dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			ETH_LINK_SPEED_FIXED);
	if (memcmp(&dev_link, &dev->data->dev_link, sizeof(dev_link))) {
		/* Link status changed. */
		dev->data->dev_link = dev_link;
		return 0;
	}
	/* Link status is still the same. */
	return -1;
}

/**
 * Retrieve physical link information (unlocked version using new ioctl).
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param wait_to_complete
 *   Wait for request completion (ignored).
 *
 * @return
 *   0 when the link status has changed, -1 otherwise (unchanged status or
 *   failure, in which case errno is set).
 */
static int
mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
{
	struct priv *priv = mlx5_get_priv(dev);
	struct ethtool_link_settings gcmd = {
		.cmd = ETHTOOL_GLINKSETTINGS,
	};
	struct ifreq ifr;
	struct rte_eth_link dev_link;
	uint64_t sc;

	(void)wait_to_complete;
	if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) {
		WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno));
		return -1;
	}
	memset(&dev_link, 0, sizeof(dev_link));
	dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
				(ifr.ifr_flags & IFF_RUNNING));
	/*
	 * First pass: handshake request with no mask storage; the kernel
	 * replies with the required number of 32-bit mask words, negated.
	 */
	ifr.ifr_data = (void *)&gcmd;
	if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
		DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s",
		      strerror(errno));
		return -1;
	}
	gcmd.link_mode_masks_nwords = -gcmd.link_mode_masks_nwords;

	alignas(struct ethtool_link_settings)
	uint8_t data[offsetof(struct ethtool_link_settings, link_mode_masks) +
		     sizeof(uint32_t) * gcmd.link_mode_masks_nwords * 3];
	struct ethtool_link_settings *ecmd = (void *)data;

	/* Second pass: retrieve the supported/advertising/peer mode masks. */
	*ecmd = gcmd;
	ifr.ifr_data = (void *)ecmd;
	if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
		DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s",
		      strerror(errno));
		return -1;
	}
	dev_link.link_speed = ecmd->speed;
	sc = ecmd->link_mode_masks[0] |
		((uint64_t)ecmd->link_mode_masks[1] << 32);
	priv->link_speed_capa = 0;
	if (sc & MLX5_BITSHIFT(ETHTOOL_LINK_MODE_Autoneg_BIT))
		priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseT_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_1G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_10G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_20G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_40G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_56G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_25G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_50G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_100G;
	dev_link.link_duplex = ((ecmd->duplex == DUPLEX_HALF) ?
				ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
	dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
				  ETH_LINK_SPEED_FIXED);
	if (memcmp(&dev_link, &dev->data->dev_link, sizeof(dev_link))) {
		/* Link status changed. */
		dev->data->dev_link = dev_link;
		return 0;
	}
	/* Link status is still the same. */
	return -1;
}

/**
 * DPDK callback to retrieve physical link information.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param wait_to_complete
 *   Wait for request completion (ignored).
 *
 * @return
 *   0 when the link status has changed, -1 otherwise.
 */
int
mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	struct utsname utsname;
	int ver[3];

	if (uname(&utsname) == -1 ||
	    sscanf(utsname.release, "%d.%d.%d",
		   &ver[0], &ver[1], &ver[2]) != 3 ||
	    KERNEL_VERSION(ver[0], ver[1], ver[2]) < KERNEL_VERSION(4, 9, 0))
		return mlx5_link_update_unlocked_gset(dev, wait_to_complete);
	return mlx5_link_update_unlocked_gs(dev, wait_to_complete);
}

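/*
 * KERNEL_VERSION() (linux/version.h) packs a release into a single
 * comparable integer: KERNEL_VERSION(a, b, c) == (a << 16) + (b << 8) + c.
 * For example "4.9.0" becomes (4 << 16) + (9 << 8) + 0 == 264448, so any
 * older release compares lower and falls back to the legacy ETHTOOL_GSET
 * path above.
 */
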
/**
 * DPDK callback to change the MTU.
 *
 * Setting the MTU affects hardware MRU (packets larger than the MTU cannot be
 * received). Use this as a hint to enable/disable scattered packets support
 * and improve performance when not needed.
 * Since failure is not an option, reconfiguring queues on the fly is not
 * recommended.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param mtu
 *   New MTU.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
int
mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct priv *priv = dev->data->dev_private;
	int ret = 0;
	unsigned int i;
	unsigned int max_frame_len;
	int rehash;
	int restart = priv->started;

	if (mlx5_is_secondary())
		return -E_RTE_SECONDARY;

	priv_lock(priv);
	/* Set kernel interface MTU first. */
	if (priv_set_mtu(priv, mtu)) {
		ret = errno;
		WARN("cannot set port %u MTU to %u: %s", priv->port, mtu,
		     strerror(ret));
		goto out;
	} else
		DEBUG("adapter port %u MTU set to %u", priv->port, mtu);
	/* Temporarily replace RX handler with a fake one, assuming it has not
	 * been copied elsewhere. */
	dev->rx_pkt_burst = removed_rx_burst;
	/* Make sure everyone has left dev->rx_pkt_burst() and uses
	 * removed_rx_burst() instead. */
	rte_wmb();
	usleep(1000);
	/* MTU does not include header and CRC. */
	max_frame_len = ETHER_HDR_LEN + mtu + ETHER_CRC_LEN;
	/* Check if at least one queue is going to need a SGE update. */
	for (i = 0; i != priv->rxqs_n; ++i) {
		struct rxq *rxq = (*priv->rxqs)[i];
		unsigned int mb_len;
		unsigned int size = RTE_PKTMBUF_HEADROOM + max_frame_len;
		unsigned int sges_n;

		if (rxq == NULL)
			continue;
		mb_len = rte_pktmbuf_data_room_size(rxq->mp);
		assert(mb_len >= RTE_PKTMBUF_HEADROOM);
		/*
		 * Determine the number of SGEs needed for a full packet
		 * and round it to the next power of two.
		 */
		sges_n = log2above((size / mb_len) + !!(size % mb_len));
		if (sges_n != rxq->sges_n)
			break;
	}
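	/*
	 * Worked example for the SGE computation above, assuming
	 * RTE_PKTMBUF_HEADROOM = 128 and 2048-byte mbuf data rooms:
	 * mtu = 9000 gives max_frame_len = 14 + 9000 + 4 = 9018 and
	 * size = 128 + 9018 = 9146; 9146 / 2048 = 4 with a remainder,
	 * i.e. 5 segments, so sges_n = log2above(5) = 3 (8 SGEs).
	 */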
	/*
	 * If all queues have the right number of SGEs, a simple rehash
	 * of their buffers is enough, otherwise SGE information can only
	 * be updated in a queue by recreating it. All resources that depend
	 * on queues (flows, indirection tables) must be recreated as well in
	 * that case.
	 */
	rehash = (i == priv->rxqs_n);
	if (!rehash) {
		/* Clean up everything as with mlx5_dev_stop(). */
		priv_special_flow_disable_all(priv);
		priv_mac_addrs_disable(priv);
		priv_destroy_hash_rxqs(priv);
		priv_fdir_disable(priv);
		priv_dev_interrupt_handler_uninstall(priv, dev);
	}
recover:
	/* Reconfigure each RX queue. */
	for (i = 0; (i != priv->rxqs_n); ++i) {
		struct rxq *rxq = (*priv->rxqs)[i];
		struct rxq_ctrl *rxq_ctrl =
			container_of(rxq, struct rxq_ctrl, rxq);
		unsigned int mb_len;
		unsigned int tmp;

		if (rxq == NULL)
			continue;
		mb_len = rte_pktmbuf_data_room_size(rxq->mp);
		assert(mb_len >= RTE_PKTMBUF_HEADROOM);
		/* Provide new values to rxq_setup(). */
		dev->data->dev_conf.rxmode.jumbo_frame =
			(max_frame_len > ETHER_MAX_LEN);
		dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame_len;
		if (rehash)
			ret = rxq_rehash(dev, rxq_ctrl);
		else
			ret = rxq_ctrl_setup(dev, rxq_ctrl, 1 << rxq->elts_n,
					     rxq_ctrl->socket, NULL, rxq->mp);
		if (!ret)
			continue;
		/* Attempt to roll back in case of error. */
		tmp = (mb_len << rxq->sges_n) - RTE_PKTMBUF_HEADROOM;
		if (max_frame_len != tmp) {
			max_frame_len = tmp;
			goto recover;
		}
		/* Double fault, disable RX. */
		break;
	}
	/* Mimic mlx5_dev_start(). */
	if (ret) {
		ERROR("unable to reconfigure RX queues, RX disabled");
	} else if (restart &&
		   !rehash &&
		   !priv_create_hash_rxqs(priv) &&
		   !priv_rehash_flows(priv)) {
		if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_NONE)
			priv_fdir_enable(priv);
		priv_dev_interrupt_handler_install(priv, dev);
	}
	priv->mtu = mtu;
	/* Burst functions can now be called again. */
	rte_wmb();
	/*
	 * Use a safe RX burst function in case of error, otherwise select RX
	 * burst function again.
	 */
	if (!ret)
		priv_select_rx_function(priv);
out:
	priv_unlock(priv);
	assert(ret >= 0);
	return -ret;
}

/**
 * DPDK callback to get flow control status.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] fc_conf
 *   Flow control output buffer.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
int
mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct priv *priv = dev->data->dev_private;
	struct ifreq ifr;
	struct ethtool_pauseparam ethpause = {
		.cmd = ETHTOOL_GPAUSEPARAM
	};
	int ret;

	if (mlx5_is_secondary())
		return -E_RTE_SECONDARY;

	ifr.ifr_data = (void *)&ethpause;
	priv_lock(priv);
	if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
		ret = errno;
		WARN("ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM)"
		     " failed: %s",
		     strerror(ret));
		goto out;
	}

	fc_conf->autoneg = ethpause.autoneg;
	if (ethpause.rx_pause && ethpause.tx_pause)
		fc_conf->mode = RTE_FC_FULL;
	else if (ethpause.rx_pause)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (ethpause.tx_pause)
		fc_conf->mode = RTE_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_FC_NONE;
	ret = 0;

out:
	priv_unlock(priv);
	assert(ret >= 0);
	return -ret;
}

/**
 * DPDK callback to modify flow control parameters.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[in] fc_conf
 *   Flow control parameters.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
int
mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct priv *priv = dev->data->dev_private;
	struct ifreq ifr;
	struct ethtool_pauseparam ethpause = {
		.cmd = ETHTOOL_SPAUSEPARAM
	};
	int ret;

	if (mlx5_is_secondary())
		return -E_RTE_SECONDARY;

	ifr.ifr_data = (void *)&ethpause;
	ethpause.autoneg = fc_conf->autoneg;
	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
	    (fc_conf->mode & RTE_FC_RX_PAUSE))
		ethpause.rx_pause = 1;
	else
		ethpause.rx_pause = 0;

	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
	    (fc_conf->mode & RTE_FC_TX_PAUSE))
		ethpause.tx_pause = 1;
	else
		ethpause.tx_pause = 0;

	priv_lock(priv);
	if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
		ret = errno;
		WARN("ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)"
		     " failed: %s",
		     strerror(ret));
		goto out;
	}
	ret = 0;

out:
	priv_unlock(priv);
	assert(ret >= 0);
	return -ret;
}

/**
 * Get PCI information from struct ibv_device.
 *
 * @param device
 *   Pointer to IB device structure.
 * @param[out] pci_addr
 *   PCI bus address output buffer.
 *
 * @return
 *   0 on success, -1 on failure and errno is set.
 */
int
mlx5_ibv_device_to_pci_addr(const struct ibv_device *device,
			    struct rte_pci_addr *pci_addr)
{
	FILE *file;
	char line[32];
	MKSTR(path, "%s/device/uevent", device->ibdev_path);

	file = fopen(path, "rb");
	if (file == NULL)
		return -1;
	while (fgets(line, sizeof(line), file) == line) {
		size_t len = strlen(line);
		int ret;

		/* Truncate long lines. */
		if (len == (sizeof(line) - 1))
			while (line[(len - 1)] != '\n') {
				ret = fgetc(file);
				if (ret == EOF)
					break;
				line[(len - 1)] = ret;
			}
		/* Extract information. */
		if (sscanf(line,
			   "PCI_SLOT_NAME="
			   "%" SCNx32 ":%" SCNx8 ":%" SCNx8 ".%" SCNx8 "\n",
			   &pci_addr->domain,
			   &pci_addr->bus,
			   &pci_addr->devid,
			   &pci_addr->function) == 4) {
			fclose(file);
			return 0;
		}
	}
	fclose(file);
	/* No PCI_SLOT_NAME line found. */
	errno = ENODEV;
	return -1;
}

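/*
 * The PCI_SLOT_NAME value parsed above follows the usual
 * domain:bus:device.function layout, e.g. (sample address):
 *
 *	PCI_SLOT_NAME=0000:83:00.0
 */
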
/**
 * Link status handler.
 *
 * @param priv
 *   Pointer to private structure.
 * @param dev
 *   Pointer to the rte_eth_dev structure.
 *
 * @return
 *   Nonzero if the callback process can be called immediately.
 */
static int
priv_dev_link_status_handler(struct priv *priv, struct rte_eth_dev *dev)
{
	struct ibv_async_event event;
	struct rte_eth_link *link = &dev->data->dev_link;
	int ret = 0;

	/* Read all messages and acknowledge them. */
	for (;;) {
		if (ibv_get_async_event(priv->ctx, &event))
			break;

		if (event.event_type != IBV_EVENT_PORT_ACTIVE &&
		    event.event_type != IBV_EVENT_PORT_ERR)
			DEBUG("event type %d on port %d not handled",
			      event.event_type, event.element.port_num);
		ibv_ack_async_event(&event);
	}
	mlx5_link_update(dev, 0);
	if (((link->link_speed == 0) && link->link_status) ||
	    ((link->link_speed != 0) && !link->link_status)) {
		if (!priv->pending_alarm) {
			/* Inconsistent status, check again later. */
			priv->pending_alarm = 1;
			rte_eal_alarm_set(MLX5_ALARM_TIMEOUT_US,
					  mlx5_dev_link_status_handler,
					  dev);
		}
	} else {
		ret = 1;
	}
	return ret;
}

/**
 * Handle delayed link status event.
 *
 * @param arg
 *   Registered argument.
 */
void
mlx5_dev_link_status_handler(void *arg)
{
	struct rte_eth_dev *dev = arg;
	struct priv *priv = dev->data->dev_private;
	int ret;

	priv_lock(priv);
	assert(priv->pending_alarm == 1);
	priv->pending_alarm = 0;
	ret = priv_dev_link_status_handler(priv, dev);
	priv_unlock(priv);
	if (ret)
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL,
					      NULL);
}

/**
 * Handle interrupts from the NIC.
 *
 * @param cb_arg
 *   Callback argument, a pointer to the rte_eth_dev structure.
 */
void
mlx5_dev_interrupt_handler(void *cb_arg)
{
	struct rte_eth_dev *dev = cb_arg;
	struct priv *priv = dev->data->dev_private;
	int ret;

	priv_lock(priv);
	ret = priv_dev_link_status_handler(priv, dev);
	priv_unlock(priv);
	if (ret)
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL,
					      NULL);
}

/**
 * Uninstall interrupt handler.
 *
 * @param priv
 *   Pointer to private structure.
 * @param dev
 *   Pointer to the rte_eth_dev structure.
 */
void
priv_dev_interrupt_handler_uninstall(struct priv *priv, struct rte_eth_dev *dev)
{
	if (!dev->data->dev_conf.intr_conf.lsc)
		return;
	rte_intr_callback_unregister(&priv->intr_handle,
				     mlx5_dev_interrupt_handler,
				     dev);
	if (priv->pending_alarm)
		rte_eal_alarm_cancel(mlx5_dev_link_status_handler, dev);
	priv->pending_alarm = 0;
	priv->intr_handle.fd = 0;
	priv->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
}

/**
 * Install interrupt handler.
 *
 * @param priv
 *   Pointer to private structure.
 * @param dev
 *   Pointer to the rte_eth_dev structure.
 */
void
priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev)
{
	int rc, flags;

	if (!dev->data->dev_conf.intr_conf.lsc)
		return;
	assert(priv->ctx->async_fd > 0);
	flags = fcntl(priv->ctx->async_fd, F_GETFL);
	rc = fcntl(priv->ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
	if (rc < 0) {
		INFO("failed to set the async event queue file descriptor"
		     " to non-blocking mode");
		dev->data->dev_conf.intr_conf.lsc = 0;
	} else {
		priv->intr_handle.fd = priv->ctx->async_fd;
		priv->intr_handle.type = RTE_INTR_HANDLE_EXT;
		rte_intr_callback_register(&priv->intr_handle,
					   mlx5_dev_interrupt_handler,
					   dev);
	}
}

/**
 * Change the link state (UP / DOWN).
 *
 * @param priv
 *   Pointer to private structure.
 * @param up
 *   Nonzero for link up, otherwise link down.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
priv_set_link(struct priv *priv, int up)
{
	struct rte_eth_dev *dev = priv->dev;
	int err;

	if (up) {
		err = priv_set_flags(priv, ~IFF_UP, IFF_UP);
		if (err)
			return err;
		priv_select_tx_function(priv);
		priv_select_rx_function(priv);
	} else {
		err = priv_set_flags(priv, ~IFF_UP, ~IFF_UP);
		if (err)
			return err;
		dev->rx_pkt_burst = removed_rx_burst;
		dev->tx_pkt_burst = removed_tx_burst;
	}
	return 0;
}

/**
 * DPDK callback to bring the link DOWN.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, errno value on failure.
 */
int
mlx5_set_link_down(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	int err;

	priv_lock(priv);
	err = priv_set_link(priv, 0);
	priv_unlock(priv);
	return err;
}

/**
 * DPDK callback to bring the link UP.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, errno value on failure.
 */
int
mlx5_set_link_up(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	int err;

	priv_lock(priv);
	err = priv_set_link(priv, 1);
	priv_unlock(priv);
	return err;
}

/**
 * Configure secondary process queues from a private data pointer (primary
 * or secondary) and update burst callbacks. Can take place only once.
 *
 * All queues must have been previously created by the primary process to
 * avoid undefined behavior.
 *
 * @param priv
 *   Private data pointer from either primary or secondary process.
 *
 * @return
 *   Private data pointer from secondary process, NULL in case of error.
 */
struct priv *
mlx5_secondary_data_setup(struct priv *priv)
{
	unsigned int port_id = 0;
	struct mlx5_secondary_data *sd;
	void **tx_queues;
	void **rx_queues;
	unsigned int nb_tx_queues;
	unsigned int nb_rx_queues;
	unsigned int i;

	/* priv must be valid at this point. */
	assert(priv != NULL);
	/* priv->dev must also be valid but may point to local memory from
	 * another process, possibly with the same address and must not
	 * be dereferenced yet. */
	assert(priv->dev != NULL);
	/* Determine port ID by finding out where priv comes from. */
	while (1) {
		sd = &mlx5_secondary_data[port_id];
		rte_spinlock_lock(&sd->lock);
		/* Primary process? */
		if (sd->primary_priv == priv)
			break;
		/* Secondary process? */
		if (sd->data.dev_private == priv)
			break;
		rte_spinlock_unlock(&sd->lock);
		if (++port_id == RTE_DIM(mlx5_secondary_data))
			port_id = 0;
	}
	/* Switch to secondary private structure. If private data has already
	 * been updated by another thread, there is nothing else to do. */
	priv = sd->data.dev_private;
	if (priv->dev->data == &sd->data)
		goto end;
	/* Sanity checks. Secondary private structure is supposed to point
	 * to local eth_dev, itself still pointing to the shared device data
	 * structure allocated by the primary process. */
	assert(sd->shared_dev_data != &sd->data);
	assert(sd->data.nb_tx_queues == 0);
	assert(sd->data.tx_queues == NULL);
	assert(sd->data.nb_rx_queues == 0);
	assert(sd->data.rx_queues == NULL);
	assert(priv != sd->primary_priv);
	assert(priv->dev->data == sd->shared_dev_data);
	assert(priv->txqs_n == 0);
	assert(priv->txqs == NULL);
	assert(priv->rxqs_n == 0);
	assert(priv->rxqs == NULL);
	nb_tx_queues = sd->shared_dev_data->nb_tx_queues;
	nb_rx_queues = sd->shared_dev_data->nb_rx_queues;
	/* Allocate local storage for queues. */
	tx_queues = rte_zmalloc("secondary ethdev->tx_queues",
				sizeof(sd->data.tx_queues[0]) * nb_tx_queues,
				RTE_CACHE_LINE_SIZE);
	rx_queues = rte_zmalloc("secondary ethdev->rx_queues",
				sizeof(sd->data.rx_queues[0]) * nb_rx_queues,
				RTE_CACHE_LINE_SIZE);
	if (tx_queues == NULL || rx_queues == NULL)
		goto error;
	/* Lock to prevent control operations during setup. */
	priv_lock(priv);
	/* TX queues. */
	for (i = 0; i != nb_tx_queues; ++i) {
		struct txq *primary_txq = (*sd->primary_priv->txqs)[i];
		struct txq_ctrl *primary_txq_ctrl;
		struct txq_ctrl *txq_ctrl;

		if (primary_txq == NULL)
			continue;
		primary_txq_ctrl = container_of(primary_txq,
						struct txq_ctrl, txq);
		txq_ctrl = rte_calloc_socket("TXQ", 1, sizeof(*txq_ctrl) +
					     (1 << primary_txq->elts_n) *
					     sizeof(struct rte_mbuf *), 0,
					     primary_txq_ctrl->socket);
		if (txq_ctrl != NULL) {
			if (txq_ctrl_setup(priv->dev,
					   txq_ctrl,
					   1 << primary_txq->elts_n,
					   primary_txq_ctrl->socket,
					   NULL) == 0) {
				txq_ctrl->txq.stats.idx =
					primary_txq->stats.idx;
				tx_queues[i] = &txq_ctrl->txq;
				continue;
			}
			rte_free(txq_ctrl);
		}
		while (i) {
			struct txq *txq = tx_queues[--i];

			/* Stored pointers refer to the txq member; get back
			 * to the enclosing control structure to free it. */
			txq_ctrl = container_of(txq, struct txq_ctrl, txq);
			txq_cleanup(txq_ctrl);
			rte_free(txq_ctrl);
		}
		goto error;
	}
	/* RX queues. */
	for (i = 0; i != nb_rx_queues; ++i) {
		struct rxq *primary_rxq = (*sd->primary_priv->rxqs)[i];

		if (primary_rxq == NULL)
			continue;
		/* Not supported yet. */
		rx_queues[i] = NULL;
	}
	/* Update everything. */
	priv->txqs = (void *)tx_queues;
	priv->txqs_n = nb_tx_queues;
	priv->rxqs = (void *)rx_queues;
	priv->rxqs_n = nb_rx_queues;
	sd->data.rx_queues = rx_queues;
	sd->data.tx_queues = tx_queues;
	sd->data.nb_rx_queues = nb_rx_queues;
	sd->data.nb_tx_queues = nb_tx_queues;
	sd->data.dev_link = sd->shared_dev_data->dev_link;
	sd->data.mtu = sd->shared_dev_data->mtu;
	memcpy(sd->data.rx_queue_state, sd->shared_dev_data->rx_queue_state,
	       sizeof(sd->data.rx_queue_state));
	memcpy(sd->data.tx_queue_state, sd->shared_dev_data->tx_queue_state,
	       sizeof(sd->data.tx_queue_state));
	sd->data.dev_flags = sd->shared_dev_data->dev_flags;
	/* Use local data from now on. */
	rte_mb();
	priv->dev->data = &sd->data;
	rte_mb();
	priv_select_tx_function(priv);
	priv_select_rx_function(priv);
	priv_unlock(priv);
end:
	/* More sanity checks. */
	assert(priv->dev->data == &sd->data);
	rte_spinlock_unlock(&sd->lock);
	return priv;
error:
	priv_unlock(priv);
	rte_free(tx_queues);
	rte_free(rx_queues);
	rte_spinlock_unlock(&sd->lock);
	return NULL;
}

/**
 * Configure the TX function to use.
 *
 * @param priv
 *   Pointer to private structure.
 */
void
priv_select_tx_function(struct priv *priv)
{
	priv->dev->tx_pkt_burst = mlx5_tx_burst;
	/* Select appropriate TX function. */
	if (priv->mps == MLX5_MPW_ENHANCED) {
		if (priv_check_vec_tx_support(priv) > 0) {
			if (priv_check_raw_vec_tx_support(priv) > 0)
				priv->dev->tx_pkt_burst = mlx5_tx_burst_raw_vec;
			else
				priv->dev->tx_pkt_burst = mlx5_tx_burst_vec;
			DEBUG("selected Enhanced MPW TX vectorized function");
		} else {
			priv->dev->tx_pkt_burst = mlx5_tx_burst_empw;
			DEBUG("selected Enhanced MPW TX function");
		}
	} else if (priv->mps && priv->txq_inline) {
		priv->dev->tx_pkt_burst = mlx5_tx_burst_mpw_inline;
		DEBUG("selected MPW inline TX function");
	} else if (priv->mps) {
		priv->dev->tx_pkt_burst = mlx5_tx_burst_mpw;
		DEBUG("selected MPW TX function");
	}
}

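/*
 * Summary of the selection logic above (first match wins):
 *
 *	MLX5_MPW_ENHANCED + vector TX support -> mlx5_tx_burst_[raw_]vec
 *	MLX5_MPW_ENHANCED                     -> mlx5_tx_burst_empw
 *	MPW + txq_inline                      -> mlx5_tx_burst_mpw_inline
 *	MPW                                   -> mlx5_tx_burst_mpw
 *	otherwise                             -> mlx5_tx_burst (default)
 */
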
/**
 * Configure the RX function to use.
 *
 * @param priv
 *   Pointer to private structure.
 */
void
priv_select_rx_function(struct priv *priv)
{
	if (priv_check_vec_rx_support(priv) > 0) {
		priv_prep_vec_rx_function(priv);
		priv->dev->rx_pkt_burst = mlx5_rx_burst_vec;
		DEBUG("selected RX vectorized function");
	} else {
		priv->dev->rx_pkt_burst = mlx5_rx_burst;
	}
}