xref: /dpdk/drivers/net/nfp/nfp_net_common.c (revision f665790a5dbad7b645ff46f31d65e977324e7bfc)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2014-2018 Netronome Systems, Inc.
3  * All rights reserved.
4  *
5  * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
6  */
7 
8 #include "nfp_net_common.h"
9 
10 #include <rte_alarm.h>
11 
12 #include "flower/nfp_flower_representor.h"
13 #include "nfd3/nfp_nfd3.h"
14 #include "nfdk/nfp_nfdk.h"
15 #include "nfpcore/nfp_mip.h"
16 #include "nfpcore/nfp_nsp.h"
17 #include "nfp_logs.h"
18 #include "nfp_net_meta.h"
19 
20 #define NFP_TX_MAX_SEG       UINT8_MAX
21 #define NFP_TX_MAX_MTU_SEG   8
22 
23 #define NFP_NET_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
24 #define NFP_NET_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
25 
26 #define DEFAULT_FLBUF_SIZE        9216
27 #define NFP_ETH_OVERHEAD \
28 	(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + RTE_VLAN_HLEN * 2)
29 
30 /* Only show FEC capability supported by the current speed. */
31 #define NFP_FEC_CAPA_ENTRY_NUM  1
32 
33 enum nfp_xstat_group {
34 	NFP_XSTAT_GROUP_NET,
35 	NFP_XSTAT_GROUP_MAC
36 };
37 
38 struct nfp_xstat {
39 	char name[RTE_ETH_XSTATS_NAME_SIZE];
40 	int offset;
41 	enum nfp_xstat_group group;
42 };
43 
44 #define NFP_XSTAT_NET(_name, _offset) {                 \
45 	.name = _name,                                  \
46 	.offset = NFP_NET_CFG_STATS_##_offset,          \
47 	.group = NFP_XSTAT_GROUP_NET,                   \
48 }
49 
50 #define NFP_XSTAT_MAC(_name, _offset) {                 \
51 	.name = _name,                                  \
52 	.offset = NFP_MAC_STATS_##_offset,              \
53 	.group = NFP_XSTAT_GROUP_MAC,                   \
54 }
55 
56 static const struct nfp_xstat nfp_net_xstats[] = {
57 	/*
58 	 * Basic xstats available on both VF and PF.
59 	 * Note that in case new statistics of group NFP_XSTAT_GROUP_NET
60 	 * are added to this array, they must appear before any statistics
61 	 * of group NFP_XSTAT_GROUP_MAC.
62 	 */
63 	NFP_XSTAT_NET("rx_good_packets_mc", RX_MC_FRAMES),
64 	NFP_XSTAT_NET("tx_good_packets_mc", TX_MC_FRAMES),
65 	NFP_XSTAT_NET("rx_good_packets_bc", RX_BC_FRAMES),
66 	NFP_XSTAT_NET("tx_good_packets_bc", TX_BC_FRAMES),
67 	NFP_XSTAT_NET("rx_good_bytes_uc", RX_UC_OCTETS),
68 	NFP_XSTAT_NET("tx_good_bytes_uc", TX_UC_OCTETS),
69 	NFP_XSTAT_NET("rx_good_bytes_mc", RX_MC_OCTETS),
70 	NFP_XSTAT_NET("tx_good_bytes_mc", TX_MC_OCTETS),
71 	NFP_XSTAT_NET("rx_good_bytes_bc", RX_BC_OCTETS),
72 	NFP_XSTAT_NET("tx_good_bytes_bc", TX_BC_OCTETS),
73 	NFP_XSTAT_NET("tx_missed_errors", TX_DISCARDS),
74 	NFP_XSTAT_NET("bpf_pass_pkts", APP0_FRAMES),
75 	NFP_XSTAT_NET("bpf_pass_bytes", APP0_BYTES),
76 	NFP_XSTAT_NET("bpf_app1_pkts", APP1_FRAMES),
77 	NFP_XSTAT_NET("bpf_app1_bytes", APP1_BYTES),
78 	NFP_XSTAT_NET("bpf_app2_pkts", APP2_FRAMES),
79 	NFP_XSTAT_NET("bpf_app2_bytes", APP2_BYTES),
80 	NFP_XSTAT_NET("bpf_app3_pkts", APP3_FRAMES),
81 	NFP_XSTAT_NET("bpf_app3_bytes", APP3_BYTES),
82 	/*
83 	 * MAC xstats available only on the PF. These statistics are not available for VFs
84 	 * because, when a VF is initialized, its PF is still bound to the kernel driver and thus
85 	 * not initialized by the PMD. As such, the PMD cannot obtain a CPP handle and access the
86 	 * rtsym_table in order to get the pointer to the start of the MAC statistics counters.
87 	 */
88 	NFP_XSTAT_MAC("mac.rx_octets", RX_IN_OCTS),
89 	NFP_XSTAT_MAC("mac.rx_frame_too_long_errors", RX_FRAME_TOO_LONG_ERRORS),
90 	NFP_XSTAT_MAC("mac.rx_range_length_errors", RX_RANGE_LENGTH_ERRORS),
91 	NFP_XSTAT_MAC("mac.rx_vlan_received_ok", RX_VLAN_RECEIVED_OK),
92 	NFP_XSTAT_MAC("mac.rx_errors", RX_IN_ERRORS),
93 	NFP_XSTAT_MAC("mac.rx_broadcast_pkts", RX_IN_BROADCAST_PKTS),
94 	NFP_XSTAT_MAC("mac.rx_drop_events", RX_DROP_EVENTS),
95 	NFP_XSTAT_MAC("mac.rx_alignment_errors", RX_ALIGNMENT_ERRORS),
96 	NFP_XSTAT_MAC("mac.rx_pause_mac_ctrl_frames", RX_PAUSE_MAC_CTRL_FRAMES),
97 	NFP_XSTAT_MAC("mac.rx_frames_received_ok", RX_FRAMES_RECEIVED_OK),
98 	NFP_XSTAT_MAC("mac.rx_frame_check_sequence_errors", RX_FRAME_CHECK_SEQ_ERRORS),
99 	NFP_XSTAT_MAC("mac.rx_unicast_pkts", RX_UNICAST_PKTS),
100 	NFP_XSTAT_MAC("mac.rx_multicast_pkts", RX_MULTICAST_PKTS),
101 	NFP_XSTAT_MAC("mac.rx_pkts", RX_PKTS),
102 	NFP_XSTAT_MAC("mac.rx_undersize_pkts", RX_UNDERSIZE_PKTS),
103 	NFP_XSTAT_MAC("mac.rx_pkts_64_octets", RX_PKTS_64_OCTS),
104 	NFP_XSTAT_MAC("mac.rx_pkts_65_to_127_octets", RX_PKTS_65_TO_127_OCTS),
105 	NFP_XSTAT_MAC("mac.rx_pkts_128_to_255_octets", RX_PKTS_128_TO_255_OCTS),
106 	NFP_XSTAT_MAC("mac.rx_pkts_256_to_511_octets", RX_PKTS_256_TO_511_OCTS),
107 	NFP_XSTAT_MAC("mac.rx_pkts_512_to_1023_octets", RX_PKTS_512_TO_1023_OCTS),
108 	NFP_XSTAT_MAC("mac.rx_pkts_1024_to_1518_octets", RX_PKTS_1024_TO_1518_OCTS),
109 	NFP_XSTAT_MAC("mac.rx_pkts_1519_to_max_octets", RX_PKTS_1519_TO_MAX_OCTS),
110 	NFP_XSTAT_MAC("mac.rx_jabbers", RX_JABBERS),
111 	NFP_XSTAT_MAC("mac.rx_fragments", RX_FRAGMENTS),
112 	NFP_XSTAT_MAC("mac.rx_oversize_pkts", RX_OVERSIZE_PKTS),
113 	NFP_XSTAT_MAC("mac.rx_pause_frames_class0", RX_PAUSE_FRAMES_CLASS0),
114 	NFP_XSTAT_MAC("mac.rx_pause_frames_class1", RX_PAUSE_FRAMES_CLASS1),
115 	NFP_XSTAT_MAC("mac.rx_pause_frames_class2", RX_PAUSE_FRAMES_CLASS2),
116 	NFP_XSTAT_MAC("mac.rx_pause_frames_class3", RX_PAUSE_FRAMES_CLASS3),
117 	NFP_XSTAT_MAC("mac.rx_pause_frames_class4", RX_PAUSE_FRAMES_CLASS4),
118 	NFP_XSTAT_MAC("mac.rx_pause_frames_class5", RX_PAUSE_FRAMES_CLASS5),
119 	NFP_XSTAT_MAC("mac.rx_pause_frames_class6", RX_PAUSE_FRAMES_CLASS6),
120 	NFP_XSTAT_MAC("mac.rx_pause_frames_class7", RX_PAUSE_FRAMES_CLASS7),
121 	NFP_XSTAT_MAC("mac.rx_mac_ctrl_frames_received", RX_MAC_CTRL_FRAMES_REC),
122 	NFP_XSTAT_MAC("mac.rx_mac_head_drop", RX_MAC_HEAD_DROP),
123 	NFP_XSTAT_MAC("mac.tx_queue_drop", TX_QUEUE_DROP),
124 	NFP_XSTAT_MAC("mac.tx_octets", TX_OUT_OCTS),
125 	NFP_XSTAT_MAC("mac.tx_vlan_transmitted_ok", TX_VLAN_TRANSMITTED_OK),
126 	NFP_XSTAT_MAC("mac.tx_errors", TX_OUT_ERRORS),
127 	NFP_XSTAT_MAC("mac.tx_broadcast_pkts", TX_BROADCAST_PKTS),
128 	NFP_XSTAT_MAC("mac.tx_pause_mac_ctrl_frames", TX_PAUSE_MAC_CTRL_FRAMES),
129 	NFP_XSTAT_MAC("mac.tx_frames_transmitted_ok", TX_FRAMES_TRANSMITTED_OK),
130 	NFP_XSTAT_MAC("mac.tx_unicast_pkts", TX_UNICAST_PKTS),
131 	NFP_XSTAT_MAC("mac.tx_multicast_pkts", TX_MULTICAST_PKTS),
132 	NFP_XSTAT_MAC("mac.tx_pkts_64_octets", TX_PKTS_64_OCTS),
133 	NFP_XSTAT_MAC("mac.tx_pkts_65_to_127_octets", TX_PKTS_65_TO_127_OCTS),
134 	NFP_XSTAT_MAC("mac.tx_pkts_128_to_255_octets", TX_PKTS_128_TO_255_OCTS),
135 	NFP_XSTAT_MAC("mac.tx_pkts_256_to_511_octets", TX_PKTS_256_TO_511_OCTS),
136 	NFP_XSTAT_MAC("mac.tx_pkts_512_to_1023_octets", TX_PKTS_512_TO_1023_OCTS),
137 	NFP_XSTAT_MAC("mac.tx_pkts_1024_to_1518_octets", TX_PKTS_1024_TO_1518_OCTS),
138 	NFP_XSTAT_MAC("mac.tx_pkts_1519_to_max_octets", TX_PKTS_1519_TO_MAX_OCTS),
139 	NFP_XSTAT_MAC("mac.tx_pause_frames_class0", TX_PAUSE_FRAMES_CLASS0),
140 	NFP_XSTAT_MAC("mac.tx_pause_frames_class1", TX_PAUSE_FRAMES_CLASS1),
141 	NFP_XSTAT_MAC("mac.tx_pause_frames_class2", TX_PAUSE_FRAMES_CLASS2),
142 	NFP_XSTAT_MAC("mac.tx_pause_frames_class3", TX_PAUSE_FRAMES_CLASS3),
143 	NFP_XSTAT_MAC("mac.tx_pause_frames_class4", TX_PAUSE_FRAMES_CLASS4),
144 	NFP_XSTAT_MAC("mac.tx_pause_frames_class5", TX_PAUSE_FRAMES_CLASS5),
145 	NFP_XSTAT_MAC("mac.tx_pause_frames_class6", TX_PAUSE_FRAMES_CLASS6),
146 	NFP_XSTAT_MAC("mac.tx_pause_frames_class7", TX_PAUSE_FRAMES_CLASS7),
147 };
148 
149 static const uint32_t nfp_net_link_speed_nfp2rte[] = {
150 	[NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = RTE_ETH_SPEED_NUM_NONE,
151 	[NFP_NET_CFG_STS_LINK_RATE_UNKNOWN]     = RTE_ETH_SPEED_NUM_NONE,
152 	[NFP_NET_CFG_STS_LINK_RATE_1G]          = RTE_ETH_SPEED_NUM_1G,
153 	[NFP_NET_CFG_STS_LINK_RATE_10G]         = RTE_ETH_SPEED_NUM_10G,
154 	[NFP_NET_CFG_STS_LINK_RATE_25G]         = RTE_ETH_SPEED_NUM_25G,
155 	[NFP_NET_CFG_STS_LINK_RATE_40G]         = RTE_ETH_SPEED_NUM_40G,
156 	[NFP_NET_CFG_STS_LINK_RATE_50G]         = RTE_ETH_SPEED_NUM_50G,
157 	[NFP_NET_CFG_STS_LINK_RATE_100G]        = RTE_ETH_SPEED_NUM_100G,
158 };
159 
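/*
 * Map an RTE link speed back to the NFP link rate encoding by a linear search
 * of the table above; speeds with no match map to
 * NFP_NET_CFG_STS_LINK_RATE_UNKNOWN.
 */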
160 static uint16_t
161 nfp_net_link_speed_rte2nfp(uint16_t speed)
162 {
163 	uint16_t i;
164 
165 	for (i = 0; i < RTE_DIM(nfp_net_link_speed_nfp2rte); i++) {
166 		if (speed == nfp_net_link_speed_nfp2rte[i])
167 			return i;
168 	}
169 
170 	return NFP_NET_CFG_STS_LINK_RATE_UNKNOWN;
171 }
172 
173 static void
174 nfp_net_notify_port_speed(struct nfp_net_hw *hw,
175 		struct rte_eth_link *link)
176 {
177 	/*
178 	 * Read the link status from NFP_NET_CFG_STS. If the link is down
179 	 * then write the link speed NFP_NET_CFG_STS_LINK_RATE_UNKNOWN to
180 	 * NFP_NET_CFG_STS_NSP_LINK_RATE.
181 	 */
182 	if (link->link_status == RTE_ETH_LINK_DOWN) {
183 		nn_cfg_writew(&hw->super, NFP_NET_CFG_STS_NSP_LINK_RATE,
184 				NFP_NET_CFG_STS_LINK_RATE_UNKNOWN);
185 		return;
186 	}
187 
188 	/*
189 	 * Link is up so write the link speed from the eth_table to
190 	 * NFP_NET_CFG_STS_NSP_LINK_RATE.
191 	 */
192 	nn_cfg_writew(&hw->super, NFP_NET_CFG_STS_NSP_LINK_RATE,
193 			nfp_net_link_speed_rte2nfp(link->link_speed));
194 }
195 
196 /**
197  * Reconfigure the firmware through the VF config mailbox
198  *
199  * @param net_hw
200  *   Device to reconfigure
201  * @param pf_dev
202  *   PF device, providing access to the VF config table BAR
203  * @param update
204  *   The command to write to the VF config mailbox
205  * @param value
206  *   The value associated with the command
207  * @param offset
208  *   The offset of the value within the VF config table
209  *
210  * @return
211  *   - (0) if the VF reconfiguration succeeded.
212  *   - (-EIO) if an I/O error occurred while reconfiguring the VF
213  */
214 static int
215 nfp_net_vf_reconfig(struct nfp_net_hw *net_hw,
216 		struct nfp_pf_dev *pf_dev,
217 		uint16_t update,
218 		uint8_t value,
219 		uint32_t offset)
220 {
221 	int ret;
222 	struct nfp_hw *hw;
223 
224 	hw = &net_hw->super;
225 	rte_spinlock_lock(&hw->reconfig_lock);
226 
227 	/* Write update info to mailbox in VF config symbol */
228 	nn_writeb(value, pf_dev->vf_cfg_tbl_bar + offset);
229 	nn_writew(update, pf_dev->vf_cfg_tbl_bar + NFP_NET_VF_CFG_MB_UPD);
230 	nn_cfg_writel(hw, NFP_NET_CFG_UPDATE, NFP_NET_CFG_UPDATE_VF);
231 
232 	rte_wmb();
233 
234 	ret = nfp_reconfig_real(hw, NFP_NET_CFG_UPDATE_VF);
235 
236 	rte_spinlock_unlock(&hw->reconfig_lock);
237 
238 	if (ret != 0)
239 		return -EIO;
240 
241 	return nn_readw(pf_dev->vf_cfg_tbl_bar + NFP_NET_VF_CFG_MB_RET);
242 }
243 
244 /**
245  * Reconfigure the firmware via the mailbox
246  *
247  * @param net_hw
248  *   Device to reconfigure
249  * @param mbox_cmd
250  *   The value for the mailbox command
251  *
252  * @return
253  *   - (0) if the mailbox reconfiguration succeeded.
254  *   - (-EIO) if an I/O error occurred during the mailbox reconfiguration
255  */
256 int
257 nfp_net_mbox_reconfig(struct nfp_net_hw *net_hw,
258 		uint32_t mbox_cmd)
259 {
260 	int ret;
261 	uint32_t mbox;
262 
263 	mbox = net_hw->tlv_caps.mbox_off;
264 
265 	rte_spinlock_lock(&net_hw->super.reconfig_lock);
266 
267 	nn_cfg_writeq(&net_hw->super, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd);
268 	nn_cfg_writel(&net_hw->super, NFP_NET_CFG_UPDATE, NFP_NET_CFG_UPDATE_MBOX);
269 
270 	rte_wmb();
271 
272 	ret = nfp_reconfig_real(&net_hw->super, NFP_NET_CFG_UPDATE_MBOX);
273 
274 	rte_spinlock_unlock(&net_hw->super.reconfig_lock);
275 
276 	if (ret != 0) {
277 		PMD_DRV_LOG(ERR, "Error nfp net mailbox reconfig: mbox=%#08x update=%#08x",
278 				mbox_cmd, NFP_NET_CFG_UPDATE_MBOX);
279 		return -EIO;
280 	}
281 
282 	return nn_cfg_readl(&net_hw->super, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET);
283 }
284 
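/*
 * Return the nfp_net_hw of an ethdev. For representor ports the private data
 * is a nfp_flower_representor, so the PF hardware handle is taken from its
 * flower app; for other ports the private data is the nfp_net_hw itself.
 */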
285 struct nfp_net_hw *
286 nfp_net_get_hw(const struct rte_eth_dev *dev)
287 {
288 	struct nfp_net_hw *hw;
289 
290 	if (rte_eth_dev_is_repr(dev)) {
291 		struct nfp_flower_representor *repr;
292 		repr = dev->data->dev_private;
293 		hw = repr->app_fw_flower->pf_hw;
294 	} else {
295 		hw = dev->data->dev_private;
296 	}
297 
298 	return hw;
299 }
300 
301 /*
302  * Configure an Ethernet device.
303  *
304  * This function must be invoked first before any other function in the Ethernet API.
305  * This function can also be re-invoked when a device is in the stopped state.
306  *
307  * A DPDK app sends info about how many queues to use and how those queues
308  * need to be configured. The DPDK core uses this info to make sure no
309  * more queues than those advertised by the driver are requested.
310  * This function is called after that internal process.
311  */
312 int
313 nfp_net_configure(struct rte_eth_dev *dev)
314 {
315 	struct nfp_net_hw *hw;
316 	struct rte_eth_conf *dev_conf;
317 	struct rte_eth_rxmode *rxmode;
318 	struct rte_eth_txmode *txmode;
319 
320 	hw = nfp_net_get_hw(dev);
321 	dev_conf = &dev->data->dev_conf;
322 	rxmode = &dev_conf->rxmode;
323 	txmode = &dev_conf->txmode;
324 
325 	if ((rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) != 0)
326 		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
327 
328 	/* Checking TX mode */
329 	if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
330 		PMD_DRV_LOG(ERR, "TX mq_mode DCB and VMDq not supported");
331 		return -EINVAL;
332 	}
333 
334 	/* Checking RX mode */
335 	if ((rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) != 0 &&
336 			(hw->super.cap & NFP_NET_CFG_CTRL_RSS_ANY) == 0) {
337 		PMD_DRV_LOG(ERR, "RSS not supported");
338 		return -EINVAL;
339 	}
340 
341 	/* Checking MTU set */
342 	if (rxmode->mtu > hw->max_mtu + NFP_ETH_OVERHEAD) {
343 		PMD_DRV_LOG(ERR, "MTU (%u) larger than the maximum possible frame size (%u)",
344 				rxmode->mtu, hw->max_mtu + NFP_ETH_OVERHEAD);
345 		return -ERANGE;
346 	}
347 
348 	return 0;
349 }
350 
351 void
352 nfp_net_log_device_information(const struct nfp_net_hw *hw)
353 {
354 	uint32_t cap = hw->super.cap;
355 	uint32_t cap_ext = hw->super.cap_ext;
356 
357 	PMD_INIT_LOG(INFO, "VER: %u.%u, Maximum supported MTU: %d",
358 			hw->ver.major, hw->ver.minor, hw->max_mtu);
359 
360 	PMD_INIT_LOG(INFO, "CAP: %#x", cap);
361 	PMD_INIT_LOG(INFO, "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
362 			cap & NFP_NET_CFG_CTRL_ENABLE        ? "ENABLE "      : "",
363 			cap & NFP_NET_CFG_CTRL_PROMISC       ? "PROMISC "     : "",
364 			cap & NFP_NET_CFG_CTRL_L2BC          ? "L2BCFILT "    : "",
365 			cap & NFP_NET_CFG_CTRL_L2MC          ? "L2MCFILT "    : "",
366 			cap & NFP_NET_CFG_CTRL_RXCSUM        ? "RXCSUM "      : "",
367 			cap & NFP_NET_CFG_CTRL_TXCSUM        ? "TXCSUM "      : "",
368 			cap & NFP_NET_CFG_CTRL_RXVLAN        ? "RXVLAN "      : "",
369 			cap & NFP_NET_CFG_CTRL_TXVLAN        ? "TXVLAN "      : "",
370 			cap & NFP_NET_CFG_CTRL_SCATTER       ? "SCATTER "     : "",
371 			cap & NFP_NET_CFG_CTRL_GATHER        ? "GATHER "      : "",
372 			cap & NFP_NET_CFG_CTRL_LSO           ? "TSO "         : "",
373 			cap & NFP_NET_CFG_CTRL_RXQINQ        ? "RXQINQ "      : "",
374 			cap & NFP_NET_CFG_CTRL_RXVLAN_V2     ? "RXVLANv2 "    : "",
375 			cap & NFP_NET_CFG_CTRL_RINGCFG       ? "RINGCFG "     : "",
376 			cap & NFP_NET_CFG_CTRL_RSS           ? "RSS "         : "",
377 			cap & NFP_NET_CFG_CTRL_IRQMOD        ? "IRQMOD "      : "",
378 			cap & NFP_NET_CFG_CTRL_RINGPRIO      ? "RINGPRIO "    : "",
379 			cap & NFP_NET_CFG_CTRL_MSIXAUTO      ? "MSIXAUTO "    : "",
380 			cap & NFP_NET_CFG_CTRL_TXRWB         ? "TXRWB "       : "",
381 			cap & NFP_NET_CFG_CTRL_L2SWITCH      ? "L2SWITCH "    : "",
382 			cap & NFP_NET_CFG_CTRL_TXVLAN_V2     ? "TXVLANv2 "    : "",
383 			cap & NFP_NET_CFG_CTRL_VXLAN         ? "VXLAN "       : "",
384 			cap & NFP_NET_CFG_CTRL_NVGRE         ? "NVGRE "       : "",
385 			cap & NFP_NET_CFG_CTRL_MSIX_TX_OFF   ? "MSIX_TX_OFF " : "",
386 			cap & NFP_NET_CFG_CTRL_LSO2          ? "TSOv2 "       : "",
387 			cap & NFP_NET_CFG_CTRL_RSS2          ? "RSSv2 "       : "",
388 			cap & NFP_NET_CFG_CTRL_CSUM_COMPLETE ? "CSUM "        : "",
389 			cap & NFP_NET_CFG_CTRL_LIVE_ADDR     ? "LIVE_ADDR "   : "",
390 			cap & NFP_NET_CFG_CTRL_USO           ? "USO"          : "");
391 
392 	PMD_INIT_LOG(INFO, "CAP_WORD1: %#x", cap_ext);
393 	PMD_INIT_LOG(INFO, "%s%s%s%s%s%s%s",
394 			cap_ext & NFP_NET_CFG_CTRL_PKT_TYPE        ? "PKT_TYPE "        : "",
395 			cap_ext & NFP_NET_CFG_CTRL_IPSEC           ? "IPSEC "           : "",
396 			cap_ext & NFP_NET_CFG_CTRL_IPSEC_SM_LOOKUP ? "IPSEC_SM "        : "",
397 			cap_ext & NFP_NET_CFG_CTRL_IPSEC_LM_LOOKUP ? "IPSEC_LM "        : "",
398 			cap_ext & NFP_NET_CFG_CTRL_MULTI_PF        ? "MULTI_PF "        : "",
399 			cap_ext & NFP_NET_CFG_CTRL_FLOW_STEER      ? "FLOW_STEER "      : "",
400 			cap_ext & NFP_NET_CFG_CTRL_IN_ORDER        ? "VIRTIO_IN_ORDER " : "");
401 
402 	PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u",
403 			hw->max_rx_queues, hw->max_tx_queues);
404 }
405 
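/* Set the RXVLAN control bit matching the device capabilities, preferring the v2 variant. */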
406 static inline void
407 nfp_net_enable_rxvlan_cap(struct nfp_net_hw *hw,
408 		uint32_t *ctrl)
409 {
410 	if ((hw->super.cap & NFP_NET_CFG_CTRL_RXVLAN_V2) != 0)
411 		*ctrl |= NFP_NET_CFG_CTRL_RXVLAN_V2;
412 	else if ((hw->super.cap & NFP_NET_CFG_CTRL_RXVLAN) != 0)
413 		*ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
414 }
415 
416 void
417 nfp_net_enable_queues(struct rte_eth_dev *dev)
418 {
419 	struct nfp_net_hw *hw;
420 
421 	hw = nfp_net_get_hw(dev);
422 
423 	nfp_enable_queues(&hw->super, dev->data->nb_rx_queues,
424 			dev->data->nb_tx_queues);
425 }
426 
427 void
428 nfp_net_disable_queues(struct rte_eth_dev *dev)
429 {
430 	struct nfp_net_hw *net_hw;
431 
432 	net_hw = nfp_net_get_hw(dev);
433 
434 	nfp_disable_queues(&net_hw->super);
435 }
436 
437 void
438 nfp_net_params_setup(struct nfp_net_hw *hw)
439 {
440 	nn_cfg_writel(&hw->super, NFP_NET_CFG_MTU, hw->mtu);
441 	nn_cfg_writel(&hw->super, NFP_NET_CFG_FLBUFSZ, hw->flbufsz);
442 }
443 
444 void
445 nfp_net_cfg_queue_setup(struct nfp_net_hw *hw)
446 {
447 	hw->super.qcp_cfg = hw->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
448 }
449 
450 int
451 nfp_net_set_mac_addr(struct rte_eth_dev *dev,
452 		struct rte_ether_addr *mac_addr)
453 {
454 	uint32_t update;
455 	uint32_t new_ctrl;
456 	struct nfp_hw *hw;
457 	struct nfp_net_hw *net_hw;
458 
459 	net_hw = nfp_net_get_hw(dev);
460 	hw = &net_hw->super;
461 	if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) != 0 &&
462 			(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR) == 0) {
463 		PMD_DRV_LOG(ERR, "MAC address cannot be changed while the port is enabled");
464 		return -EBUSY;
465 	}
466 
467 	if (rte_is_valid_assigned_ether_addr(mac_addr) == 0) {
468 		PMD_DRV_LOG(ERR, "Invalid MAC address");
469 		return -EINVAL;
470 	}
471 
472 	/* Writing new MAC to the specific port BAR address */
473 	nfp_write_mac(hw, (uint8_t *)mac_addr);
474 
475 	update = NFP_NET_CFG_UPDATE_MACADDR;
476 	new_ctrl = hw->ctrl;
477 	if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) != 0 &&
478 			(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR) != 0)
479 		new_ctrl |= NFP_NET_CFG_CTRL_LIVE_ADDR;
480 
481 	/* Signal the NIC about the change */
482 	if (nfp_reconfig(hw, new_ctrl, update) != 0) {
483 		PMD_DRV_LOG(ERR, "MAC address update failed");
484 		return -EIO;
485 	}
486 
487 	hw->ctrl = new_ctrl;
488 
489 	return 0;
490 }
491 
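/*
 * Map RX queues to interrupt vectors. With UIO only a single vector is
 * available, while with VFIO vector 0 is kept for non-data interrupts and
 * each RX queue i is assigned vector i + 1.
 */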
492 int
493 nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
494 		struct rte_intr_handle *intr_handle)
495 {
496 	uint16_t i;
497 	struct nfp_net_hw *hw;
498 
499 	if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
500 				dev->data->nb_rx_queues) != 0) {
501 		PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues intr_vec",
502 				dev->data->nb_rx_queues);
503 		return -ENOMEM;
504 	}
505 
506 	hw = nfp_net_get_hw(dev);
507 
508 	if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_UIO) {
509 		PMD_DRV_LOG(INFO, "VF: enabling RX interrupt with UIO");
510 		/* UIO just supports one queue and no LSC */
511 		nn_cfg_writeb(&hw->super, NFP_NET_CFG_RXR_VEC(0), 0);
512 		if (rte_intr_vec_list_index_set(intr_handle, 0, 0) != 0)
513 			return -1;
514 	} else {
515 		PMD_DRV_LOG(INFO, "VF: enabling RX interrupt with VFIO");
516 		for (i = 0; i < dev->data->nb_rx_queues; i++) {
517 			/*
518 			 * The first MSI-X vector is reserved for
519 			 * non-EFD interrupts.
520 			 */
521 			nn_cfg_writeb(&hw->super, NFP_NET_CFG_RXR_VEC(i), i + 1);
522 			if (rte_intr_vec_list_index_set(intr_handle, i, i + 1) != 0)
523 				return -1;
524 		}
525 	}
526 
527 	/* Avoiding TX interrupts */
528 	hw->super.ctrl |= NFP_NET_CFG_CTRL_MSIX_TX_OFF;
529 	return 0;
530 }
531 
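/*
 * Translate the RX/TX offloads requested in the ethdev configuration into the
 * corresponding bits of the NFP control word, setting only the bits that the
 * device advertises in its capabilities.
 */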
532 uint32_t
533 nfp_check_offloads(struct rte_eth_dev *dev)
534 {
535 	uint32_t cap;
536 	uint32_t ctrl = 0;
537 	uint64_t rx_offload;
538 	uint64_t tx_offload;
539 	struct nfp_net_hw *hw;
540 	struct rte_eth_conf *dev_conf;
541 
542 	hw = nfp_net_get_hw(dev);
543 	cap = hw->super.cap;
544 
545 	dev_conf = &dev->data->dev_conf;
546 	rx_offload = dev_conf->rxmode.offloads;
547 	tx_offload = dev_conf->txmode.offloads;
548 
549 	if ((rx_offload & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM) != 0) {
550 		if ((cap & NFP_NET_CFG_CTRL_RXCSUM) != 0)
551 			ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
552 	}
553 
554 	if ((rx_offload & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) != 0)
555 		nfp_net_enable_rxvlan_cap(hw, &ctrl);
556 
557 	if ((rx_offload & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) != 0) {
558 		if ((cap & NFP_NET_CFG_CTRL_RXQINQ) != 0)
559 			ctrl |= NFP_NET_CFG_CTRL_RXQINQ;
560 	}
561 
562 	hw->mtu = dev->data->mtu;
563 
564 	if ((tx_offload & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) != 0) {
565 		if ((cap & NFP_NET_CFG_CTRL_TXVLAN_V2) != 0)
566 			ctrl |= NFP_NET_CFG_CTRL_TXVLAN_V2;
567 		else if ((cap & NFP_NET_CFG_CTRL_TXVLAN) != 0)
568 			ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
569 	}
570 
571 	/* L2 broadcast */
572 	if ((cap & NFP_NET_CFG_CTRL_L2BC) != 0)
573 		ctrl |= NFP_NET_CFG_CTRL_L2BC;
574 
575 	/* L2 multicast */
576 	if ((cap & NFP_NET_CFG_CTRL_L2MC) != 0)
577 		ctrl |= NFP_NET_CFG_CTRL_L2MC;
578 
579 	/* TX checksum offload */
580 	if ((tx_offload & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) != 0 ||
581 			(tx_offload & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) != 0 ||
582 			(tx_offload & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) != 0)
583 		ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
584 
585 	/* LSO offload */
586 	if ((tx_offload & RTE_ETH_TX_OFFLOAD_TCP_TSO) != 0 ||
587 			(tx_offload & RTE_ETH_TX_OFFLOAD_UDP_TSO) != 0 ||
588 			(tx_offload & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO) != 0) {
589 		if ((cap & NFP_NET_CFG_CTRL_LSO) != 0)
590 			ctrl |= NFP_NET_CFG_CTRL_LSO;
591 		else if ((cap & NFP_NET_CFG_CTRL_LSO2) != 0)
592 			ctrl |= NFP_NET_CFG_CTRL_LSO2;
593 	}
594 
595 	/* TX gather (multi-segment transmit) */
596 	if ((tx_offload & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) != 0)
597 		ctrl |= NFP_NET_CFG_CTRL_GATHER;
598 
599 	return ctrl;
600 }
601 
602 int
603 nfp_net_promisc_enable(struct rte_eth_dev *dev)
604 {
605 	int ret;
606 	uint32_t update;
607 	uint32_t new_ctrl;
608 	struct nfp_hw *hw;
609 	struct nfp_net_hw *net_hw;
610 
611 	net_hw = nfp_net_get_hw(dev);
612 
613 	hw = &net_hw->super;
614 	if ((hw->cap & NFP_NET_CFG_CTRL_PROMISC) == 0) {
615 		PMD_DRV_LOG(ERR, "Promiscuous mode not supported");
616 		return -ENOTSUP;
617 	}
618 
619 	if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) != 0) {
620 		PMD_DRV_LOG(INFO, "Promiscuous mode already enabled");
621 		return 0;
622 	}
623 
624 	new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_PROMISC;
625 	update = NFP_NET_CFG_UPDATE_GEN;
626 
627 	ret = nfp_reconfig(hw, new_ctrl, update);
628 	if (ret != 0)
629 		return ret;
630 
631 	hw->ctrl = new_ctrl;
632 
633 	return 0;
634 }
635 
636 int
637 nfp_net_promisc_disable(struct rte_eth_dev *dev)
638 {
639 	int ret;
640 	uint32_t update;
641 	uint32_t new_ctrl;
642 	struct nfp_hw *hw;
643 	struct nfp_net_hw *net_hw;
644 
645 	net_hw = nfp_net_get_hw(dev);
646 	hw = &net_hw->super;
647 
648 	if ((hw->cap & NFP_NET_CFG_CTRL_PROMISC) == 0) {
649 		PMD_DRV_LOG(ERR, "Promiscuous mode not supported");
650 		return -ENOTSUP;
651 	}
652 
653 	if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) == 0) {
654 		PMD_DRV_LOG(INFO, "Promiscuous mode already disabled");
655 		return 0;
656 	}
657 
658 	new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_PROMISC;
659 	update = NFP_NET_CFG_UPDATE_GEN;
660 
661 	ret = nfp_reconfig(hw, new_ctrl, update);
662 	if (ret != 0)
663 		return ret;
664 
665 	hw->ctrl = new_ctrl;
666 
667 	return 0;
668 }
669 
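/*
 * Toggle allmulticast mode by clearing or setting the multicast filter
 * control bit: with the filter disabled, all multicast traffic is accepted.
 */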
670 static int
671 nfp_net_set_allmulticast_mode(struct rte_eth_dev *dev,
672 		bool enable)
673 {
674 	int ret;
675 	uint32_t update;
676 	struct nfp_hw *hw;
677 	uint32_t cap_extend;
678 	uint32_t ctrl_extend;
679 	uint32_t new_ctrl_extend;
680 	struct nfp_net_hw *net_hw;
681 
682 	net_hw = nfp_net_get_hw(dev);
683 	hw = &net_hw->super;
684 
685 	cap_extend = hw->cap_ext;
686 	if ((cap_extend & NFP_NET_CFG_CTRL_MCAST_FILTER) == 0) {
687 		PMD_DRV_LOG(ERR, "Allmulticast mode not supported");
688 		return -ENOTSUP;
689 	}
690 
691 	/*
692 	 * Allmulticast mode enabled when NFP_NET_CFG_CTRL_MCAST_FILTER bit is 0.
693 	 * Allmulticast mode disabled when NFP_NET_CFG_CTRL_MCAST_FILTER bit is 1.
694 	 */
695 	ctrl_extend = hw->ctrl_ext;
696 	if (enable) {
697 		if ((ctrl_extend & NFP_NET_CFG_CTRL_MCAST_FILTER) == 0)
698 			return 0;
699 
700 		new_ctrl_extend = ctrl_extend & ~NFP_NET_CFG_CTRL_MCAST_FILTER;
701 	} else {
702 		if ((ctrl_extend & NFP_NET_CFG_CTRL_MCAST_FILTER) != 0)
703 			return 0;
704 
705 		new_ctrl_extend = ctrl_extend | NFP_NET_CFG_CTRL_MCAST_FILTER;
706 	}
707 
708 	update = NFP_NET_CFG_UPDATE_GEN;
709 
710 	ret = nfp_ext_reconfig(hw, new_ctrl_extend, update);
711 	if (ret != 0)
712 		return ret;
713 
714 	hw->ctrl_ext = new_ctrl_extend;
715 	return 0;
716 }
717 
718 int
719 nfp_net_allmulticast_enable(struct rte_eth_dev *dev)
720 {
721 	return nfp_net_set_allmulticast_mode(dev, true);
722 }
723 
724 int
725 nfp_net_allmulticast_disable(struct rte_eth_dev *dev)
726 {
727 	return nfp_net_set_allmulticast_mode(dev, false);
728 }
729 
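/*
 * Refresh the cached eth table entry of this port when needed and derive the
 * link speed and autonegotiation status reported to the application.
 */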
730 static void
731 nfp_net_speed_aneg_update(struct rte_eth_dev *dev,
732 		struct nfp_net_hw *hw,
733 		struct nfp_net_hw_priv *hw_priv,
734 		struct rte_eth_link *link)
735 {
736 	uint32_t i;
737 	uint32_t speed;
738 	enum nfp_eth_aneg aneg;
739 	struct nfp_pf_dev *pf_dev;
740 	struct nfp_eth_table *nfp_eth_table;
741 	struct nfp_eth_table_port *eth_port;
742 
743 	pf_dev = hw_priv->pf_dev;
744 	aneg = pf_dev->nfp_eth_table->ports[hw->idx].aneg;
745 
746 	/* Re-read the port table if the speed was updated or autoneg is enabled. */
747 	if (pf_dev->speed_updated || aneg == NFP_ANEG_AUTO) {
748 		nfp_eth_table = nfp_eth_read_ports(pf_dev->cpp);
749 		if (nfp_eth_table == NULL) {
750 			PMD_DRV_LOG(WARNING, "Failed to update port speed.");
751 		} else {
752 			pf_dev->nfp_eth_table->ports[hw->idx] = nfp_eth_table->ports[hw->idx];
753 			free(nfp_eth_table);
754 			pf_dev->speed_updated = false;
755 		}
756 	}
757 
758 	nfp_eth_table = pf_dev->nfp_eth_table;
759 	eth_port = &nfp_eth_table->ports[hw->idx];
760 	speed = eth_port->speed;
761 
762 	for (i = 0; i < RTE_DIM(nfp_net_link_speed_nfp2rte); i++) {
763 		if (nfp_net_link_speed_nfp2rte[i] == speed) {
764 			link->link_speed = speed;
765 			break;
766 		}
767 	}
768 
769 	if (dev->data->dev_conf.link_speeds == RTE_ETH_LINK_SPEED_AUTONEG &&
770 			eth_port->supp_aneg)
771 		link->link_autoneg = RTE_ETH_LINK_AUTONEG;
772 }
773 
774 int
775 nfp_net_link_update_common(struct rte_eth_dev *dev,
776 		struct nfp_net_hw *hw,
777 		struct rte_eth_link *link,
778 		uint32_t link_status)
779 {
780 	int ret;
781 	uint32_t nn_link_status;
782 	struct nfp_net_hw_priv *hw_priv;
783 
784 	hw_priv = dev->process_private;
785 	if (link->link_status == RTE_ETH_LINK_UP) {
786 		if (hw_priv->pf_dev != NULL) {
787 			nfp_net_speed_aneg_update(dev, hw, hw_priv, link);
788 		} else {
789 			/*
790 			 * Shift and mask nn_link_status so that it is effectively the value
791 			 * at offset NFP_NET_CFG_STS_NSP_LINK_RATE.
792 			 */
793 			nn_link_status = (link_status >> NFP_NET_CFG_STS_LINK_RATE_SHIFT) &
794 					NFP_NET_CFG_STS_LINK_RATE_MASK;
795 			if (nn_link_status < RTE_DIM(nfp_net_link_speed_nfp2rte))
796 				link->link_speed = nfp_net_link_speed_nfp2rte[nn_link_status];
797 		}
798 	}
799 
800 	ret = rte_eth_linkstatus_set(dev, link);
801 	if (ret == 0) {
802 		if (link->link_status != 0)
803 			PMD_DRV_LOG(INFO, "NIC Link is Up");
804 		else
805 			PMD_DRV_LOG(INFO, "NIC Link is Down");
806 	}
807 
808 	return ret;
809 }
810 
811 /*
812  * Return 0 means link status changed, -1 means not changed
813  *
814  * Wait to complete is needed as it can take up to 9 seconds to get the Link
815  * status.
816  */
817 int
818 nfp_net_link_update(struct rte_eth_dev *dev,
819 		__rte_unused int wait_to_complete)
820 {
821 	int ret;
822 	struct nfp_net_hw *hw;
823 	uint32_t nn_link_status;
824 	struct rte_eth_link link;
825 	struct nfp_net_hw_priv *hw_priv;
826 
827 	hw = nfp_net_get_hw(dev);
828 	hw_priv = dev->process_private;
829 
830 	memset(&link, 0, sizeof(struct rte_eth_link));
831 
832 	/* Read link status */
833 	nn_link_status = nn_cfg_readw(&hw->super, NFP_NET_CFG_STS);
834 	if ((nn_link_status & NFP_NET_CFG_STS_LINK) != 0)
835 		link.link_status = RTE_ETH_LINK_UP;
836 
837 	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
838 
839 	ret = nfp_net_link_update_common(dev, hw, &link, nn_link_status);
840 	if (ret == -EIO)
841 		return ret;
842 
843 	/*
844 	 * Notify the port to update the speed value in the CTRL BAR from NSP.
845 	 * Not applicable for VFs as the associated PF is still attached to the
846 	 * kernel driver.
847 	 */
848 	if (hw_priv != NULL && hw_priv->pf_dev != NULL)
849 		nfp_net_notify_port_speed(hw, &link);
850 
851 	return ret;
852 }
853 
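/*
 * Collect the basic statistics from the config BAR counters, reporting each
 * value relative to the baseline snapshot taken at the last stats reset.
 */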
854 int
855 nfp_net_stats_get(struct rte_eth_dev *dev,
856 		struct rte_eth_stats *stats)
857 {
858 	uint16_t i;
859 	struct nfp_net_hw *hw;
860 	struct rte_eth_stats nfp_dev_stats;
861 
862 	if (stats == NULL)
863 		return -EINVAL;
864 
865 	hw = nfp_net_get_hw(dev);
866 
867 	memset(&nfp_dev_stats, 0, sizeof(nfp_dev_stats));
868 
869 	/* Reading per RX ring stats */
870 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
871 		if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
872 			break;
873 
874 		nfp_dev_stats.q_ipackets[i] =
875 				nn_cfg_readq(&hw->super, NFP_NET_CFG_RXR_STATS(i));
876 		nfp_dev_stats.q_ipackets[i] -=
877 				hw->eth_stats_base.q_ipackets[i];
878 
879 		nfp_dev_stats.q_ibytes[i] =
880 				nn_cfg_readq(&hw->super, NFP_NET_CFG_RXR_STATS(i) + 0x8);
881 		nfp_dev_stats.q_ibytes[i] -=
882 				hw->eth_stats_base.q_ibytes[i];
883 	}
884 
885 	/* Reading per TX ring stats */
886 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
887 		if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
888 			break;
889 
890 		nfp_dev_stats.q_opackets[i] =
891 				nn_cfg_readq(&hw->super, NFP_NET_CFG_TXR_STATS(i));
892 		nfp_dev_stats.q_opackets[i] -= hw->eth_stats_base.q_opackets[i];
893 
894 		nfp_dev_stats.q_obytes[i] =
895 				nn_cfg_readq(&hw->super, NFP_NET_CFG_TXR_STATS(i) + 0x8);
896 		nfp_dev_stats.q_obytes[i] -= hw->eth_stats_base.q_obytes[i];
897 	}
898 
899 	nfp_dev_stats.ipackets = nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_FRAMES);
900 	nfp_dev_stats.ipackets -= hw->eth_stats_base.ipackets;
901 
902 	nfp_dev_stats.ibytes = nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_OCTETS);
903 	nfp_dev_stats.ibytes -= hw->eth_stats_base.ibytes;
904 
905 	nfp_dev_stats.opackets =
906 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_TX_FRAMES);
907 	nfp_dev_stats.opackets -= hw->eth_stats_base.opackets;
908 
909 	nfp_dev_stats.obytes =
910 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_TX_OCTETS);
911 	nfp_dev_stats.obytes -= hw->eth_stats_base.obytes;
912 
913 	/* Reading general device stats */
914 	nfp_dev_stats.ierrors =
915 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_ERRORS);
916 	nfp_dev_stats.ierrors -= hw->eth_stats_base.ierrors;
917 
918 	nfp_dev_stats.oerrors =
919 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_TX_ERRORS);
920 	nfp_dev_stats.oerrors -= hw->eth_stats_base.oerrors;
921 
922 	/* RX ring mbuf allocation failures */
923 	nfp_dev_stats.rx_nombuf = dev->data->rx_mbuf_alloc_failed;
924 
925 	nfp_dev_stats.imissed =
926 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_DISCARDS);
927 	nfp_dev_stats.imissed -= hw->eth_stats_base.imissed;
928 
929 	memcpy(stats, &nfp_dev_stats, sizeof(*stats));
930 	return 0;
931 }
932 
933 /*
934  * hw->eth_stats_base records the per-counter starting point.
935  * Let's update it now.
936  */
937 int
938 nfp_net_stats_reset(struct rte_eth_dev *dev)
939 {
940 	uint16_t i;
941 	struct nfp_net_hw *hw;
942 
943 	hw = nfp_net_get_hw(dev);
944 
945 	/* Reading per RX ring stats */
946 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
947 		if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
948 			break;
949 
950 		hw->eth_stats_base.q_ipackets[i] =
951 				nn_cfg_readq(&hw->super, NFP_NET_CFG_RXR_STATS(i));
952 
953 		hw->eth_stats_base.q_ibytes[i] =
954 				nn_cfg_readq(&hw->super, NFP_NET_CFG_RXR_STATS(i) + 0x8);
955 	}
956 
957 	/* Reading per TX ring stats */
958 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
959 		if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
960 			break;
961 
962 		hw->eth_stats_base.q_opackets[i] =
963 				nn_cfg_readq(&hw->super, NFP_NET_CFG_TXR_STATS(i));
964 
965 		hw->eth_stats_base.q_obytes[i] =
966 				nn_cfg_readq(&hw->super, NFP_NET_CFG_TXR_STATS(i) + 0x8);
967 	}
968 
969 	hw->eth_stats_base.ipackets =
970 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_FRAMES);
971 
972 	hw->eth_stats_base.ibytes =
973 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_OCTETS);
974 
975 	hw->eth_stats_base.opackets =
976 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_TX_FRAMES);
977 
978 	hw->eth_stats_base.obytes =
979 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_TX_OCTETS);
980 
981 	/* Reading general device stats */
982 	hw->eth_stats_base.ierrors =
983 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_ERRORS);
984 
985 	hw->eth_stats_base.oerrors =
986 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_TX_ERRORS);
987 
988 	/* RX ring mbuf allocation failures */
989 	dev->data->rx_mbuf_alloc_failed = 0;
990 
991 	hw->eth_stats_base.imissed =
992 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_DISCARDS);
993 
994 	return 0;
995 }
996 
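/*
 * Number of xstats exposed by this port. VFs and VF representors have no MAC
 * stats area, so only the NFP_XSTAT_GROUP_NET entries (which come first in
 * the nfp_net_xstats array) are counted for them.
 */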
997 uint32_t
998 nfp_net_xstats_size(const struct rte_eth_dev *dev)
999 {
1000 	uint32_t count;
1001 	bool vf_flag = false;
1002 	struct nfp_net_hw *hw;
1003 	struct nfp_flower_representor *repr;
1004 	const uint32_t size = RTE_DIM(nfp_net_xstats);
1005 
1006 	if (rte_eth_dev_is_repr(dev)) {
1007 		repr = dev->data->dev_private;
1008 		if (repr->mac_stats == NULL)
1009 			vf_flag = true;
1010 	} else {
1011 		hw = dev->data->dev_private;
1012 		if (hw->mac_stats == NULL)
1013 			vf_flag = true;
1014 	}
1015 
1016 	/* If the device is a VF or VF-repr, then there will be no MAC stats */
1017 	if (vf_flag) {
1018 		for (count = 0; count < size; count++) {
1019 			if (nfp_net_xstats[count].group == NFP_XSTAT_GROUP_MAC)
1020 				break;
1021 		}
1022 
1023 		return count;
1024 	}
1025 
1026 	return size;
1027 }
1028 
1029 static const struct nfp_xstat *
1030 nfp_net_xstats_info(const struct rte_eth_dev *dev,
1031 		uint32_t index)
1032 {
1033 	if (index >= nfp_net_xstats_size(dev)) {
1034 		PMD_DRV_LOG(ERR, "xstat index out of bounds");
1035 		return NULL;
1036 	}
1037 
1038 	return &nfp_net_xstats[index];
1039 }
1040 
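/*
 * Read a single xstat counter, either from the MAC stats area or from the
 * config BAR depending on its group. Unless a raw value is requested, the
 * baseline recorded at the last xstats reset is subtracted.
 */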
1041 static uint64_t
1042 nfp_net_xstats_value(const struct rte_eth_dev *dev,
1043 		uint32_t index,
1044 		bool raw)
1045 {
1046 	uint64_t value;
1047 	uint8_t *mac_stats;
1048 	struct nfp_net_hw *hw;
1049 	struct nfp_xstat xstat;
1050 	struct rte_eth_xstat *xstats_base;
1051 	struct nfp_flower_representor *repr;
1052 
1053 	if (rte_eth_dev_is_repr(dev)) {
1054 		repr = dev->data->dev_private;
1055 		hw = repr->app_fw_flower->pf_hw;
1056 
1057 		mac_stats = repr->mac_stats;
1058 		xstats_base = repr->repr_xstats_base;
1059 	} else {
1060 		hw = dev->data->dev_private;
1061 
1062 		mac_stats = hw->mac_stats;
1063 		xstats_base = hw->eth_xstats_base;
1064 	}
1065 
1066 	xstat = nfp_net_xstats[index];
1067 
1068 	if (xstat.group == NFP_XSTAT_GROUP_MAC)
1069 		value = nn_readq(mac_stats + xstat.offset);
1070 	else
1071 		value = nn_cfg_readq(&hw->super, xstat.offset);
1072 
1073 	if (raw)
1074 		return value;
1075 
1076 	/*
1077 	 * A baseline value of each statistic counter is recorded when stats are "reset".
1078 	 * Thus, the value returned by this function needs to be decremented by this
1079 	 * baseline value. The result is the count of this statistic since the last time
1080 	 * it was "reset".
1081 	 */
1082 	return value - xstats_base[index].value;
1083 }
1084 
1085 /* NOTE: All callers ensure dev is always set. */
1086 int
1087 nfp_net_xstats_get_names(struct rte_eth_dev *dev,
1088 		struct rte_eth_xstat_name *xstats_names,
1089 		unsigned int size)
1090 {
1091 	uint32_t id;
1092 	uint32_t nfp_size;
1093 	uint32_t read_size;
1094 
1095 	nfp_size = nfp_net_xstats_size(dev);
1096 
1097 	if (xstats_names == NULL)
1098 		return nfp_size;
1099 
1100 	/* Read at most NFP xstats number of names. */
1101 	read_size = RTE_MIN(size, nfp_size);
1102 
1103 	for (id = 0; id < read_size; id++)
1104 		rte_strlcpy(xstats_names[id].name, nfp_net_xstats[id].name,
1105 				RTE_ETH_XSTATS_NAME_SIZE);
1106 
1107 	return read_size;
1108 }
1109 
1110 /* NOTE: All callers ensure dev is always set. */
1111 int
1112 nfp_net_xstats_get(struct rte_eth_dev *dev,
1113 		struct rte_eth_xstat *xstats,
1114 		unsigned int n)
1115 {
1116 	uint32_t id;
1117 	uint32_t nfp_size;
1118 	uint32_t read_size;
1119 
1120 	nfp_size = nfp_net_xstats_size(dev);
1121 
1122 	if (xstats == NULL)
1123 		return nfp_size;
1124 
1125 	/* Read at most NFP xstats number of values. */
1126 	read_size = RTE_MIN(n, nfp_size);
1127 
1128 	for (id = 0; id < read_size; id++) {
1129 		xstats[id].id = id;
1130 		xstats[id].value = nfp_net_xstats_value(dev, id, false);
1131 	}
1132 
1133 	return read_size;
1134 }
1135 
1136 /*
1137  * NOTE: The only caller rte_eth_xstats_get_names_by_id() ensures dev,
1138  * ids, xstats_names and size are valid, and non-NULL.
1139  */
1140 int
1141 nfp_net_xstats_get_names_by_id(struct rte_eth_dev *dev,
1142 		const uint64_t *ids,
1143 		struct rte_eth_xstat_name *xstats_names,
1144 		unsigned int size)
1145 {
1146 	uint32_t i;
1147 	uint32_t read_size;
1148 
1149 	/* Read at most NFP xstats number of names. */
1150 	read_size = RTE_MIN(size, nfp_net_xstats_size(dev));
1151 
1152 	for (i = 0; i < read_size; i++) {
1153 		const struct nfp_xstat *xstat;
1154 
1155 		/* Make sure ID is valid for device. */
1156 		xstat = nfp_net_xstats_info(dev, ids[i]);
1157 		if (xstat == NULL)
1158 			return -EINVAL;
1159 
1160 		rte_strlcpy(xstats_names[i].name, xstat->name,
1161 				RTE_ETH_XSTATS_NAME_SIZE);
1162 	}
1163 
1164 	return read_size;
1165 }
1166 
1167 /*
1168  * NOTE: The only caller rte_eth_xstats_get_by_id() ensures dev,
1169  * ids, values and n are valid, and non-NULL.
1170  */
1171 int
1172 nfp_net_xstats_get_by_id(struct rte_eth_dev *dev,
1173 		const uint64_t *ids,
1174 		uint64_t *values,
1175 		unsigned int n)
1176 {
1177 	uint32_t i;
1178 	uint32_t read_size;
1179 
1180 	/* Read at most NFP xstats number of values. */
1181 	read_size = RTE_MIN(n, nfp_net_xstats_size(dev));
1182 
1183 	for (i = 0; i < read_size; i++) {
1184 		const struct nfp_xstat *xstat;
1185 
1186 		/* Make sure index is valid for device. */
1187 		xstat = nfp_net_xstats_info(dev, ids[i]);
1188 		if (xstat == NULL)
1189 			return -EINVAL;
1190 
1191 		values[i] = nfp_net_xstats_value(dev, ids[i], false);
1192 	}
1193 
1194 	return read_size;
1195 }
1196 
1197 int
1198 nfp_net_xstats_reset(struct rte_eth_dev *dev)
1199 {
1200 	uint32_t id;
1201 	uint32_t read_size;
1202 	struct nfp_net_hw *hw;
1203 	struct rte_eth_xstat *xstats_base;
1204 	struct nfp_flower_representor *repr;
1205 
1206 	read_size = nfp_net_xstats_size(dev);
1207 
1208 	if (rte_eth_dev_is_repr(dev)) {
1209 		repr = dev->data->dev_private;
1210 		xstats_base = repr->repr_xstats_base;
1211 	} else {
1212 		hw = dev->data->dev_private;
1213 		xstats_base = hw->eth_xstats_base;
1214 	}
1215 
1216 	for (id = 0; id < read_size; id++) {
1217 		xstats_base[id].id = id;
1218 		xstats_base[id].value = nfp_net_xstats_value(dev, id, true);
1219 	}
1220 
1221 	/* Successfully reset xstats, now call function to reset basic stats. */
1222 	if (rte_eth_dev_is_repr(dev))
1223 		return nfp_flower_repr_stats_reset(dev);
1224 	else
1225 		return nfp_net_stats_reset(dev);
1226 }
1227 
1228 void
1229 nfp_net_rx_desc_limits(struct nfp_net_hw_priv *hw_priv,
1230 		uint16_t *min_rx_desc,
1231 		uint16_t *max_rx_desc)
1232 {
1233 	*max_rx_desc = hw_priv->dev_info->max_qc_size;
1234 	*min_rx_desc = hw_priv->dev_info->min_qc_size;
1235 }
1236 
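/*
 * The TX descriptor limits are derived from the queue controller size divided
 * by the number of descriptors a packet consumes, which differs between the
 * NFD3 and NFDK datapaths.
 */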
1237 void
1238 nfp_net_tx_desc_limits(struct nfp_net_hw *hw,
1239 		struct nfp_net_hw_priv *hw_priv,
1240 		uint16_t *min_tx_desc,
1241 		uint16_t *max_tx_desc)
1242 {
1243 	uint16_t tx_dpp;
1244 
1245 	if (hw->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3)
1246 		tx_dpp = NFD3_TX_DESC_PER_PKT;
1247 	else
1248 		tx_dpp = NFDK_TX_DESC_PER_SIMPLE_PKT;
1249 
1250 	*max_tx_desc = hw_priv->dev_info->max_qc_size / tx_dpp;
1251 	*min_tx_desc = hw_priv->dev_info->min_qc_size / tx_dpp;
1252 }
1253 
1254 int
1255 nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1256 {
1257 	uint32_t cap;
1258 	uint32_t cap_extend;
1259 	uint16_t min_rx_desc;
1260 	uint16_t max_rx_desc;
1261 	uint16_t min_tx_desc;
1262 	uint16_t max_tx_desc;
1263 	struct nfp_net_hw *hw;
1264 	struct nfp_net_hw_priv *hw_priv;
1265 
1266 	hw = nfp_net_get_hw(dev);
1267 	hw_priv = dev->process_private;
1268 	if (hw_priv == NULL)
1269 		return -EINVAL;
1270 
1271 	nfp_net_rx_desc_limits(hw_priv, &min_rx_desc, &max_rx_desc);
1272 	nfp_net_tx_desc_limits(hw, hw_priv, &min_tx_desc, &max_tx_desc);
1273 
1274 	dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
1275 	dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
1276 	dev_info->min_rx_bufsize = RTE_ETHER_MIN_MTU;
1277 	/*
1278 	 * The maximum rx packet length is set to the maximum layer 3 MTU,
1279 	 * plus layer 2, CRC and VLAN headers.
1280 	 * The maximum layer 3 MTU (max_mtu) is read from hardware,
1281 	 * which was set by the firmware loaded onto the card.
1282 	 */
1283 	dev_info->max_rx_pktlen = hw->max_mtu + NFP_ETH_OVERHEAD;
1284 	dev_info->max_mtu = hw->max_mtu;
1285 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
1286 	/* Next should change when PF support is implemented */
1287 	dev_info->max_mac_addrs = 1;
1288 
1289 	cap = hw->super.cap;
1290 
1291 	if ((cap & (NFP_NET_CFG_CTRL_RXVLAN | NFP_NET_CFG_CTRL_RXVLAN_V2)) != 0)
1292 		dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
1293 
1294 	if ((cap & NFP_NET_CFG_CTRL_RXQINQ) != 0)
1295 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
1296 
1297 	if ((cap & NFP_NET_CFG_CTRL_RXCSUM) != 0)
1298 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
1299 				RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
1300 				RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
1301 
1302 	if ((cap & (NFP_NET_CFG_CTRL_TXVLAN | NFP_NET_CFG_CTRL_TXVLAN_V2)) != 0)
1303 		dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
1304 
1305 	if ((cap & NFP_NET_CFG_CTRL_TXCSUM) != 0)
1306 		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
1307 				RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
1308 				RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
1309 
1310 	if ((cap & NFP_NET_CFG_CTRL_LSO_ANY) != 0) {
1311 		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
1312 		if ((cap & NFP_NET_CFG_CTRL_USO) != 0)
1313 			dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_UDP_TSO;
1314 		if ((cap & NFP_NET_CFG_CTRL_VXLAN) != 0)
1315 			dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO;
1316 	}
1317 
1318 	if ((cap & NFP_NET_CFG_CTRL_GATHER) != 0)
1319 		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
1320 
1321 	cap_extend = hw->super.cap_ext;
1322 	if ((cap_extend & NFP_NET_CFG_CTRL_IPSEC) != 0) {
1323 		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_SECURITY;
1324 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_SECURITY;
1325 	}
1326 
1327 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
1328 		.rx_thresh = {
1329 			.pthresh = DEFAULT_RX_PTHRESH,
1330 			.hthresh = DEFAULT_RX_HTHRESH,
1331 			.wthresh = DEFAULT_RX_WTHRESH,
1332 		},
1333 		.rx_free_thresh = DEFAULT_RX_FREE_THRESH,
1334 		.rx_drop_en = 0,
1335 	};
1336 
1337 	dev_info->default_txconf = (struct rte_eth_txconf) {
1338 		.tx_thresh = {
1339 			.pthresh = DEFAULT_TX_PTHRESH,
1340 			.hthresh = DEFAULT_TX_HTHRESH,
1341 			.wthresh = DEFAULT_TX_WTHRESH,
1342 		},
1343 		.tx_free_thresh = DEFAULT_TX_FREE_THRESH,
1344 		.tx_rs_thresh = DEFAULT_TX_RSBIT_THRESH,
1345 	};
1346 
1347 	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
1348 		.nb_max = max_rx_desc,
1349 		.nb_min = min_rx_desc,
1350 		.nb_align = NFP_ALIGN_RING_DESC,
1351 	};
1352 
1353 	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
1354 		.nb_max = max_tx_desc,
1355 		.nb_min = min_tx_desc,
1356 		.nb_align = NFP_ALIGN_RING_DESC,
1357 		.nb_seg_max = NFP_TX_MAX_SEG,
1358 		.nb_mtu_seg_max = NFP_TX_MAX_MTU_SEG,
1359 	};
1360 
1361 	if ((cap & NFP_NET_CFG_CTRL_RSS_ANY) != 0) {
1362 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
1363 		dev_info->flow_type_rss_offloads = NFP_NET_RSS_CAP;
1364 		dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ;
1365 		dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ;
1366 	}
1367 
1368 	/* Only PF supports getting speed capability. */
1369 	if (hw_priv->pf_dev != NULL)
1370 		dev_info->speed_capa = hw_priv->pf_dev->speed_capa;
1371 
1372 	return 0;
1373 }
1374 
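/*
 * Read the PCI identification and the read-only fields of the config BAR, and
 * initialize the parts of the nfp_net_hw structure that are common to the PF
 * and VF drivers.
 */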
1375 int
1376 nfp_net_common_init(struct rte_pci_device *pci_dev,
1377 		struct nfp_net_hw *hw)
1378 {
1379 	const int stride = 4;
1380 
1381 	hw->device_id = pci_dev->id.device_id;
1382 	hw->vendor_id = pci_dev->id.vendor_id;
1383 	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
1384 	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
1385 
1386 	hw->max_rx_queues = nn_cfg_readl(&hw->super, NFP_NET_CFG_MAX_RXRINGS);
1387 	hw->max_tx_queues = nn_cfg_readl(&hw->super, NFP_NET_CFG_MAX_TXRINGS);
1388 	if (hw->max_rx_queues == 0 || hw->max_tx_queues == 0) {
1389 		PMD_INIT_LOG(ERR, "Device %s cannot be used: there are no valid queue "
1390 				"pairs available", pci_dev->name);
1391 		return -ENODEV;
1392 	}
1393 
1394 	nfp_net_cfg_read_version(hw);
1395 	if (!nfp_net_is_valid_nfd_version(hw->ver))
1396 		return -EINVAL;
1397 
1398 	if (nfp_net_check_dma_mask(hw, pci_dev->name) != 0)
1399 		return -ENODEV;
1400 
1401 	/* Get some of the read-only fields from the config BAR */
1402 	hw->super.cap = nn_cfg_readl(&hw->super, NFP_NET_CFG_CAP);
1403 	hw->super.cap_ext = nn_cfg_readl(&hw->super, NFP_NET_CFG_CAP_WORD1);
1404 	hw->max_mtu = nn_cfg_readl(&hw->super, NFP_NET_CFG_MAX_MTU);
1405 	hw->flbufsz = DEFAULT_FLBUF_SIZE;
1406 
1407 	nfp_net_meta_init_format(hw);
1408 
1409 	/* Read the Rx offset configured from firmware */
1410 	if (hw->ver.major < 2)
1411 		hw->rx_offset = NFP_NET_RX_OFFSET;
1412 	else
1413 		hw->rx_offset = nn_cfg_readl(&hw->super, NFP_NET_CFG_RX_OFFSET);
1414 
1415 	hw->super.ctrl = 0;
1416 	hw->stride_rx = stride;
1417 	hw->stride_tx = stride;
1418 
1419 	return 0;
1420 }
1421 
1422 const uint32_t *
1423 nfp_net_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements)
1424 {
1425 	struct nfp_net_hw *net_hw;
1426 	static const uint32_t ptypes[] = {
1427 		RTE_PTYPE_L2_ETHER,
1428 		RTE_PTYPE_L3_IPV4,
1429 		RTE_PTYPE_L3_IPV4_EXT,
1430 		RTE_PTYPE_L3_IPV6,
1431 		RTE_PTYPE_L3_IPV6_EXT,
1432 		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1433 		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
1434 		RTE_PTYPE_L4_TCP,
1435 		RTE_PTYPE_L4_UDP,
1436 		RTE_PTYPE_L4_FRAG,
1437 		RTE_PTYPE_L4_NONFRAG,
1438 		RTE_PTYPE_L4_ICMP,
1439 		RTE_PTYPE_L4_SCTP,
1440 		RTE_PTYPE_TUNNEL_VXLAN,
1441 		RTE_PTYPE_TUNNEL_NVGRE,
1442 		RTE_PTYPE_TUNNEL_GENEVE,
1443 		RTE_PTYPE_INNER_L2_ETHER,
1444 		RTE_PTYPE_INNER_L3_IPV4,
1445 		RTE_PTYPE_INNER_L3_IPV4_EXT,
1446 		RTE_PTYPE_INNER_L3_IPV6,
1447 		RTE_PTYPE_INNER_L3_IPV6_EXT,
1448 		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
1449 		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
1450 		RTE_PTYPE_INNER_L4_TCP,
1451 		RTE_PTYPE_INNER_L4_UDP,
1452 		RTE_PTYPE_INNER_L4_FRAG,
1453 		RTE_PTYPE_INNER_L4_NONFRAG,
1454 		RTE_PTYPE_INNER_L4_ICMP,
1455 		RTE_PTYPE_INNER_L4_SCTP,
1456 	};
1457 
1458 	if (dev->rx_pkt_burst == NULL)
1459 		return NULL;
1460 
1461 	net_hw = dev->data->dev_private;
1462 	if ((net_hw->super.ctrl_ext & NFP_NET_CFG_CTRL_PKT_TYPE) == 0)
1463 		return NULL;
1464 
1465 	*no_of_elements = RTE_DIM(ptypes);
1466 	return ptypes;
1467 }
1468 
1469 int
1470 nfp_rx_queue_intr_enable(struct rte_eth_dev *dev,
1471 		uint16_t queue_id)
1472 {
1473 	uint16_t base = 0;
1474 	struct nfp_net_hw *hw;
1475 	struct rte_pci_device *pci_dev;
1476 
1477 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1478 	if (rte_intr_type_get(pci_dev->intr_handle) != RTE_INTR_HANDLE_UIO)
1479 		base = 1;
1480 
1481 	/* Make sure all updates are written before un-masking */
1482 	rte_wmb();
1483 
1484 	hw = nfp_net_get_hw(dev);
1485 	nn_cfg_writeb(&hw->super, NFP_NET_CFG_ICR(base + queue_id),
1486 			NFP_NET_CFG_ICR_UNMASKED);
1487 	return 0;
1488 }
1489 
1490 int
1491 nfp_rx_queue_intr_disable(struct rte_eth_dev *dev,
1492 		uint16_t queue_id)
1493 {
1494 	uint16_t base = 0;
1495 	struct nfp_net_hw *hw;
1496 	struct rte_pci_device *pci_dev;
1497 
1498 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1499 	if (rte_intr_type_get(pci_dev->intr_handle) != RTE_INTR_HANDLE_UIO)
1500 		base = 1;
1501 
1502 	/* Make sure all updates are written before un-masking */
1503 	rte_wmb();
1504 
1505 	hw = nfp_net_get_hw(dev);
1506 	nn_cfg_writeb(&hw->super, NFP_NET_CFG_ICR(base + queue_id), NFP_NET_CFG_ICR_RXTX);
1507 
1508 	return 0;
1509 }
1510 
1511 static void
1512 nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
1513 {
1514 	struct rte_eth_link link;
1515 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1516 
1517 	rte_eth_linkstatus_get(dev, &link);
1518 	if (link.link_status != 0)
1519 		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
1520 				dev->data->port_id, link.link_speed,
1521 				link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
1522 				"full-duplex" : "half-duplex");
1523 	else
1524 		PMD_DRV_LOG(INFO, "Port %d: Link Down", dev->data->port_id);
1525 
1526 	PMD_DRV_LOG(INFO, "PCI Address: " PCI_PRI_FMT,
1527 			pci_dev->addr.domain, pci_dev->addr.bus,
1528 			pci_dev->addr.devid, pci_dev->addr.function);
1529 }
1530 
1531 /*
1532  * Unmask an interrupt
1533  *
1534  * If MSI-X auto-masking is enabled clear the mask bit, otherwise
1535  * clear the ICR for the entry.
1536  */
1537 void
1538 nfp_net_irq_unmask(struct rte_eth_dev *dev)
1539 {
1540 	struct nfp_net_hw *hw;
1541 	struct rte_pci_device *pci_dev;
1542 
1543 	hw = nfp_net_get_hw(dev);
1544 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1545 
1546 	/* Make sure all updates are written before un-masking */
1547 	rte_wmb();
1548 
1549 	if ((hw->super.ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) != 0) {
1550 		/* If MSI-X auto-masking is used, clear the entry */
1551 		rte_intr_ack(pci_dev->intr_handle);
1552 	} else {
1553 		nn_cfg_writeb(&hw->super, NFP_NET_CFG_ICR(NFP_NET_IRQ_LSC_IDX),
1554 				NFP_NET_CFG_ICR_UNMASKED);
1555 	}
1556 }
1557 
1558 /**
1559  * Interrupt handler registered as an alarm callback for delayed handling of
1560  * the LSC interrupt, waiting for the NIC state to become stable. The NFP
1561  * interrupt state is not stable right after the link goes down, so the
1562  * handler waits 4 seconds before reading a stable status.
1563  *
1564  * @param param
1565  *   The address of parameter (struct rte_eth_dev *)
1566  */
1567 void
1568 nfp_net_dev_interrupt_delayed_handler(void *param)
1569 {
1570 	struct rte_eth_dev *dev = param;
1571 
1572 	nfp_net_link_update(dev, 0);
1573 	rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
1574 
1575 	nfp_net_dev_link_status_print(dev);
1576 
1577 	/* Unmasking */
1578 	nfp_net_irq_unmask(dev);
1579 }
1580 
1581 void
1582 nfp_net_dev_interrupt_handler(void *param)
1583 {
1584 	int64_t timeout;
1585 	struct rte_eth_link link;
1586 	struct rte_eth_dev *dev = param;
1587 
1588 	PMD_DRV_LOG(DEBUG, "Received an LSC interrupt");
1589 
1590 	rte_eth_linkstatus_get(dev, &link);
1591 
1592 	nfp_net_link_update(dev, 0);
1593 
1594 	/* Link was down, so it is likely coming up */
1595 	if (link.link_status == 0) {
1596 		/* Handle it 1 sec later, waiting for it to become stable */
1597 		timeout = NFP_NET_LINK_UP_CHECK_TIMEOUT;
1598 	} else {  /* Link was up, so it is likely going down */
1599 		/* Handle it 4 sec later, waiting for it to become stable */
1600 		timeout = NFP_NET_LINK_DOWN_CHECK_TIMEOUT;
1601 	}
1602 
1603 	if (rte_eal_alarm_set(timeout * 1000,
1604 			nfp_net_dev_interrupt_delayed_handler,
1605 			(void *)dev) != 0) {
1606 		PMD_INIT_LOG(ERR, "Error setting alarm");
1607 		/* Unmasking */
1608 		nfp_net_irq_unmask(dev);
1609 	}
1610 }
1611 
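/*
 * Set a new MTU. This is only allowed while the port is stopped and while the
 * new MTU still fits into the configured free-list buffer size.
 */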
1612 int
1613 nfp_net_dev_mtu_set(struct rte_eth_dev *dev,
1614 		uint16_t mtu)
1615 {
1616 	struct nfp_net_hw *hw;
1617 
1618 	hw = nfp_net_get_hw(dev);
1619 
1620 	/* MTU setting is forbidden if port is started */
1621 	if (dev->data->dev_started) {
1622 		PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
1623 				dev->data->port_id);
1624 		return -EBUSY;
1625 	}
1626 
1627 	/* MTU larger than current mbufsize not supported */
1628 	if (mtu > hw->flbufsz) {
1629 		PMD_DRV_LOG(ERR, "MTU (%u) larger than current mbufsize (%u) not supported",
1630 				mtu, hw->flbufsz);
1631 		return -ERANGE;
1632 	}
1633 
1634 	/* Writing to configuration space */
1635 	nn_cfg_writel(&hw->super, NFP_NET_CFG_MTU, mtu);
1636 
1637 	hw->mtu = mtu;
1638 
1639 	return 0;
1640 }
1641 
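/*
 * Apply the VLAN and QinQ stripping settings from the current RX offload
 * configuration, issuing a reconfig only when the control word changes.
 */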
1642 int
1643 nfp_net_vlan_offload_set(struct rte_eth_dev *dev,
1644 		int mask)
1645 {
1646 	int ret;
1647 	uint32_t update;
1648 	uint32_t new_ctrl;
1649 	struct nfp_hw *hw;
1650 	uint64_t rx_offload;
1651 	struct nfp_net_hw *net_hw;
1652 	uint32_t rxvlan_ctrl = 0;
1653 
1654 	net_hw = nfp_net_get_hw(dev);
1655 	hw = &net_hw->super;
1656 	rx_offload = dev->data->dev_conf.rxmode.offloads;
1657 	new_ctrl = hw->ctrl;
1658 
1659 	/* VLAN stripping setting */
1660 	if ((mask & RTE_ETH_VLAN_STRIP_MASK) != 0) {
1661 		nfp_net_enable_rxvlan_cap(net_hw, &rxvlan_ctrl);
1662 		if ((rx_offload & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) != 0)
1663 			new_ctrl |= rxvlan_ctrl;
1664 		else
1665 			new_ctrl &= ~rxvlan_ctrl;
1666 	}
1667 
1668 	/* QinQ stripping setting */
1669 	if ((mask & RTE_ETH_QINQ_STRIP_MASK) != 0) {
1670 		if ((rx_offload & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) != 0)
1671 			new_ctrl |= NFP_NET_CFG_CTRL_RXQINQ;
1672 		else
1673 			new_ctrl &= ~NFP_NET_CFG_CTRL_RXQINQ;
1674 	}
1675 
1676 	if (new_ctrl == hw->ctrl)
1677 		return 0;
1678 
1679 	update = NFP_NET_CFG_UPDATE_GEN;
1680 
1681 	ret = nfp_reconfig(hw, new_ctrl, update);
1682 	if (ret != 0)
1683 		return ret;
1684 
1685 	hw->ctrl = new_ctrl;
1686 
1687 	return 0;
1688 }
1689 
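/*
 * Write the RSS redirection table entries selected by the masks in reta_conf
 * into the config BAR, packing four 8-bit queue indices into each 32-bit
 * word. For example (illustrative only), with a 0xF mask and entries
 * {q0, q1, q2, q3} for i == 0, the word written is
 * q0 | (q1 << 8) | (q2 << 16) | (q3 << 24).
 */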
1690 static int
1691 nfp_net_rss_reta_write(struct rte_eth_dev *dev,
1692 		struct rte_eth_rss_reta_entry64 *reta_conf,
1693 		uint16_t reta_size)
1694 {
1695 	uint16_t i;
1696 	uint16_t j;
1697 	uint16_t idx;
1698 	uint8_t mask;
1699 	uint32_t reta;
1700 	uint16_t shift;
1701 	struct nfp_hw *hw;
1702 	struct nfp_net_hw *net_hw;
1703 
1704 	net_hw = nfp_net_get_hw(dev);
1705 	hw = &net_hw->super;
1706 
1707 	if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
1708 		PMD_DRV_LOG(ERR, "The size of the configured hash lookup table (%hu)"
1709 				" does not match the size supported by the hardware (%d)",
1710 				reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
1711 		return -EINVAL;
1712 	}
1713 
1714 	/*
1715 	 * Update the Redirection Table. There are 128 8-bit entries which can be
1716 	 * managed as 32 32-bit entries.
1717 	 */
1718 	for (i = 0; i < reta_size; i += 4) {
1719 		/* Handling 4 RSS entries per loop */
1720 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
1721 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
1722 		mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
1723 		if (mask == 0)
1724 			continue;
1725 
1726 		reta = 0;
1727 
1728 		/* If all 4 entries were set, don't need read RETA register */
1729 		/* If all 4 entries were set, there is no need to read the RETA register */
1730 			reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + i);
1731 
1732 		for (j = 0; j < 4; j++) {
1733 			if ((mask & (0x1 << j)) == 0)
1734 				continue;
1735 
1736 			/* Clearing the entry bits */
1737 			if (mask != 0xF)
1738 				reta &= ~(0xFF << (8 * j));
1739 
1740 			reta |= reta_conf[idx].reta[shift + j] << (8 * j);
1741 		}
1742 
1743 		nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift, reta);
1744 	}
1745 
1746 	return 0;
1747 }
1748 
1749 /* Update Redirection Table(RETA) of Receive Side Scaling of Ethernet device */
1750 int
1751 nfp_net_reta_update(struct rte_eth_dev *dev,
1752 		struct rte_eth_rss_reta_entry64 *reta_conf,
1753 		uint16_t reta_size)
1754 {
1755 	int ret;
1756 	uint32_t update;
1757 	struct nfp_hw *hw;
1758 	struct nfp_net_hw *net_hw;
1759 
1760 	net_hw = nfp_net_get_hw(dev);
1761 	hw = &net_hw->super;
1762 
1763 	if ((hw->ctrl & NFP_NET_CFG_CTRL_RSS_ANY) == 0)
1764 		return -EINVAL;
1765 
1766 	ret = nfp_net_rss_reta_write(dev, reta_conf, reta_size);
1767 	if (ret != 0)
1768 		return ret;
1769 
1770 	update = NFP_NET_CFG_UPDATE_RSS;
1771 
1772 	if (nfp_reconfig(hw, hw->ctrl, update) != 0)
1773 		return -EIO;
1774 
1775 	return 0;
1776 }
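
/*
 * Illustrative usage (not part of the driver): an application can program
 * the full 128-entry RETA through the generic ethdev API, which ends up
 * calling nfp_net_reta_update(). The port_id and nb_rx_queues variables
 * below are placeholders.
 *
 *     struct rte_eth_rss_reta_entry64 reta_conf[2];
 *     uint16_t i;
 *
 *     memset(reta_conf, 0, sizeof(reta_conf));
 *     reta_conf[0].mask = UINT64_MAX;
 *     reta_conf[1].mask = UINT64_MAX;
 *     for (i = 0; i < NFP_NET_CFG_RSS_ITBL_SZ; i++)
 *         reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].reta[i % RTE_ETH_RETA_GROUP_SIZE] =
 *                 i % nb_rx_queues;
 *     rte_eth_dev_rss_reta_update(port_id, reta_conf, NFP_NET_CFG_RSS_ITBL_SZ);
 */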
1777 
1778 /* Query the Redirection Table (RETA) used by Receive Side Scaling (RSS). */
1779 int
1780 nfp_net_reta_query(struct rte_eth_dev *dev,
1781 		struct rte_eth_rss_reta_entry64 *reta_conf,
1782 		uint16_t reta_size)
1783 {
1784 	uint16_t i;
1785 	uint16_t j;
1786 	uint16_t idx;
1787 	uint8_t mask;
1788 	uint32_t reta;
1789 	uint16_t shift;
1790 	struct nfp_hw *hw;
1791 	struct nfp_net_hw *net_hw;
1792 
1793 	net_hw = nfp_net_get_hw(dev);
1794 	hw = &net_hw->super;
1795 
1796 	if ((hw->ctrl & NFP_NET_CFG_CTRL_RSS_ANY) == 0)
1797 		return -EINVAL;
1798 
1799 	if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
1800 		PMD_DRV_LOG(ERR, "The size of the configured hash lookup table (%hu)"
1801 				" does not match what the hardware supports (%d)",
1802 				reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
1803 		return -EINVAL;
1804 	}
1805 
1806 	/*
1807 	 * Read the Redirection Table. There are 128 8-bit entries which can
1808 	 * be managed as 32 32-bit entries.
1809 	 */
1810 	for (i = 0; i < reta_size; i += 4) {
1811 		/* Handling 4 RSS entries per loop */
1812 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
1813 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
1814 		mask = (reta_conf[idx].mask >> shift) & 0xF;
1815 
1816 		if (mask == 0)
1817 			continue;
1818 
1819 		reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift);
1820 		for (j = 0; j < 4; j++) {
1821 			if ((mask & (0x1 << j)) == 0)
1822 				continue;
1823 
1824 			reta_conf[idx].reta[shift + j] =
1825 					(uint8_t)((reta >> (8 * j)) & 0xFF);
1826 		}
1827 	}
1828 
1829 	return 0;
1830 }
1831 
1832 static int
1833 nfp_net_rss_hash_write(struct rte_eth_dev *dev,
1834 		struct rte_eth_rss_conf *rss_conf)
1835 {
1836 	uint8_t i;
1837 	uint8_t key;
1838 	uint64_t rss_hf;
1839 	struct nfp_hw *hw;
1840 	struct nfp_net_hw *net_hw;
1841 	uint32_t cfg_rss_ctrl = 0;
1842 
1843 	net_hw = nfp_net_get_hw(dev);
1844 	hw = &net_hw->super;
1845 
1846 	/* Writing the key byte by byte */
1847 	for (i = 0; i < rss_conf->rss_key_len; i++) {
1848 		memcpy(&key, &rss_conf->rss_key[i], 1);
1849 		nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY + i, key);
1850 	}
1851 
1852 	rss_hf = rss_conf->rss_hf;
1853 
1854 	if ((rss_hf & RTE_ETH_RSS_IPV4) != 0)
1855 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4;
1856 
1857 	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) != 0)
1858 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_TCP;
1859 
1860 	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) != 0)
1861 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_UDP;
1862 
1863 	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_SCTP) != 0)
1864 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_SCTP;
1865 
1866 	if ((rss_hf & RTE_ETH_RSS_IPV6) != 0)
1867 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6;
1868 
1869 	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) != 0)
1870 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_TCP;
1871 
1872 	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) != 0)
1873 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_UDP;
1874 
1875 	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_SCTP) != 0)
1876 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_SCTP;
1877 
1878 	cfg_rss_ctrl |= NFP_NET_CFG_RSS_MASK;
1879 
1880 	if (rte_eth_dev_is_repr(dev))
1881 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_CRC32;
1882 	else
1883 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_TOEPLITZ;
1884 
1885 	/* Configure which fields the RSS hash is computed over and the hash function used. */
1886 	nn_cfg_writel(hw, NFP_NET_CFG_RSS_CTRL, cfg_rss_ctrl);
1887 
1888 	/* Writing the key size */
1889 	nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY_SZ, rss_conf->rss_key_len);
1890 
1891 	return 0;
1892 }
1893 
1894 int
1895 nfp_net_rss_hash_update(struct rte_eth_dev *dev,
1896 		struct rte_eth_rss_conf *rss_conf)
1897 {
1898 	uint32_t update;
1899 	uint64_t rss_hf;
1900 	struct nfp_hw *hw;
1901 	struct nfp_net_hw *net_hw;
1902 
1903 	net_hw = nfp_net_get_hw(dev);
1904 	hw = &net_hw->super;
1905 
1906 	rss_hf = rss_conf->rss_hf;
1907 
1908 	/* Checking if RSS is enabled */
1909 	if ((hw->ctrl & NFP_NET_CFG_CTRL_RSS_ANY) == 0) {
1910 		if (rss_hf != 0) {
1911 			PMD_DRV_LOG(ERR, "RSS unsupported");
1912 			return -EINVAL;
1913 		}
1914 
1915 		return 0; /* Nothing to do */
1916 	}
1917 
1918 	if (rss_conf->rss_key_len > NFP_NET_CFG_RSS_KEY_SZ) {
1919 		PMD_DRV_LOG(ERR, "RSS hash key too long");
1920 		return -EINVAL;
1921 	}
1922 
1923 	nfp_net_rss_hash_write(dev, rss_conf);
1924 
1925 	update = NFP_NET_CFG_UPDATE_RSS;
1926 
1927 	if (nfp_reconfig(hw, hw->ctrl, update) != 0)
1928 		return -EIO;
1929 
1930 	return 0;
1931 }
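
/*
 * Illustrative usage (not part of the driver): updating the RSS key and
 * hash fields through the generic ethdev API, which lands in
 * nfp_net_rss_hash_update(). The port_id and rss_key contents below are
 * placeholders; the key must not exceed NFP_NET_CFG_RSS_KEY_SZ bytes.
 *
 *     uint8_t rss_key[NFP_NET_CFG_RSS_KEY_SZ];
 *     struct rte_eth_rss_conf rss_conf = {
 *         .rss_key = rss_key,
 *         .rss_key_len = sizeof(rss_key),
 *         .rss_hf = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_TCP,
 *     };
 *
 *     rte_eth_dev_rss_hash_update(port_id, &rss_conf);
 */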
1932 
1933 int
1934 nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev,
1935 		struct rte_eth_rss_conf *rss_conf)
1936 {
1937 	uint8_t i;
1938 	uint8_t key;
1939 	uint64_t rss_hf;
1940 	struct nfp_hw *hw;
1941 	uint32_t cfg_rss_ctrl;
1942 	struct nfp_net_hw *net_hw;
1943 
1944 	net_hw = nfp_net_get_hw(dev);
1945 	hw = &net_hw->super;
1946 
1947 	if ((hw->ctrl & NFP_NET_CFG_CTRL_RSS_ANY) == 0)
1948 		return -EINVAL;
1949 
1950 	rss_hf = rss_conf->rss_hf;
1951 	cfg_rss_ctrl = nn_cfg_readl(hw, NFP_NET_CFG_RSS_CTRL);
1952 
1953 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4) != 0)
1954 		rss_hf |= RTE_ETH_RSS_IPV4;
1955 
1956 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_TCP) != 0)
1957 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
1958 
1959 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_TCP) != 0)
1960 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
1961 
1962 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_UDP) != 0)
1963 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
1964 
1965 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_UDP) != 0)
1966 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
1967 
1968 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6) != 0)
1969 		rss_hf |= RTE_ETH_RSS_IPV6;
1970 
1971 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_SCTP) != 0)
1972 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_SCTP;
1973 
1974 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_SCTP) != 0)
1975 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_SCTP;
1976 
1977 	/* Propagate current RSS hash functions to caller */
1978 	rss_conf->rss_hf = rss_hf;
1979 
1980 	/* Reading the key size */
1981 	rss_conf->rss_key_len = nn_cfg_readl(hw, NFP_NET_CFG_RSS_KEY_SZ);
1982 
1983 	/* Reading the key byte by byte */
1984 	for (i = 0; i < rss_conf->rss_key_len; i++) {
1985 		key = nn_cfg_readb(hw, NFP_NET_CFG_RSS_KEY + i);
1986 		memcpy(&rss_conf->rss_key[i], &key, 1);
1987 	}
1988 
1989 	return 0;
1990 }
1991 
1992 int
1993 nfp_net_rss_config_default(struct rte_eth_dev *dev)
1994 {
1995 	int ret;
1996 	uint8_t i;
1997 	uint8_t j;
1998 	uint16_t queue = 0;
1999 	struct rte_eth_conf *dev_conf;
2000 	struct rte_eth_rss_conf rss_conf;
2001 	uint16_t rx_queues = dev->data->nb_rx_queues;
2002 	struct rte_eth_rss_reta_entry64 nfp_reta_conf[2];
2003 
2004 	nfp_reta_conf[0].mask = ~0x0;
2005 	nfp_reta_conf[1].mask = ~0x0;
2006 
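	/*
	 * Spread the RX queues over both 64-entry halves of the redirection
	 * table in a round-robin fashion.
	 */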
2007 	for (i = 0; i < 0x40; i += 8) {
2008 		for (j = i; j < (i + 8); j++) {
2009 			nfp_reta_conf[0].reta[j] = queue;
2010 			nfp_reta_conf[1].reta[j] = queue++;
2011 			queue %= rx_queues;
2012 		}
2013 	}
2014 
2015 	ret = nfp_net_rss_reta_write(dev, nfp_reta_conf, 0x80);
2016 	if (ret != 0)
2017 		return ret;
2018 
2019 	dev_conf = &dev->data->dev_conf;
2020 	if (dev_conf == NULL) {
2021 		PMD_DRV_LOG(ERR, "Wrong RSS configuration");
2022 		return -EINVAL;
2023 	}
2024 
2025 	rss_conf = dev_conf->rx_adv_conf.rss_conf;
2026 	ret = nfp_net_rss_hash_write(dev, &rss_conf);
2027 
2028 	return ret;
2029 }
2030 
2031 void
2032 nfp_net_stop_rx_queue(struct rte_eth_dev *dev)
2033 {
2034 	uint16_t i;
2035 	struct nfp_net_rxq *this_rx_q;
2036 
2037 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
2038 		this_rx_q = dev->data->rx_queues[i];
2039 		nfp_net_reset_rx_queue(this_rx_q);
2040 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
2041 	}
2042 }
2043 
2044 void
2045 nfp_net_close_rx_queue(struct rte_eth_dev *dev)
2046 {
2047 	uint16_t i;
2048 	struct nfp_net_rxq *this_rx_q;
2049 
2050 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
2051 		this_rx_q = dev->data->rx_queues[i];
2052 		nfp_net_reset_rx_queue(this_rx_q);
2053 		nfp_net_rx_queue_release(dev, i);
2054 	}
2055 }
2056 
2057 void
2058 nfp_net_stop_tx_queue(struct rte_eth_dev *dev)
2059 {
2060 	uint16_t i;
2061 	struct nfp_net_txq *this_tx_q;
2062 
2063 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
2064 		this_tx_q = dev->data->tx_queues[i];
2065 		nfp_net_reset_tx_queue(this_tx_q);
2066 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
2067 	}
2068 }
2069 
2070 void
2071 nfp_net_close_tx_queue(struct rte_eth_dev *dev)
2072 {
2073 	uint16_t i;
2074 	struct nfp_net_txq *this_tx_q;
2075 
2076 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
2077 		this_tx_q = dev->data->tx_queues[i];
2078 		nfp_net_reset_tx_queue(this_tx_q);
2079 		nfp_net_tx_queue_release(dev, i);
2080 	}
2081 }
2082 
2083 int
2084 nfp_net_set_vxlan_port(struct nfp_net_hw *net_hw,
2085 		size_t idx,
2086 		uint16_t port)
2087 {
2088 	int ret;
2089 	uint32_t i;
2090 	struct nfp_hw *hw = &net_hw->super;
2091 
2092 	if (idx >= NFP_NET_N_VXLAN_PORTS) {
2093 		PMD_DRV_LOG(ERR, "The idx value is out of range.");
2094 		return -ERANGE;
2095 	}
2096 
2097 	net_hw->vxlan_ports[idx] = port;
2098 
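	/*
	 * Each 32-bit VXLAN port register holds two UDP ports: ports[i] in the
	 * low 16 bits and ports[i + 1] in the high 16 bits.
	 */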
2099 	for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i += 2) {
2100 		nn_cfg_writel(hw, NFP_NET_CFG_VXLAN_PORT + i * sizeof(port),
2101 				(net_hw->vxlan_ports[i + 1] << 16) | net_hw->vxlan_ports[i]);
2102 	}
2103 
2104 	rte_spinlock_lock(&hw->reconfig_lock);
2105 
2106 	nn_cfg_writel(hw, NFP_NET_CFG_UPDATE, NFP_NET_CFG_UPDATE_VXLAN);
2107 	rte_wmb();
2108 
2109 	ret = nfp_reconfig_real(hw, NFP_NET_CFG_UPDATE_VXLAN);
2110 
2111 	rte_spinlock_unlock(&hw->reconfig_lock);
2112 
2113 	return ret;
2114 }
2115 
2116 /*
2117  * Firmware using the NFD3 datapath cannot handle DMA addresses requiring
2118  * more than 40 bits.
2119  */
2120 int
2121 nfp_net_check_dma_mask(struct nfp_net_hw *hw,
2122 		char *name)
2123 {
2124 	if (hw->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3 &&
2125 			rte_mem_check_dma_mask(40) != 0) {
2126 		PMD_DRV_LOG(ERR, "Device %s cannot be used: DMA mask is restricted to 40 bits!",
2127 				name);
2128 		return -ENODEV;
2129 	}
2130 
2131 	return 0;
2132 }
2133 
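/*
 * Reserve an IOVA-contiguous memzone holding one 64-bit TX ring write-back
 * slot per TX queue.
 */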
2134 int
2135 nfp_net_txrwb_alloc(struct rte_eth_dev *eth_dev)
2136 {
2137 	struct nfp_net_hw *net_hw;
2138 	char mz_name[RTE_MEMZONE_NAMESIZE];
2139 
2140 	net_hw = nfp_net_get_hw(eth_dev);
2141 	snprintf(mz_name, sizeof(mz_name), "%s_TXRWB", eth_dev->data->name);
2142 	net_hw->txrwb_mz = rte_memzone_reserve_aligned(mz_name,
2143 			net_hw->max_tx_queues * sizeof(uint64_t),
2144 			rte_socket_id(),
2145 			RTE_MEMZONE_IOVA_CONTIG, RTE_CACHE_LINE_SIZE);
2146 	if (net_hw->txrwb_mz == NULL) {
2147 		PMD_INIT_LOG(ERR, "Failed to allocate %s for TX ring write back",
2148 				mz_name);
2149 		return -ENOMEM;
2150 	}
2151 
2152 	return 0;
2153 }
2154 
2155 void
2156 nfp_net_txrwb_free(struct rte_eth_dev *eth_dev)
2157 {
2158 	struct nfp_net_hw *net_hw;
2159 
2160 	net_hw = nfp_net_get_hw(eth_dev);
2161 	if (net_hw->txrwb_mz == NULL)
2162 		return;
2163 
2164 	rte_memzone_free(net_hw->txrwb_mz);
2165 	net_hw->txrwb_mz = NULL;
2166 }
2167 
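/*
 * Read the 32-bit firmware version word from the control BAR and decode it
 * through the nfp_net_fw_ver layout.
 */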
2168 void
2169 nfp_net_cfg_read_version(struct nfp_net_hw *hw)
2170 {
2171 	union {
2172 		uint32_t whole;
2173 		struct nfp_net_fw_ver split;
2174 	} version;
2175 
2176 	version.whole = nn_cfg_readl(&hw->super, NFP_NET_CFG_VERSION);
2177 	hw->ver = version.split;
2178 }
2179 
2180 static void
2181 nfp_net_get_nsp_info(struct nfp_net_hw_priv *hw_priv,
2182 		char *nsp_version)
2183 {
2184 	struct nfp_nsp *nsp;
2185 
2186 	nsp = nfp_nsp_open(hw_priv->pf_dev->cpp);
2187 	if (nsp == NULL)
2188 		return;
2189 
2190 	snprintf(nsp_version, FW_VER_LEN, "%hu.%hu",
2191 			nfp_nsp_get_abi_ver_major(nsp),
2192 			nfp_nsp_get_abi_ver_minor(nsp));
2193 
2194 	nfp_nsp_close(nsp);
2195 }
2196 
2197 void
2198 nfp_net_get_fw_version(struct nfp_cpp *cpp,
2199 		uint32_t *mip_version)
2200 {
2201 	struct nfp_mip *mip;
2202 
2203 	mip = nfp_mip_open(cpp);
2204 	if (mip == NULL) {
2205 		*mip_version = 0;
2206 		return;
2207 	}
2208 
2209 	*mip_version = nfp_mip_fw_version(mip);
2210 
2211 	nfp_mip_close(mip);
2212 }
2213 
2214 static void
2215 nfp_net_get_mip_name(struct nfp_net_hw_priv *hw_priv,
2216 		char *mip_name)
2217 {
2218 	struct nfp_mip *mip;
2219 
2220 	mip = nfp_mip_open(hw_priv->pf_dev->cpp);
2221 	if (mip == NULL)
2222 		return;
2223 
2224 	snprintf(mip_name, FW_VER_LEN, "%s", nfp_mip_name(mip));
2225 
2226 	nfp_mip_close(mip);
2227 }
2228 
2229 static void
2230 nfp_net_get_app_name(struct nfp_net_hw_priv *hw_priv,
2231 		char *app_name)
2232 {
2233 	switch (hw_priv->pf_dev->app_fw_id) {
2234 	case NFP_APP_FW_CORE_NIC:
2235 		snprintf(app_name, FW_VER_LEN, "%s", "nic");
2236 		break;
2237 	case NFP_APP_FW_FLOWER_NIC:
2238 		snprintf(app_name, FW_VER_LEN, "%s", "flower");
2239 		break;
2240 	default:
2241 		snprintf(app_name, FW_VER_LEN, "%s", "unknown");
2242 		break;
2243 	}
2244 }
2245 
2246 int
2247 nfp_net_firmware_version_get(struct rte_eth_dev *dev,
2248 		char *fw_version,
2249 		size_t fw_size)
2250 {
2251 	struct nfp_net_hw *hw;
2252 	struct nfp_net_hw_priv *hw_priv;
2253 	char app_name[FW_VER_LEN] = {0};
2254 	char mip_name[FW_VER_LEN] = {0};
2255 	char nsp_version[FW_VER_LEN] = {0};
2256 	char vnic_version[FW_VER_LEN] = {0};
2257 
2258 	if (fw_size < FW_VER_LEN)
2259 		return FW_VER_LEN;
2260 
2261 	hw = nfp_net_get_hw(dev);
2262 	hw_priv = dev->process_private;
2263 
2264 	if (hw->fw_version[0] != 0) {
2265 		snprintf(fw_version, FW_VER_LEN, "%s", hw->fw_version);
2266 		return 0;
2267 	}
2268 
2269 	if (!rte_eth_dev_is_repr(dev)) {
2270 		snprintf(vnic_version, FW_VER_LEN, "%d.%d.%d.%d",
2271 			hw->ver.extend, hw->ver.class,
2272 			hw->ver.major, hw->ver.minor);
2273 	} else {
2274 		snprintf(vnic_version, FW_VER_LEN, "*");
2275 	}
2276 
2277 	nfp_net_get_nsp_info(hw_priv, nsp_version);
2278 	nfp_net_get_mip_name(hw_priv, mip_name);
2279 	nfp_net_get_app_name(hw_priv, app_name);
2280 
2281 	if (nsp_version[0] == 0 || mip_name[0] == 0) {
2282 		snprintf(fw_version, FW_VER_LEN, "%s %s %s %s",
2283 			vnic_version, nsp_version, mip_name, app_name);
2284 		return 0;
2285 	}
2286 
2287 	snprintf(hw->fw_version, FW_VER_LEN, "%s %s %s %s",
2288 			vnic_version, nsp_version, mip_name, app_name);
2289 
2290 	snprintf(fw_version, FW_VER_LEN, "%s", hw->fw_version);
2291 
2292 	return 0;
2293 }
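
/*
 * Illustrative usage (not part of the driver): an application retrieves the
 * firmware version string through the generic ethdev API, which is served
 * by nfp_net_firmware_version_get(). The port_id below is a placeholder.
 *
 *     char fw_version[FW_VER_LEN];
 *
 *     if (rte_eth_dev_fw_version_get(port_id, fw_version, sizeof(fw_version)) == 0)
 *         printf("firmware: %s\n", fw_version);
 */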
2294 
2295 bool
2296 nfp_net_is_valid_nfd_version(struct nfp_net_fw_ver version)
2297 {
2298 	uint8_t nfd_version = version.extend;
2299 
2300 	if (nfd_version == NFP_NET_CFG_VERSION_DP_NFD3)
2301 		return true;
2302 
2303 	if (nfd_version == NFP_NET_CFG_VERSION_DP_NFDK) {
2304 		if (version.major < 5) {
2305 			PMD_INIT_LOG(ERR, "NFDK must use ABI 5 or newer, found: %d",
2306 					version.major);
2307 			return false;
2308 		}
2309 
2310 		return true;
2311 	}
2312 
2313 	return false;
2314 }
2315 
2316 /* Disable RX and TX functions to allow for reconfiguring. */
2317 int
2318 nfp_net_stop(struct rte_eth_dev *dev)
2319 {
2320 	struct nfp_net_hw *hw;
2321 	struct nfp_net_hw_priv *hw_priv;
2322 
2323 	hw = nfp_net_get_hw(dev);
2324 	hw_priv = dev->process_private;
2325 
2326 	nfp_net_disable_queues(dev);
2327 
2328 	/* Clear queues */
2329 	nfp_net_stop_tx_queue(dev);
2330 	nfp_net_stop_rx_queue(dev);
2331 
2332 	nfp_eth_set_configured(hw_priv->pf_dev->cpp, hw->nfp_idx, 0);
2333 
2334 	return 0;
2335 }
2336 
2337 static enum rte_eth_fc_mode
2338 nfp_net_get_pause_mode(struct nfp_eth_table_port *eth_port)
2339 {
2340 	enum rte_eth_fc_mode mode;
2341 
2342 	if (eth_port->rx_pause_enabled) {
2343 		if (eth_port->tx_pause_enabled)
2344 			mode = RTE_ETH_FC_FULL;
2345 		else
2346 			mode = RTE_ETH_FC_RX_PAUSE;
2347 	} else {
2348 		if (eth_port->tx_pause_enabled)
2349 			mode = RTE_ETH_FC_TX_PAUSE;
2350 		else
2351 			mode = RTE_ETH_FC_NONE;
2352 	}
2353 
2354 	return mode;
2355 }
2356 
2357 int
2358 nfp_net_flow_ctrl_get(struct rte_eth_dev *dev,
2359 		struct rte_eth_fc_conf *fc_conf)
2360 {
2361 	struct nfp_net_hw_priv *hw_priv;
2362 	struct nfp_eth_table *nfp_eth_table;
2363 	struct nfp_eth_table_port *eth_port;
2364 
2365 	hw_priv = dev->process_private;
2366 	if (hw_priv == NULL || hw_priv->pf_dev == NULL)
2367 		return -EINVAL;
2368 
2369 	nfp_eth_table = hw_priv->pf_dev->nfp_eth_table;
2370 	eth_port = &nfp_eth_table->ports[dev->data->port_id];
2371 
2372 	/* Currently only the RX/TX pause frame switches are supported. */
2373 	fc_conf->mode = nfp_net_get_pause_mode(eth_port);
2374 
2375 	return 0;
2376 }
2377 
2378 static int
2379 nfp_net_pause_frame_set(struct nfp_net_hw_priv *hw_priv,
2380 		struct nfp_eth_table_port *eth_port,
2381 		enum rte_eth_fc_mode mode)
2382 {
2383 	int err;
2384 	bool flag;
2385 	struct nfp_nsp *nsp;
2386 
2387 	nsp = nfp_eth_config_start(hw_priv->pf_dev->cpp, eth_port->index);
2388 	if (nsp == NULL) {
2389 		PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle.");
2390 		return -EIO;
2391 	}
2392 
2393 	flag = (mode & RTE_ETH_FC_TX_PAUSE) != 0;
2394 	err = nfp_eth_set_tx_pause(nsp, flag);
2395 	if (err != 0) {
2396 		PMD_DRV_LOG(ERR, "Failed to configure TX pause frame.");
2397 		nfp_eth_config_cleanup_end(nsp);
2398 		return err;
2399 	}
2400 
2401 	flag = (mode & RTE_ETH_FC_RX_PAUSE) != 0;
2402 	err = nfp_eth_set_rx_pause(nsp, flag);
2403 	if (err != 0) {
2404 		PMD_DRV_LOG(ERR, "Failed to configure RX pause frame.");
2405 		nfp_eth_config_cleanup_end(nsp);
2406 		return err;
2407 	}
2408 
2409 	err = nfp_eth_config_commit_end(nsp);
2410 	if (err != 0) {
2411 		PMD_DRV_LOG(ERR, "Failed to configure pause frame.");
2412 		return err;
2413 	}
2414 
2415 	return 0;
2416 }
2417 
2418 int
2419 nfp_net_flow_ctrl_set(struct rte_eth_dev *dev,
2420 		struct rte_eth_fc_conf *fc_conf)
2421 {
2422 	int ret;
2423 	struct nfp_net_hw *net_hw;
2424 	enum rte_eth_fc_mode set_mode;
2425 	struct nfp_net_hw_priv *hw_priv;
2426 	enum rte_eth_fc_mode original_mode;
2427 	struct nfp_eth_table *nfp_eth_table;
2428 	struct nfp_eth_table_port *eth_port;
2429 
2430 	net_hw = nfp_net_get_hw(dev);
2431 	hw_priv = dev->process_private;
2432 	if (hw_priv == NULL || hw_priv->pf_dev == NULL)
2433 		return -EINVAL;
2434 
2435 	nfp_eth_table = hw_priv->pf_dev->nfp_eth_table;
2436 	eth_port = &nfp_eth_table->ports[net_hw->idx];
2437 
2438 	original_mode = nfp_net_get_pause_mode(eth_port);
2439 	set_mode = fc_conf->mode;
2440 
2441 	if (set_mode == original_mode)
2442 		return 0;
2443 
2444 	ret = nfp_net_pause_frame_set(hw_priv, eth_port, set_mode);
2445 	if (ret != 0)
2446 		return ret;
2447 
2448 	/* Update eth_table after modifying RX/TX pause frame mode. */
2449 	eth_port->tx_pause_enabled = (set_mode & RTE_ETH_FC_TX_PAUSE) != 0;
2450 	eth_port->rx_pause_enabled = (set_mode & RTE_ETH_FC_RX_PAUSE) != 0;
2451 
2452 	return 0;
2453 }
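
/*
 * Illustrative usage (not part of the driver): enabling RX and TX pause
 * frames through the generic ethdev API, which lands in
 * nfp_net_flow_ctrl_set(). Only the mode field is used by this driver; the
 * port_id below is a placeholder.
 *
 *     struct rte_eth_fc_conf fc_conf = { .mode = RTE_ETH_FC_FULL };
 *
 *     rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
 */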
2454 
2455 int
2456 nfp_net_fec_get_capability(struct rte_eth_dev *dev,
2457 		struct rte_eth_fec_capa *speed_fec_capa,
2458 		__rte_unused unsigned int num)
2459 {
2460 	uint16_t speed;
2461 	struct nfp_net_hw *hw;
2462 	uint32_t supported_fec;
2463 	struct nfp_net_hw_priv *hw_priv;
2464 	struct nfp_eth_table *nfp_eth_table;
2465 	struct nfp_eth_table_port *eth_port;
2466 
2467 	hw = nfp_net_get_hw(dev);
2468 	hw_priv = dev->process_private;
2469 	if (hw_priv == NULL || hw_priv->pf_dev == NULL)
2470 		return -EINVAL;
2471 
2472 	nfp_eth_table = hw_priv->pf_dev->nfp_eth_table;
2473 	eth_port = &nfp_eth_table->ports[hw->idx];
2474 
2475 	speed = eth_port->speed;
2476 	supported_fec = nfp_eth_supported_fec_modes(eth_port);
2477 	if (speed == 0 || supported_fec == 0) {
2478 		PMD_DRV_LOG(ERR, "The port speed or the supported FEC modes are invalid.");
2479 		return -EINVAL;
2480 	}
2481 
2482 	if (speed_fec_capa == NULL)
2483 		return NFP_FEC_CAPA_ENTRY_NUM;
2484 
2485 	speed_fec_capa->speed = speed;
2486 
2487 	if ((supported_fec & NFP_FEC_AUTO) != 0)
2488 		speed_fec_capa->capa |= RTE_ETH_FEC_MODE_CAPA_MASK(AUTO);
2489 	if ((supported_fec & NFP_FEC_BASER) != 0)
2490 		speed_fec_capa->capa |= RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
2491 	if ((supported_fec & NFP_FEC_REED_SOLOMON) != 0)
2492 		speed_fec_capa->capa |= RTE_ETH_FEC_MODE_CAPA_MASK(RS);
2493 	if ((supported_fec & NFP_FEC_DISABLED) != 0)
2494 		speed_fec_capa->capa |= RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
2495 
2496 	return NFP_FEC_CAPA_ENTRY_NUM;
2497 }
2498 
2499 static uint32_t
2500 nfp_net_fec_nfp_to_rte(enum nfp_eth_fec fec)
2501 {
2502 	switch (fec) {
2503 	case NFP_FEC_AUTO_BIT:
2504 		return RTE_ETH_FEC_MODE_CAPA_MASK(AUTO);
2505 	case NFP_FEC_BASER_BIT:
2506 		return RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
2507 	case NFP_FEC_REED_SOLOMON_BIT:
2508 		return RTE_ETH_FEC_MODE_CAPA_MASK(RS);
2509 	case NFP_FEC_DISABLED_BIT:
2510 		return RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
2511 	default:
2512 		PMD_DRV_LOG(ERR, "FEC mode is invalid.");
2513 		return 0;
2514 	}
2515 }
2516 
2517 int
2518 nfp_net_fec_get(struct rte_eth_dev *dev,
2519 		uint32_t *fec_capa)
2520 {
2521 	struct nfp_net_hw *hw;
2522 	struct nfp_net_hw_priv *hw_priv;
2523 	struct nfp_eth_table *nfp_eth_table;
2524 	struct nfp_eth_table_port *eth_port;
2525 
2526 	hw = nfp_net_get_hw(dev);
2527 	hw_priv = dev->process_private;
2528 	if (hw_priv == NULL || hw_priv->pf_dev == NULL)
2529 		return -EINVAL;
2530 
2531 	if (dev->data->dev_link.link_status == RTE_ETH_LINK_DOWN) {
2532 		nfp_eth_table = nfp_eth_read_ports(hw_priv->pf_dev->cpp);
2533 		hw_priv->pf_dev->nfp_eth_table->ports[hw->idx] = nfp_eth_table->ports[hw->idx];
2534 		free(nfp_eth_table);
2535 	}
2536 
2537 	nfp_eth_table = hw_priv->pf_dev->nfp_eth_table;
2538 	eth_port = &nfp_eth_table->ports[hw->idx];
2539 
2540 	if (!nfp_eth_can_support_fec(eth_port)) {
2541 		PMD_DRV_LOG(ERR, "The NFP port does not support FEC.");
2542 		return -ENOTSUP;
2543 	}
2544 
2545 	/*
2546 	 * If the link is down, the configured FEC mode is returned (AUTO when
2547 	 * AUTO is enabled); if the link is up, the currently active FEC mode
2548 	 * is returned.
2549 	 */
2550 	if (dev->data->dev_link.link_status == RTE_ETH_LINK_DOWN)
2551 		*fec_capa = nfp_net_fec_nfp_to_rte(eth_port->fec);
2552 	else
2553 		*fec_capa = nfp_net_fec_nfp_to_rte(eth_port->act_fec);
2554 
2555 	if (*fec_capa == 0)
2556 		return -EINVAL;
2557 
2558 	return 0;
2559 }
2560 
2561 static enum nfp_eth_fec
2562 nfp_net_fec_rte_to_nfp(uint32_t fec)
2563 {
2564 	switch (fec) {
2565 	case RTE_BIT32(RTE_ETH_FEC_AUTO):
2566 		return NFP_FEC_AUTO_BIT;
2567 	case RTE_BIT32(RTE_ETH_FEC_NOFEC):
2568 		return NFP_FEC_DISABLED_BIT;
2569 	case RTE_BIT32(RTE_ETH_FEC_RS):
2570 		return NFP_FEC_REED_SOLOMON_BIT;
2571 	case RTE_BIT32(RTE_ETH_FEC_BASER):
2572 		return NFP_FEC_BASER_BIT;
2573 	default:
2574 		return NFP_FEC_INVALID_BIT;
2575 	}
2576 }
2577 
2578 int
2579 nfp_net_fec_set(struct rte_eth_dev *dev,
2580 		uint32_t fec_capa)
2581 {
2582 	enum nfp_eth_fec fec;
2583 	struct nfp_net_hw *hw;
2584 	uint32_t supported_fec;
2585 	struct nfp_net_hw_priv *hw_priv;
2586 	struct nfp_eth_table *nfp_eth_table;
2587 	struct nfp_eth_table_port *eth_port;
2588 
2589 	hw = nfp_net_get_hw(dev);
2590 	hw_priv = dev->process_private;
2591 	if (hw_priv == NULL || hw_priv->pf_dev == NULL)
2592 		return -EINVAL;
2593 
2594 	nfp_eth_table = hw_priv->pf_dev->nfp_eth_table;
2595 	eth_port = &nfp_eth_table->ports[hw->idx];
2596 
2597 	supported_fec = nfp_eth_supported_fec_modes(eth_port);
2598 	if (supported_fec == 0) {
2599 		PMD_DRV_LOG(ERR, "The NFP port does not support FEC.");
2600 		return -ENOTSUP;
2601 	}
2602 
2603 	fec = nfp_net_fec_rte_to_nfp(fec_capa);
2604 	if (fec == NFP_FEC_INVALID_BIT) {
2605 		PMD_DRV_LOG(ERR, "The requested FEC mode is invalid.");
2606 		return -EINVAL;
2607 	}
2608 
2609 	if ((RTE_BIT32(fec) & supported_fec) == 0) {
2610 		PMD_DRV_LOG(ERR, "The requested FEC mode is not supported.");
2611 		return -EIO;
2612 	}
2613 
2614 	return nfp_eth_set_fec(hw_priv->pf_dev->cpp, eth_port->index, fec);
2615 }
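
/*
 * Illustrative usage (not part of the driver): forcing Reed-Solomon FEC on a
 * port through the generic ethdev API; nfp_net_fec_rte_to_nfp() only accepts
 * a single RTE_ETH_FEC_MODE_CAPA_MASK() bit per request. The port_id below
 * is a placeholder.
 *
 *     uint32_t fec_capa;
 *
 *     rte_eth_fec_set(port_id, RTE_ETH_FEC_MODE_CAPA_MASK(RS));
 *     rte_eth_fec_get(port_id, &fec_capa);
 */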
2616 
2617 uint32_t
2618 nfp_net_get_port_num(struct nfp_pf_dev *pf_dev,
2619 		struct nfp_eth_table *nfp_eth_table)
2620 {
2621 	if (pf_dev->multi_pf.enabled)
2622 		return 1;
2623 	else
2624 		return nfp_eth_table->count;
2625 }
2626 
2627 uint8_t
2628 nfp_function_id_get(const struct nfp_pf_dev *pf_dev,
2629 		uint8_t port_id)
2630 {
2631 	if (pf_dev->multi_pf.enabled)
2632 		return pf_dev->multi_pf.function_id;
2633 
2634 	return port_id;
2635 }
2636 
2637 static int
2638 nfp_net_sriov_check(struct nfp_pf_dev *pf_dev,
2639 		uint16_t cap)
2640 {
2641 	uint16_t cap_vf;
2642 
2643 	cap_vf = nn_readw(pf_dev->vf_cfg_tbl_bar + NFP_NET_VF_CFG_MB_CAP);
2644 	if ((cap_vf & cap) != cap)
2645 		return -ENOTSUP;
2646 
2647 	return 0;
2648 }
2649 
2650 static int
2651 nfp_net_sriov_update(struct nfp_net_hw *net_hw,
2652 		struct nfp_pf_dev *pf_dev,
2653 		uint16_t update)
2654 {
2655 	int ret;
2656 
2657 	/* Reuse NFP_NET_VF_CFG_MB_VF_NUM to pass vf_base_id to FW. */
2658 	ret = nfp_net_vf_reconfig(net_hw, pf_dev, update, pf_dev->vf_base_id,
2659 			NFP_NET_VF_CFG_MB_VF_NUM);
2660 	if (ret != 0) {
2661 		PMD_INIT_LOG(ERR, "Failed to reconfigure the NFP VF");
2662 		return ret;
2663 	}
2664 
2665 	return 0;
2666 }
2667 
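/*
 * Program the per-VF queue count through the VF config mailbox, provided the
 * firmware advertises the queue-config capability.
 */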
2668 static int
2669 nfp_net_vf_queues_config(struct nfp_net_hw *net_hw,
2670 		struct nfp_pf_dev *pf_dev)
2671 {
2672 	int ret;
2673 	uint32_t i;
2674 	uint32_t offset;
2675 
2676 	ret = nfp_net_sriov_check(pf_dev, NFP_NET_VF_CFG_MB_CAP_QUEUE_CONFIG);
2677 	if (ret != 0) {
2678 		if (ret == -ENOTSUP) {
2679 			PMD_INIT_LOG(WARNING, "Set VF max queue not supported");
2680 			return 0;
2681 		}
2682 
2683 		PMD_INIT_LOG(ERR, "Set VF max queue failed");
2684 		return ret;
2685 	}
2686 
2687 	offset = NFP_NET_VF_CFG_MB_SZ + pf_dev->max_vfs * NFP_NET_VF_CFG_SZ;
2688 	for (i = 0; i < pf_dev->sriov_vf; i++) {
2689 		ret = nfp_net_vf_reconfig(net_hw, pf_dev, NFP_NET_VF_CFG_MB_UPD_QUEUE_CONFIG,
2690 				pf_dev->queue_per_vf, pf_dev->vf_base_id + offset + i);
2691 		if (ret != 0) {
2692 			PMD_INIT_LOG(ERR, "Set VF max_queue failed");
2693 			return ret;
2694 		}
2695 	}
2696 
2697 	return 0;
2698 }
2699 
2700 static int
2701 nfp_net_sriov_init(struct nfp_net_hw *net_hw,
2702 		struct nfp_pf_dev *pf_dev)
2703 {
2704 	int ret;
2705 
2706 	ret = nfp_net_sriov_check(pf_dev, NFP_NET_VF_CFG_MB_CAP_SPLIT);
2707 	if (ret != 0) {
2708 		if (ret == -ENOTSUP) {
2709 			PMD_INIT_LOG(WARNING, "Set VF split not supported");
2710 			return 0;
2711 		}
2712 
2713 		PMD_INIT_LOG(ERR, "Set VF split failed");
2714 		return ret;
2715 	}
2716 
2717 	nn_writeb(pf_dev->sriov_vf, pf_dev->vf_cfg_tbl_bar + NFP_NET_VF_CFG_MB_VF_CNT);
2718 
2719 	ret = nfp_net_sriov_update(net_hw, pf_dev, NFP_NET_VF_CFG_MB_UPD_SPLIT);
2720 	if (ret != 0) {
2721 		PMD_INIT_LOG(ERR, "The NFP SR-IOV split update failed");
2722 		return ret;
2723 	}
2724 
2725 	return 0;
2726 }
2727 
2728 int
2729 nfp_net_vf_config_app_init(struct nfp_net_hw *net_hw,
2730 		struct nfp_pf_dev *pf_dev)
2731 {
2732 	int ret;
2733 
2734 	if (pf_dev->sriov_vf == 0)
2735 		return 0;
2736 
2737 	ret = nfp_net_sriov_init(net_hw, pf_dev);
2738 	if (ret != 0) {
2739 		PMD_INIT_LOG(ERR, "Failed to init the SR-IOV module");
2740 		return ret;
2741 	}
2742 
2743 	ret = nfp_net_vf_queues_config(net_hw, pf_dev);
2744 	if (ret != 0) {
2745 		PMD_INIT_LOG(ERR, "Failed to configure the VF queues");
2746 		return ret;
2747 	}
2748 
2749 	return 0;
2750 }
2751