xref: /dpdk/drivers/net/nfp/nfp_net_common.c (revision 25a2a0dc3de31ca0a6fbc9371cf3dd85dfd74b07)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2014-2018 Netronome Systems, Inc.
3  * All rights reserved.
4  *
5  * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
6  */
7 
8 #include "nfp_net_common.h"
9 
10 #include <rte_alarm.h>
11 
12 #include "flower/nfp_flower_representor.h"
13 #include "nfd3/nfp_nfd3.h"
14 #include "nfdk/nfp_nfdk.h"
15 #include "nfpcore/nfp_mip.h"
16 #include "nfpcore/nfp_nsp.h"
17 #include "nfp_logs.h"
18 #include "nfp_net_meta.h"
19 
20 #define NFP_TX_MAX_SEG       UINT8_MAX
21 #define NFP_TX_MAX_MTU_SEG   8
22 
23 #define NFP_NET_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
24 #define NFP_NET_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
25 
26 #define DEFAULT_FLBUF_SIZE        9216
27 #define NFP_ETH_OVERHEAD \
28 	(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + RTE_VLAN_HLEN * 2)
29 
30 /* Only show FEC capability supported by the current speed. */
31 #define NFP_FEC_CAPA_ENTRY_NUM  1
32 
33 enum nfp_xstat_group {
34 	NFP_XSTAT_GROUP_NET,
35 	NFP_XSTAT_GROUP_MAC
36 };
37 
38 struct nfp_xstat {
39 	char name[RTE_ETH_XSTATS_NAME_SIZE];
40 	int offset;
41 	enum nfp_xstat_group group;
42 };
43 
44 #define NFP_XSTAT_NET(_name, _offset) {                 \
45 	.name = _name,                                  \
46 	.offset = NFP_NET_CFG_STATS_##_offset,          \
47 	.group = NFP_XSTAT_GROUP_NET,                   \
48 }
49 
50 #define NFP_XSTAT_MAC(_name, _offset) {                 \
51 	.name = _name,                                  \
52 	.offset = NFP_MAC_STATS_##_offset,              \
53 	.group = NFP_XSTAT_GROUP_MAC,                   \
54 }
55 
56 static const struct nfp_xstat nfp_net_xstats[] = {
57 	/*
58 	 * Basic xstats available on both VF and PF.
59 	 * Note that in case new statistics of group NFP_XSTAT_GROUP_NET
60 	 * are added to this array, they must appear before any statistics
61 	 * of group NFP_XSTAT_GROUP_MAC.
62 	 */
63 	NFP_XSTAT_NET("rx_good_packets_mc", RX_MC_FRAMES),
64 	NFP_XSTAT_NET("tx_good_packets_mc", TX_MC_FRAMES),
65 	NFP_XSTAT_NET("rx_good_packets_bc", RX_BC_FRAMES),
66 	NFP_XSTAT_NET("tx_good_packets_bc", TX_BC_FRAMES),
67 	NFP_XSTAT_NET("rx_good_bytes_uc", RX_UC_OCTETS),
68 	NFP_XSTAT_NET("tx_good_bytes_uc", TX_UC_OCTETS),
69 	NFP_XSTAT_NET("rx_good_bytes_mc", RX_MC_OCTETS),
70 	NFP_XSTAT_NET("tx_good_bytes_mc", TX_MC_OCTETS),
71 	NFP_XSTAT_NET("rx_good_bytes_bc", RX_BC_OCTETS),
72 	NFP_XSTAT_NET("tx_good_bytes_bc", TX_BC_OCTETS),
73 	NFP_XSTAT_NET("tx_missed_errors", TX_DISCARDS),
74 	NFP_XSTAT_NET("bpf_pass_pkts", APP0_FRAMES),
75 	NFP_XSTAT_NET("bpf_pass_bytes", APP0_BYTES),
76 	NFP_XSTAT_NET("bpf_app1_pkts", APP1_FRAMES),
77 	NFP_XSTAT_NET("bpf_app1_bytes", APP1_BYTES),
78 	NFP_XSTAT_NET("bpf_app2_pkts", APP2_FRAMES),
79 	NFP_XSTAT_NET("bpf_app2_bytes", APP2_BYTES),
80 	NFP_XSTAT_NET("bpf_app3_pkts", APP3_FRAMES),
81 	NFP_XSTAT_NET("bpf_app3_bytes", APP3_BYTES),
82 	/*
83 	 * MAC xstats available only on the PF. These statistics are not available for
84 	 * VFs because the PF is still bound to the kernel driver when the VF is
85 	 * initialized. As such, the PMD cannot obtain a CPP handle and access the
86 	 * rtsym_table in order to get the pointer to the start of the MAC statistics counters.
87 	 */
88 	NFP_XSTAT_MAC("mac.rx_octets", RX_IN_OCTS),
89 	NFP_XSTAT_MAC("mac.rx_frame_too_long_errors", RX_FRAME_TOO_LONG_ERRORS),
90 	NFP_XSTAT_MAC("mac.rx_range_length_errors", RX_RANGE_LENGTH_ERRORS),
91 	NFP_XSTAT_MAC("mac.rx_vlan_received_ok", RX_VLAN_RECEIVED_OK),
92 	NFP_XSTAT_MAC("mac.rx_errors", RX_IN_ERRORS),
93 	NFP_XSTAT_MAC("mac.rx_broadcast_pkts", RX_IN_BROADCAST_PKTS),
94 	NFP_XSTAT_MAC("mac.rx_drop_events", RX_DROP_EVENTS),
95 	NFP_XSTAT_MAC("mac.rx_alignment_errors", RX_ALIGNMENT_ERRORS),
96 	NFP_XSTAT_MAC("mac.rx_pause_mac_ctrl_frames", RX_PAUSE_MAC_CTRL_FRAMES),
97 	NFP_XSTAT_MAC("mac.rx_frames_received_ok", RX_FRAMES_RECEIVED_OK),
98 	NFP_XSTAT_MAC("mac.rx_frame_check_sequence_errors", RX_FRAME_CHECK_SEQ_ERRORS),
99 	NFP_XSTAT_MAC("mac.rx_unicast_pkts", RX_UNICAST_PKTS),
100 	NFP_XSTAT_MAC("mac.rx_multicast_pkts", RX_MULTICAST_PKTS),
101 	NFP_XSTAT_MAC("mac.rx_pkts", RX_PKTS),
102 	NFP_XSTAT_MAC("mac.rx_undersize_pkts", RX_UNDERSIZE_PKTS),
103 	NFP_XSTAT_MAC("mac.rx_pkts_64_octets", RX_PKTS_64_OCTS),
104 	NFP_XSTAT_MAC("mac.rx_pkts_65_to_127_octets", RX_PKTS_65_TO_127_OCTS),
105 	NFP_XSTAT_MAC("mac.rx_pkts_128_to_255_octets", RX_PKTS_128_TO_255_OCTS),
106 	NFP_XSTAT_MAC("mac.rx_pkts_256_to_511_octets", RX_PKTS_256_TO_511_OCTS),
107 	NFP_XSTAT_MAC("mac.rx_pkts_512_to_1023_octets", RX_PKTS_512_TO_1023_OCTS),
108 	NFP_XSTAT_MAC("mac.rx_pkts_1024_to_1518_octets", RX_PKTS_1024_TO_1518_OCTS),
109 	NFP_XSTAT_MAC("mac.rx_pkts_1519_to_max_octets", RX_PKTS_1519_TO_MAX_OCTS),
110 	NFP_XSTAT_MAC("mac.rx_jabbers", RX_JABBERS),
111 	NFP_XSTAT_MAC("mac.rx_fragments", RX_FRAGMENTS),
112 	NFP_XSTAT_MAC("mac.rx_oversize_pkts", RX_OVERSIZE_PKTS),
113 	NFP_XSTAT_MAC("mac.rx_pause_frames_class0", RX_PAUSE_FRAMES_CLASS0),
114 	NFP_XSTAT_MAC("mac.rx_pause_frames_class1", RX_PAUSE_FRAMES_CLASS1),
115 	NFP_XSTAT_MAC("mac.rx_pause_frames_class2", RX_PAUSE_FRAMES_CLASS2),
116 	NFP_XSTAT_MAC("mac.rx_pause_frames_class3", RX_PAUSE_FRAMES_CLASS3),
117 	NFP_XSTAT_MAC("mac.rx_pause_frames_class4", RX_PAUSE_FRAMES_CLASS4),
118 	NFP_XSTAT_MAC("mac.rx_pause_frames_class5", RX_PAUSE_FRAMES_CLASS5),
119 	NFP_XSTAT_MAC("mac.rx_pause_frames_class6", RX_PAUSE_FRAMES_CLASS6),
120 	NFP_XSTAT_MAC("mac.rx_pause_frames_class7", RX_PAUSE_FRAMES_CLASS7),
121 	NFP_XSTAT_MAC("mac.rx_mac_ctrl_frames_received", RX_MAC_CTRL_FRAMES_REC),
122 	NFP_XSTAT_MAC("mac.rx_mac_head_drop", RX_MAC_HEAD_DROP),
123 	NFP_XSTAT_MAC("mac.tx_queue_drop", TX_QUEUE_DROP),
124 	NFP_XSTAT_MAC("mac.tx_octets", TX_OUT_OCTS),
125 	NFP_XSTAT_MAC("mac.tx_vlan_transmitted_ok", TX_VLAN_TRANSMITTED_OK),
126 	NFP_XSTAT_MAC("mac.tx_errors", TX_OUT_ERRORS),
127 	NFP_XSTAT_MAC("mac.tx_broadcast_pkts", TX_BROADCAST_PKTS),
128 	NFP_XSTAT_MAC("mac.tx_pause_mac_ctrl_frames", TX_PAUSE_MAC_CTRL_FRAMES),
129 	NFP_XSTAT_MAC("mac.tx_frames_transmitted_ok", TX_FRAMES_TRANSMITTED_OK),
130 	NFP_XSTAT_MAC("mac.tx_unicast_pkts", TX_UNICAST_PKTS),
131 	NFP_XSTAT_MAC("mac.tx_multicast_pkts", TX_MULTICAST_PKTS),
132 	NFP_XSTAT_MAC("mac.tx_pkts_64_octets", TX_PKTS_64_OCTS),
133 	NFP_XSTAT_MAC("mac.tx_pkts_65_to_127_octets", TX_PKTS_65_TO_127_OCTS),
134 	NFP_XSTAT_MAC("mac.tx_pkts_128_to_255_octets", TX_PKTS_128_TO_255_OCTS),
135 	NFP_XSTAT_MAC("mac.tx_pkts_256_to_511_octets", TX_PKTS_256_TO_511_OCTS),
136 	NFP_XSTAT_MAC("mac.tx_pkts_512_to_1023_octets", TX_PKTS_512_TO_1023_OCTS),
137 	NFP_XSTAT_MAC("mac.tx_pkts_1024_to_1518_octets", TX_PKTS_1024_TO_1518_OCTS),
138 	NFP_XSTAT_MAC("mac.tx_pkts_1519_to_max_octets", TX_PKTS_1519_TO_MAX_OCTS),
139 	NFP_XSTAT_MAC("mac.tx_pause_frames_class0", TX_PAUSE_FRAMES_CLASS0),
140 	NFP_XSTAT_MAC("mac.tx_pause_frames_class1", TX_PAUSE_FRAMES_CLASS1),
141 	NFP_XSTAT_MAC("mac.tx_pause_frames_class2", TX_PAUSE_FRAMES_CLASS2),
142 	NFP_XSTAT_MAC("mac.tx_pause_frames_class3", TX_PAUSE_FRAMES_CLASS3),
143 	NFP_XSTAT_MAC("mac.tx_pause_frames_class4", TX_PAUSE_FRAMES_CLASS4),
144 	NFP_XSTAT_MAC("mac.tx_pause_frames_class5", TX_PAUSE_FRAMES_CLASS5),
145 	NFP_XSTAT_MAC("mac.tx_pause_frames_class6", TX_PAUSE_FRAMES_CLASS6),
146 	NFP_XSTAT_MAC("mac.tx_pause_frames_class7", TX_PAUSE_FRAMES_CLASS7),
147 };
148 
149 static const uint32_t nfp_net_link_speed_nfp2rte[] = {
150 	[NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = RTE_ETH_SPEED_NUM_NONE,
151 	[NFP_NET_CFG_STS_LINK_RATE_UNKNOWN]     = RTE_ETH_SPEED_NUM_NONE,
152 	[NFP_NET_CFG_STS_LINK_RATE_1G]          = RTE_ETH_SPEED_NUM_1G,
153 	[NFP_NET_CFG_STS_LINK_RATE_10G]         = RTE_ETH_SPEED_NUM_10G,
154 	[NFP_NET_CFG_STS_LINK_RATE_25G]         = RTE_ETH_SPEED_NUM_25G,
155 	[NFP_NET_CFG_STS_LINK_RATE_40G]         = RTE_ETH_SPEED_NUM_40G,
156 	[NFP_NET_CFG_STS_LINK_RATE_50G]         = RTE_ETH_SPEED_NUM_50G,
157 	[NFP_NET_CFG_STS_LINK_RATE_100G]        = RTE_ETH_SPEED_NUM_100G,
158 };
159 
160 static size_t
161 nfp_net_link_speed_rte2nfp(uint32_t speed)
162 {
163 	size_t i;
164 
165 	for (i = 0; i < RTE_DIM(nfp_net_link_speed_nfp2rte); i++) {
166 		if (speed == nfp_net_link_speed_nfp2rte[i])
167 			return i;
168 	}
169 
170 	return NFP_NET_CFG_STS_LINK_RATE_UNKNOWN;
171 }
172 
173 static uint32_t
174 nfp_net_link_speed_nfp2rte_check(uint32_t speed)
175 {
176 	size_t i;
177 
178 	for (i = 0; i < RTE_DIM(nfp_net_link_speed_nfp2rte); i++) {
179 		if (speed == nfp_net_link_speed_nfp2rte[i])
180 			return nfp_net_link_speed_nfp2rte[i];
181 	}
182 
183 	return RTE_ETH_SPEED_NUM_NONE;
184 }
185 
186 static void
187 nfp_net_notify_port_speed(struct nfp_net_hw *hw,
188 		struct rte_eth_link *link)
189 {
190 	/*
191 	 * Read the link status from NFP_NET_CFG_STS. If the link is down
192 	 * then write the link speed NFP_NET_CFG_STS_LINK_RATE_UNKNOWN to
193 	 * NFP_NET_CFG_STS_NSP_LINK_RATE.
194 	 */
195 	if (link->link_status == RTE_ETH_LINK_DOWN) {
196 		nn_cfg_writew(&hw->super, NFP_NET_CFG_STS_NSP_LINK_RATE,
197 				NFP_NET_CFG_STS_LINK_RATE_UNKNOWN);
198 		return;
199 	}
200 
201 	/*
202 	 * Link is up so write the link speed from the eth_table to
203 	 * NFP_NET_CFG_STS_NSP_LINK_RATE.
204 	 */
205 	nn_cfg_writew(&hw->super, NFP_NET_CFG_STS_NSP_LINK_RATE,
206 			nfp_net_link_speed_rte2nfp(link->link_speed));
207 }
208 
209 /**
210  * Reconfigure the firmware through the VF configuration mailbox
211  *
212  * @param net_hw
213  *   Device to reconfigure
214  * @param pf_dev
215  *   PF device which provides access to the VF configuration table
216  * @param update
217  *   The command value for the VF config mailbox
218  * @param value
219  *   The value to write for the given update command
220  * @param offset
221  *   The offset into the VF configuration table
222  *
223  * @return
224  *   - The mailbox return code ((0) on success).
225  *   - (-EIO) if the I/O fails and the VF cannot be reconfigured.
226  */
227 static int
228 nfp_net_vf_reconfig(struct nfp_net_hw *net_hw,
229 		struct nfp_pf_dev *pf_dev,
230 		uint16_t update,
231 		uint8_t value,
232 		uint32_t offset)
233 {
234 	int ret;
235 	struct nfp_hw *hw;
236 
237 	hw = &net_hw->super;
238 	rte_spinlock_lock(&hw->reconfig_lock);
239 
240 	/* Write update info to mailbox in VF config symbol */
241 	nn_writeb(value, pf_dev->vf_cfg_tbl_bar + offset);
242 	nn_writew(update, pf_dev->vf_cfg_tbl_bar + NFP_NET_VF_CFG_MB_UPD);
243 	nn_cfg_writel(hw, NFP_NET_CFG_UPDATE, NFP_NET_CFG_UPDATE_VF);
244 
245 	rte_wmb();
246 
247 	ret = nfp_reconfig_real(hw, NFP_NET_CFG_UPDATE_VF);
248 
249 	rte_spinlock_unlock(&hw->reconfig_lock);
250 
251 	if (ret != 0)
252 		return -EIO;
253 
254 	return nn_readw(pf_dev->vf_cfg_tbl_bar + NFP_NET_VF_CFG_MB_RET);
255 }
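
/*
 * A minimal usage sketch for the helper above (hypothetical caller; the
 * VF_EXAMPLE_* names are placeholders, not real register definitions):
 * write one byte of a VF's entry in the VF configuration table, then kick
 * the firmware through the VF config mailbox.
 *
 *	int err;
 *
 *	err = nfp_net_vf_reconfig(net_hw, pf_dev, VF_EXAMPLE_MB_UPD_FLAG,
 *			value, VF_EXAMPLE_CFG_OFFSET);
 *	if (err < 0)
 *		return err;      // -EIO: firmware did not complete the update
 *	if (err > 0)
 *		return -EINVAL;  // non-zero mailbox return: firmware rejected it
 */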
256 
257 /**
258  * Reconfigure the firmware via the mailbox
259  *
260  * @param net_hw
261  *   Device to reconfigure
262  * @param mbox_cmd
263  *   The value for the mailbox command
264  *
265  * @return
266  *   - The mailbox return code ((0) on success).
267  *   - (-EIO) if the I/O fails and the mailbox reconfiguration cannot complete.
268  */
269 int
270 nfp_net_mbox_reconfig(struct nfp_net_hw *net_hw,
271 		uint32_t mbox_cmd)
272 {
273 	int ret;
274 	uint32_t mbox;
275 
276 	mbox = net_hw->tlv_caps.mbox_off;
277 
278 	rte_spinlock_lock(&net_hw->super.reconfig_lock);
279 
280 	nn_cfg_writeq(&net_hw->super, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd);
281 	nn_cfg_writel(&net_hw->super, NFP_NET_CFG_UPDATE, NFP_NET_CFG_UPDATE_MBOX);
282 
283 	rte_wmb();
284 
285 	ret = nfp_reconfig_real(&net_hw->super, NFP_NET_CFG_UPDATE_MBOX);
286 
287 	rte_spinlock_unlock(&net_hw->super.reconfig_lock);
288 
289 	if (ret != 0) {
290 		PMD_DRV_LOG(ERR, "Error nfp net mailbox reconfig: mbox=%#08x update=%#08x",
291 				mbox_cmd, NFP_NET_CFG_UPDATE_MBOX);
292 		return -EIO;
293 	}
294 
295 	return nn_cfg_readl(&net_hw->super, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET);
296 }
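
/*
 * A minimal usage sketch (hypothetical: EXAMPLE_MBOX_CMD stands in for one
 * of the real mailbox commands defined in nfp_net_cfg.h). Callers write any
 * command arguments into the mailbox area at tlv_caps.mbox_off before
 * issuing the command:
 *
 *	int err;
 *
 *	err = nfp_net_mbox_reconfig(net_hw, EXAMPLE_MBOX_CMD);
 *	if (err < 0)
 *		return err;  // -EIO: the reconfig did not complete
 *	if (err != 0)
 *		return -EIO; // firmware reported an error through the mailbox
 */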
297 
298 struct nfp_net_hw *
299 nfp_net_get_hw(const struct rte_eth_dev *dev)
300 {
301 	struct nfp_net_hw *hw;
302 
303 	if (rte_eth_dev_is_repr(dev)) {
304 		struct nfp_flower_representor *repr;
305 		repr = dev->data->dev_private;
306 		hw = repr->app_fw_flower->pf_hw;
307 	} else {
308 		hw = dev->data->dev_private;
309 	}
310 
311 	return hw;
312 }
313 
314 uint8_t
315 nfp_net_get_idx(const struct rte_eth_dev *dev)
316 {
317 	uint8_t idx;
318 
319 	if (rte_eth_dev_is_repr(dev)) {
320 		struct nfp_flower_representor *repr;
321 		repr = dev->data->dev_private;
322 		idx = repr->idx;
323 	} else {
324 		struct nfp_net_hw *hw;
325 		hw = dev->data->dev_private;
326 		idx = hw->idx;
327 	}
328 
329 	return idx;
330 }
331 
332 /*
333  * Configure an Ethernet device.
334  *
335  * This function must be invoked before any other function in the Ethernet API.
336  * It can also be re-invoked while the device is in the stopped state.
337  *
338  * A DPDK application tells the ethdev layer how many queues to use and how
339  * those queues need to be configured. The ethdev layer checks that no more
340  * queues than those advertised by the driver are requested, and then calls
341  * this function.
342  */
343 int
344 nfp_net_configure(struct rte_eth_dev *dev)
345 {
346 	struct nfp_net_hw *hw;
347 	struct rte_eth_conf *dev_conf;
348 	struct rte_eth_rxmode *rxmode;
349 	struct rte_eth_txmode *txmode;
350 
351 	hw = nfp_net_get_hw(dev);
352 	dev_conf = &dev->data->dev_conf;
353 	rxmode = &dev_conf->rxmode;
354 	txmode = &dev_conf->txmode;
355 
356 	if ((rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) != 0)
357 		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
358 
359 	/* Checking TX mode */
360 	if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
361 		PMD_DRV_LOG(ERR, "TX mq_mode DCB and VMDq not supported");
362 		return -EINVAL;
363 	}
364 
365 	/* Checking RX mode */
366 	if ((rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) != 0 &&
367 			(hw->super.cap & NFP_NET_CFG_CTRL_RSS_ANY) == 0) {
368 		PMD_DRV_LOG(ERR, "RSS not supported");
369 		return -EINVAL;
370 	}
371 
372 	/* Checking MTU set */
373 	if (rxmode->mtu > hw->max_mtu + NFP_ETH_OVERHEAD) {
374 		PMD_DRV_LOG(ERR, "MTU (%u) larger than the maximum possible frame size (%u)",
375 				rxmode->mtu, hw->max_mtu + NFP_ETH_OVERHEAD);
376 		return -ERANGE;
377 	}
378 
379 	return 0;
380 }
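
/*
 * Application-side sketch of a configuration that passes the checks above
 * (illustrative only, not part of the driver): RSS on RX, no DCB/VMDq on
 * TX, and an MTU within the limit advertised through dev_info.
 *
 *	struct rte_eth_conf conf = {
 *		.rxmode = {
 *			.mq_mode = RTE_ETH_MQ_RX_RSS,
 *			.mtu = RTE_ETHER_MTU,
 *		},
 *		.txmode = {
 *			.mq_mode = RTE_ETH_MQ_TX_NONE,
 *		},
 *		.rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IP,
 *	};
 *
 *	ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 */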
381 
382 void
383 nfp_net_log_device_information(const struct nfp_net_hw *hw,
384 		struct nfp_pf_dev *pf_dev)
385 {
386 	uint32_t cap = hw->super.cap;
387 	uint32_t cap_ext = hw->super.cap_ext;
388 
389 	PMD_INIT_LOG(INFO, "VER: %u.%u, Maximum supported MTU: %d",
390 			pf_dev->ver.major, pf_dev->ver.minor, hw->max_mtu);
391 
392 	PMD_INIT_LOG(INFO, "CAP: %#x", cap);
393 	PMD_INIT_LOG(INFO, "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
394 			cap & NFP_NET_CFG_CTRL_ENABLE        ? "ENABLE "      : "",
395 			cap & NFP_NET_CFG_CTRL_PROMISC       ? "PROMISC "     : "",
396 			cap & NFP_NET_CFG_CTRL_L2BC          ? "L2BCFILT "    : "",
397 			cap & NFP_NET_CFG_CTRL_L2MC          ? "L2MCFILT "    : "",
398 			cap & NFP_NET_CFG_CTRL_RXCSUM        ? "RXCSUM "      : "",
399 			cap & NFP_NET_CFG_CTRL_TXCSUM        ? "TXCSUM "      : "",
400 			cap & NFP_NET_CFG_CTRL_RXVLAN        ? "RXVLAN "      : "",
401 			cap & NFP_NET_CFG_CTRL_TXVLAN        ? "TXVLAN "      : "",
402 			cap & NFP_NET_CFG_CTRL_SCATTER       ? "SCATTER "     : "",
403 			cap & NFP_NET_CFG_CTRL_GATHER        ? "GATHER "      : "",
404 			cap & NFP_NET_CFG_CTRL_LSO           ? "TSO "         : "",
405 			cap & NFP_NET_CFG_CTRL_RXQINQ        ? "RXQINQ "      : "",
406 			cap & NFP_NET_CFG_CTRL_RXVLAN_V2     ? "RXVLANv2 "    : "",
407 			cap & NFP_NET_CFG_CTRL_RINGCFG       ? "RINGCFG "     : "",
408 			cap & NFP_NET_CFG_CTRL_RSS           ? "RSS "         : "",
409 			cap & NFP_NET_CFG_CTRL_IRQMOD        ? "IRQMOD "      : "",
410 			cap & NFP_NET_CFG_CTRL_RINGPRIO      ? "RINGPRIO "    : "",
411 			cap & NFP_NET_CFG_CTRL_MSIXAUTO      ? "MSIXAUTO "    : "",
412 			cap & NFP_NET_CFG_CTRL_TXRWB         ? "TXRWB "       : "",
413 			cap & NFP_NET_CFG_CTRL_L2SWITCH      ? "L2SWITCH "    : "",
414 			cap & NFP_NET_CFG_CTRL_TXVLAN_V2     ? "TXVLANv2 "    : "",
415 			cap & NFP_NET_CFG_CTRL_VXLAN         ? "VXLAN "       : "",
416 			cap & NFP_NET_CFG_CTRL_NVGRE         ? "NVGRE "       : "",
417 			cap & NFP_NET_CFG_CTRL_MSIX_TX_OFF   ? "MSIX_TX_OFF " : "",
418 			cap & NFP_NET_CFG_CTRL_LSO2          ? "TSOv2 "       : "",
419 			cap & NFP_NET_CFG_CTRL_RSS2          ? "RSSv2 "       : "",
420 			cap & NFP_NET_CFG_CTRL_CSUM_COMPLETE ? "CSUM "        : "",
421 			cap & NFP_NET_CFG_CTRL_LIVE_ADDR     ? "LIVE_ADDR "   : "",
422 			cap & NFP_NET_CFG_CTRL_USO           ? "USO"          : "");
423 
424 	PMD_INIT_LOG(INFO, "CAP_WORD1: %#x", cap_ext);
425 	PMD_INIT_LOG(INFO, "%s%s%s%s%s%s%s",
426 			cap_ext & NFP_NET_CFG_CTRL_PKT_TYPE        ? "PKT_TYPE "        : "",
427 			cap_ext & NFP_NET_CFG_CTRL_IPSEC           ? "IPSEC "           : "",
428 			cap_ext & NFP_NET_CFG_CTRL_IPSEC_SM_LOOKUP ? "IPSEC_SM "        : "",
429 			cap_ext & NFP_NET_CFG_CTRL_IPSEC_LM_LOOKUP ? "IPSEC_LM "        : "",
430 			cap_ext & NFP_NET_CFG_CTRL_MULTI_PF        ? "MULTI_PF "        : "",
431 			cap_ext & NFP_NET_CFG_CTRL_FLOW_STEER      ? "FLOW_STEER "      : "",
432 			cap_ext & NFP_NET_CFG_CTRL_IN_ORDER        ? "VIRTIO_IN_ORDER " : "");
433 
434 	PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u",
435 			hw->max_rx_queues, hw->max_tx_queues);
436 }
437 
438 static inline void
439 nfp_net_enable_rxvlan_cap(struct nfp_net_hw *hw,
440 		uint32_t *ctrl)
441 {
442 	if ((hw->super.cap & NFP_NET_CFG_CTRL_RXVLAN_V2) != 0)
443 		*ctrl |= NFP_NET_CFG_CTRL_RXVLAN_V2;
444 	else if ((hw->super.cap & NFP_NET_CFG_CTRL_RXVLAN) != 0)
445 		*ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
446 }
447 
448 void
449 nfp_net_enable_queues(struct rte_eth_dev *dev)
450 {
451 	struct nfp_net_hw *hw;
452 
453 	hw = nfp_net_get_hw(dev);
454 
455 	nfp_enable_queues(&hw->super, dev->data->nb_rx_queues,
456 			dev->data->nb_tx_queues);
457 }
458 
459 void
460 nfp_net_disable_queues(struct rte_eth_dev *dev)
461 {
462 	struct nfp_net_hw *net_hw;
463 
464 	net_hw = nfp_net_get_hw(dev);
465 
466 	nfp_disable_queues(&net_hw->super);
467 }
468 
469 void
470 nfp_net_params_setup(struct nfp_net_hw *hw)
471 {
472 	nn_cfg_writel(&hw->super, NFP_NET_CFG_MTU, hw->mtu);
473 	nn_cfg_writel(&hw->super, NFP_NET_CFG_FLBUFSZ, hw->flbufsz);
474 }
475 
476 void
477 nfp_net_cfg_queue_setup(struct nfp_net_hw *hw)
478 {
479 	hw->super.qcp_cfg = hw->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
480 }
481 
482 int
483 nfp_net_set_mac_addr(struct rte_eth_dev *dev,
484 		struct rte_ether_addr *mac_addr)
485 {
486 	uint32_t update;
487 	uint32_t new_ctrl;
488 	struct nfp_hw *hw;
489 	struct nfp_net_hw *net_hw;
490 
491 	net_hw = nfp_net_get_hw(dev);
492 	hw = &net_hw->super;
493 	if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) != 0 &&
494 			(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR) == 0) {
495 		PMD_DRV_LOG(ERR, "MAC address cannot be changed while the port is enabled");
496 		return -EBUSY;
497 	}
498 
499 	if (rte_is_valid_assigned_ether_addr(mac_addr) == 0) {
500 		PMD_DRV_LOG(ERR, "Invalid MAC address");
501 		return -EINVAL;
502 	}
503 
504 	/* Writing new MAC to the specific port BAR address */
505 	nfp_write_mac(hw, (uint8_t *)mac_addr);
506 
507 	update = NFP_NET_CFG_UPDATE_MACADDR;
508 	new_ctrl = hw->ctrl;
509 	if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) != 0 &&
510 			(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR) != 0)
511 		new_ctrl |= NFP_NET_CFG_CTRL_LIVE_ADDR;
512 
513 	/* Signal the NIC about the change */
514 	if (nfp_reconfig(hw, new_ctrl, update) != 0) {
515 		PMD_DRV_LOG(ERR, "MAC address update failed");
516 		return -EIO;
517 	}
518 
519 	hw->ctrl = new_ctrl;
520 
521 	return 0;
522 }
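
/*
 * Application-side sketch (illustrative): this callback is reached through
 * the generic ethdev API, e.g.
 *
 *	struct rte_ether_addr addr = {{ 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 }};
 *
 *	ret = rte_eth_dev_default_mac_addr_set(port_id, &addr);
 *
 * With the port already started this only succeeds when the firmware
 * advertises NFP_NET_CFG_CTRL_LIVE_ADDR, per the check above.
 */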
523 
524 int
525 nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
526 		struct rte_intr_handle *intr_handle)
527 {
528 	uint16_t i;
529 	struct nfp_net_hw *hw;
530 
531 	if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
532 				dev->data->nb_rx_queues) != 0) {
533 		PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues intr_vec",
534 				dev->data->nb_rx_queues);
535 		return -ENOMEM;
536 	}
537 
538 	hw = nfp_net_get_hw(dev);
539 
540 	if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_UIO) {
541 		PMD_DRV_LOG(INFO, "VF: enabling RX interrupt with UIO");
542 		/* UIO just supports one queue and no LSC */
543 		nn_cfg_writeb(&hw->super, NFP_NET_CFG_RXR_VEC(0), 0);
544 		if (rte_intr_vec_list_index_set(intr_handle, 0, 0) != 0)
545 			return -1;
546 	} else {
547 		PMD_DRV_LOG(INFO, "VF: enabling RX interrupt with VFIO");
548 		for (i = 0; i < dev->data->nb_rx_queues; i++) {
549 			/*
550 			 * The first msix vector is reserved for non
551 			 * efd interrupts.
552 			 */
553 			nn_cfg_writeb(&hw->super, NFP_NET_CFG_RXR_VEC(i), i + 1);
554 			if (rte_intr_vec_list_index_set(intr_handle, i, i + 1) != 0)
555 				return -1;
556 		}
557 	}
558 
559 	/* Avoiding TX interrupts */
560 	hw->super.ctrl |= NFP_NET_CFG_CTRL_MSIX_TX_OFF;
561 	return 0;
562 }
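
/*
 * Application-side sketch (illustrative): the vectors programmed above are
 * consumed through the generic RX interrupt API, roughly:
 *
 *	struct rte_eth_conf conf = { .intr_conf = { .rxq = 1 } };
 *
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *	...
 *	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
 *			RTE_INTR_EVENT_ADD, NULL);
 *	rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *	rte_epoll_wait(RTE_EPOLL_PER_THREAD, events, RTE_DIM(events), timeout_ms);
 */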
563 
564 uint32_t
565 nfp_check_offloads(struct rte_eth_dev *dev)
566 {
567 	uint32_t cap;
568 	uint32_t ctrl = 0;
569 	uint64_t rx_offload;
570 	uint64_t tx_offload;
571 	struct nfp_net_hw *hw;
572 	struct rte_eth_conf *dev_conf;
573 
574 	hw = nfp_net_get_hw(dev);
575 	cap = hw->super.cap;
576 
577 	dev_conf = &dev->data->dev_conf;
578 	rx_offload = dev_conf->rxmode.offloads;
579 	tx_offload = dev_conf->txmode.offloads;
580 
581 	if ((rx_offload & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM) != 0) {
582 		if ((cap & NFP_NET_CFG_CTRL_RXCSUM) != 0)
583 			ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
584 	}
585 
586 	if ((rx_offload & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) != 0)
587 		nfp_net_enable_rxvlan_cap(hw, &ctrl);
588 
589 	if ((rx_offload & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) != 0) {
590 		if ((cap & NFP_NET_CFG_CTRL_RXQINQ) != 0)
591 			ctrl |= NFP_NET_CFG_CTRL_RXQINQ;
592 	}
593 
594 	hw->mtu = dev->data->mtu;
595 
596 	if ((tx_offload & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) != 0) {
597 		if ((cap & NFP_NET_CFG_CTRL_TXVLAN_V2) != 0)
598 			ctrl |= NFP_NET_CFG_CTRL_TXVLAN_V2;
599 		else if ((cap & NFP_NET_CFG_CTRL_TXVLAN) != 0)
600 			ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
601 	}
602 
603 	/* L2 broadcast */
604 	if ((cap & NFP_NET_CFG_CTRL_L2BC) != 0)
605 		ctrl |= NFP_NET_CFG_CTRL_L2BC;
606 
607 	/* L2 multicast */
608 	if ((cap & NFP_NET_CFG_CTRL_L2MC) != 0)
609 		ctrl |= NFP_NET_CFG_CTRL_L2MC;
610 
611 	/* TX checksum offload */
612 	if ((tx_offload & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) != 0 ||
613 			(tx_offload & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) != 0 ||
614 			(tx_offload & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) != 0)
615 		ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
616 
617 	/* LSO offload */
618 	if ((tx_offload & RTE_ETH_TX_OFFLOAD_TCP_TSO) != 0 ||
619 			(tx_offload & RTE_ETH_TX_OFFLOAD_UDP_TSO) != 0 ||
620 			(tx_offload & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO) != 0) {
621 		if ((cap & NFP_NET_CFG_CTRL_LSO) != 0)
622 			ctrl |= NFP_NET_CFG_CTRL_LSO;
623 		else if ((cap & NFP_NET_CFG_CTRL_LSO2) != 0)
624 			ctrl |= NFP_NET_CFG_CTRL_LSO2;
625 	}
626 
627 	/* TX gather (multi-segment transmit) */
628 	if ((tx_offload & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) != 0)
629 		ctrl |= NFP_NET_CFG_CTRL_GATHER;
630 
631 	return ctrl;
632 }
633 
634 int
635 nfp_net_promisc_enable(struct rte_eth_dev *dev)
636 {
637 	int ret;
638 	uint32_t update;
639 	uint32_t new_ctrl;
640 	struct nfp_hw *hw;
641 	struct nfp_net_hw *net_hw;
642 
643 	net_hw = nfp_net_get_hw(dev);
644 
645 	hw = &net_hw->super;
646 	if ((hw->cap & NFP_NET_CFG_CTRL_PROMISC) == 0) {
647 		PMD_DRV_LOG(ERR, "Promiscuous mode not supported");
648 		return -ENOTSUP;
649 	}
650 
651 	if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) != 0) {
652 		PMD_DRV_LOG(INFO, "Promiscuous mode already enabled");
653 		return 0;
654 	}
655 
656 	new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_PROMISC;
657 	update = NFP_NET_CFG_UPDATE_GEN;
658 
659 	ret = nfp_reconfig(hw, new_ctrl, update);
660 	if (ret != 0)
661 		return ret;
662 
663 	hw->ctrl = new_ctrl;
664 
665 	return 0;
666 }
667 
668 int
669 nfp_net_promisc_disable(struct rte_eth_dev *dev)
670 {
671 	int ret;
672 	uint32_t update;
673 	uint32_t new_ctrl;
674 	struct nfp_hw *hw;
675 	struct nfp_net_hw *net_hw;
676 
677 	net_hw = nfp_net_get_hw(dev);
678 	hw = &net_hw->super;
679 
680 	if ((hw->cap & NFP_NET_CFG_CTRL_PROMISC) == 0) {
681 		PMD_DRV_LOG(ERR, "Promiscuous mode not supported");
682 		return -ENOTSUP;
683 	}
684 
685 	if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) == 0) {
686 		PMD_DRV_LOG(INFO, "Promiscuous mode already disabled");
687 		return 0;
688 	}
689 
690 	new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_PROMISC;
691 	update = NFP_NET_CFG_UPDATE_GEN;
692 
693 	ret = nfp_reconfig(hw, new_ctrl, update);
694 	if (ret != 0)
695 		return ret;
696 
697 	hw->ctrl = new_ctrl;
698 
699 	return 0;
700 }
701 
702 static int
703 nfp_net_set_allmulticast_mode(struct rte_eth_dev *dev,
704 		bool enable)
705 {
706 	int ret;
707 	uint32_t update;
708 	struct nfp_hw *hw;
709 	uint32_t cap_extend;
710 	uint32_t ctrl_extend;
711 	uint32_t new_ctrl_extend;
712 	struct nfp_net_hw *net_hw;
713 
714 	net_hw = nfp_net_get_hw(dev);
715 	hw = &net_hw->super;
716 
717 	cap_extend = hw->cap_ext;
718 	if ((cap_extend & NFP_NET_CFG_CTRL_MCAST_FILTER) == 0) {
719 		PMD_DRV_LOG(ERR, "Allmulticast mode not supported");
720 		return -ENOTSUP;
721 	}
722 
723 	/*
724 	 * Allmulticast mode enabled when NFP_NET_CFG_CTRL_MCAST_FILTER bit is 0.
725 	 * Allmulticast mode disabled when NFP_NET_CFG_CTRL_MCAST_FILTER bit is 1.
726 	 */
727 	ctrl_extend = hw->ctrl_ext;
728 	if (enable) {
729 		if ((ctrl_extend & NFP_NET_CFG_CTRL_MCAST_FILTER) == 0)
730 			return 0;
731 
732 		new_ctrl_extend = ctrl_extend & ~NFP_NET_CFG_CTRL_MCAST_FILTER;
733 	} else {
734 		if ((ctrl_extend & NFP_NET_CFG_CTRL_MCAST_FILTER) != 0)
735 			return 0;
736 
737 		new_ctrl_extend = ctrl_extend | NFP_NET_CFG_CTRL_MCAST_FILTER;
738 	}
739 
740 	update = NFP_NET_CFG_UPDATE_GEN;
741 
742 	ret = nfp_ext_reconfig(hw, new_ctrl_extend, update);
743 	if (ret != 0)
744 		return ret;
745 
746 	hw->ctrl_ext = new_ctrl_extend;
747 	return 0;
748 }
749 
750 int
751 nfp_net_allmulticast_enable(struct rte_eth_dev *dev)
752 {
753 	return nfp_net_set_allmulticast_mode(dev, true);
754 }
755 
756 int
757 nfp_net_allmulticast_disable(struct rte_eth_dev *dev)
758 {
759 	return nfp_net_set_allmulticast_mode(dev, false);
760 }
761 
762 static void
763 nfp_net_pf_speed_update(struct rte_eth_dev *dev,
764 		struct nfp_net_hw_priv *hw_priv,
765 		struct rte_eth_link *link)
766 {
767 	uint8_t idx;
768 	enum nfp_eth_aneg aneg;
769 	struct nfp_pf_dev *pf_dev;
770 	struct nfp_eth_table *nfp_eth_table;
771 	struct nfp_eth_table_port *eth_port;
772 
773 	pf_dev = hw_priv->pf_dev;
774 	idx = nfp_net_get_idx(dev);
775 	aneg = pf_dev->nfp_eth_table->ports[idx].aneg;
776 
777 	/* Re-read the port table if the speed setting may have changed. */
778 	if (pf_dev->speed_updated || aneg == NFP_ANEG_AUTO) {
779 		nfp_eth_table = nfp_eth_read_ports(pf_dev->cpp);
780 		if (nfp_eth_table == NULL) {
781 			PMD_DRV_LOG(WARNING, "Failed to update port speed.");
782 		} else {
783 			pf_dev->nfp_eth_table->ports[idx] = nfp_eth_table->ports[idx];
784 			free(nfp_eth_table);
785 			pf_dev->speed_updated = false;
786 		}
787 	}
788 
789 	nfp_eth_table = pf_dev->nfp_eth_table;
790 	eth_port = &nfp_eth_table->ports[idx];
791 
792 	link->link_speed = nfp_net_link_speed_nfp2rte_check(eth_port->speed);
793 
794 	if (dev->data->dev_conf.link_speeds == RTE_ETH_LINK_SPEED_AUTONEG &&
795 			eth_port->supp_aneg)
796 		link->link_autoneg = RTE_ETH_LINK_AUTONEG;
797 }
798 
799 static void
800 nfp_net_vf_speed_update(struct rte_eth_link *link,
801 		uint32_t link_status)
802 {
803 	size_t link_rate_index;
804 
805 	/*
806 	 * Shift and mask link_status so that it is effectively the value
807 	 * at offset NFP_NET_CFG_STS_NSP_LINK_RATE.
808 	 */
809 	link_rate_index = (link_status >> NFP_NET_CFG_STS_LINK_RATE_SHIFT) &
810 			NFP_NET_CFG_STS_LINK_RATE_MASK;
811 	if (link_rate_index < RTE_DIM(nfp_net_link_speed_nfp2rte))
812 		link->link_speed = nfp_net_link_speed_nfp2rte[link_rate_index];
813 	else
814 		link->link_speed = RTE_ETH_SPEED_NUM_NONE;
815 }
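
/*
 * Worked example: for a 25G link the firmware encodes
 * NFP_NET_CFG_STS_LINK_RATE_25G in the status word, so
 *
 *	(link_status >> NFP_NET_CFG_STS_LINK_RATE_SHIFT) &
 *			NFP_NET_CFG_STS_LINK_RATE_MASK
 *
 * recovers that rate code, and nfp_net_link_speed_nfp2rte[] maps it to
 * RTE_ETH_SPEED_NUM_25G.
 */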
816 
817 int
818 nfp_net_link_update_common(struct rte_eth_dev *dev,
819 		struct rte_eth_link *link,
820 		uint32_t link_status)
821 {
822 	int ret;
823 	struct nfp_net_hw_priv *hw_priv;
824 
825 	hw_priv = dev->process_private;
826 	if (link->link_status == RTE_ETH_LINK_UP) {
827 		if (hw_priv->is_pf)
828 			nfp_net_pf_speed_update(dev, hw_priv, link);
829 		else
830 			nfp_net_vf_speed_update(link, link_status);
831 	}
832 
833 	ret = rte_eth_linkstatus_set(dev, link);
834 	if (ret == 0) {
835 		if (link->link_status == RTE_ETH_LINK_UP)
836 			PMD_DRV_LOG(INFO, "NIC Link is Up");
837 		else
838 			PMD_DRV_LOG(INFO, "NIC Link is Down");
839 	}
840 
841 	return ret;
842 }
843 
844 /*
845  * Return 0 means link status changed, -1 means not changed
846  *
847  * Wait to complete is needed as it can take up to 9 seconds to get the Link
848  * status.
849  */
850 int
851 nfp_net_link_update(struct rte_eth_dev *dev,
852 		__rte_unused int wait_to_complete)
853 {
854 	int ret;
855 	struct nfp_net_hw *hw;
856 	uint32_t nn_link_status;
857 	struct rte_eth_link link;
858 	struct nfp_net_hw_priv *hw_priv;
859 
860 	hw = nfp_net_get_hw(dev);
861 	hw_priv = dev->process_private;
862 
863 	memset(&link, 0, sizeof(struct rte_eth_link));
864 
865 	/* Read link status */
866 	nn_link_status = nn_cfg_readw(&hw->super, NFP_NET_CFG_STS);
867 	if ((nn_link_status & NFP_NET_CFG_STS_LINK) != 0)
868 		link.link_status = RTE_ETH_LINK_UP;
869 
870 	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
871 
872 	ret = nfp_net_link_update_common(dev, &link, nn_link_status);
873 	if (ret == -EIO)
874 		return ret;
875 
876 	/*
877 	 * Notify the port to update the speed value in the CTRL BAR from NSP.
878 	 * Not applicable for VFs as the associated PF is still attached to the
879 	 * kernel driver.
880 	 */
881 	if (hw_priv != NULL && hw_priv->is_pf)
882 		nfp_net_notify_port_speed(hw, &link);
883 
884 	return ret;
885 }
886 
887 int
888 nfp_net_stats_get(struct rte_eth_dev *dev,
889 		struct rte_eth_stats *stats)
890 {
891 	uint16_t i;
892 	struct nfp_net_hw *hw;
893 	struct rte_eth_stats nfp_dev_stats;
894 
895 	if (stats == NULL)
896 		return -EINVAL;
897 
898 	hw = nfp_net_get_hw(dev);
899 
900 	memset(&nfp_dev_stats, 0, sizeof(nfp_dev_stats));
901 
902 	/* Reading per RX ring stats */
903 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
904 		if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
905 			break;
906 
907 		nfp_dev_stats.q_ipackets[i] =
908 				nn_cfg_readq(&hw->super, NFP_NET_CFG_RXR_STATS(i));
909 		nfp_dev_stats.q_ipackets[i] -=
910 				hw->eth_stats_base.q_ipackets[i];
911 
912 		nfp_dev_stats.q_ibytes[i] =
913 				nn_cfg_readq(&hw->super, NFP_NET_CFG_RXR_STATS(i) + 0x8);
914 		nfp_dev_stats.q_ibytes[i] -=
915 				hw->eth_stats_base.q_ibytes[i];
916 	}
917 
918 	/* Reading per TX ring stats */
919 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
920 		if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
921 			break;
922 
923 		nfp_dev_stats.q_opackets[i] =
924 				nn_cfg_readq(&hw->super, NFP_NET_CFG_TXR_STATS(i));
925 		nfp_dev_stats.q_opackets[i] -= hw->eth_stats_base.q_opackets[i];
926 
927 		nfp_dev_stats.q_obytes[i] =
928 				nn_cfg_readq(&hw->super, NFP_NET_CFG_TXR_STATS(i) + 0x8);
929 		nfp_dev_stats.q_obytes[i] -= hw->eth_stats_base.q_obytes[i];
930 	}
931 
932 	nfp_dev_stats.ipackets = nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_FRAMES);
933 	nfp_dev_stats.ipackets -= hw->eth_stats_base.ipackets;
934 
935 	nfp_dev_stats.ibytes = nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_OCTETS);
936 	nfp_dev_stats.ibytes -= hw->eth_stats_base.ibytes;
937 
938 	nfp_dev_stats.opackets =
939 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_TX_FRAMES);
940 	nfp_dev_stats.opackets -= hw->eth_stats_base.opackets;
941 
942 	nfp_dev_stats.obytes =
943 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_TX_OCTETS);
944 	nfp_dev_stats.obytes -= hw->eth_stats_base.obytes;
945 
946 	/* Reading general device stats */
947 	nfp_dev_stats.ierrors =
948 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_ERRORS);
949 	nfp_dev_stats.ierrors -= hw->eth_stats_base.ierrors;
950 
951 	nfp_dev_stats.oerrors =
952 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_TX_ERRORS);
953 	nfp_dev_stats.oerrors -= hw->eth_stats_base.oerrors;
954 
955 	/* RX ring mbuf allocation failures */
956 	nfp_dev_stats.rx_nombuf = dev->data->rx_mbuf_alloc_failed;
957 
958 	nfp_dev_stats.imissed =
959 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_DISCARDS);
960 	nfp_dev_stats.imissed -= hw->eth_stats_base.imissed;
961 
962 	memcpy(stats, &nfp_dev_stats, sizeof(*stats));
963 	return 0;
964 }
965 
966 /*
967  * hw->eth_stats_base records the per-counter starting point.
968  * Let's update it now.
969  */
970 int
971 nfp_net_stats_reset(struct rte_eth_dev *dev)
972 {
973 	uint16_t i;
974 	struct nfp_net_hw *hw;
975 
976 	hw = nfp_net_get_hw(dev);
977 
978 	/* Reading per RX ring stats */
979 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
980 		if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
981 			break;
982 
983 		hw->eth_stats_base.q_ipackets[i] =
984 				nn_cfg_readq(&hw->super, NFP_NET_CFG_RXR_STATS(i));
985 
986 		hw->eth_stats_base.q_ibytes[i] =
987 				nn_cfg_readq(&hw->super, NFP_NET_CFG_RXR_STATS(i) + 0x8);
988 	}
989 
990 	/* Reading per TX ring stats */
991 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
992 		if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
993 			break;
994 
995 		hw->eth_stats_base.q_opackets[i] =
996 				nn_cfg_readq(&hw->super, NFP_NET_CFG_TXR_STATS(i));
997 
998 		hw->eth_stats_base.q_obytes[i] =
999 				nn_cfg_readq(&hw->super, NFP_NET_CFG_TXR_STATS(i) + 0x8);
1000 	}
1001 
1002 	hw->eth_stats_base.ipackets =
1003 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_FRAMES);
1004 
1005 	hw->eth_stats_base.ibytes =
1006 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_OCTETS);
1007 
1008 	hw->eth_stats_base.opackets =
1009 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_TX_FRAMES);
1010 
1011 	hw->eth_stats_base.obytes =
1012 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_TX_OCTETS);
1013 
1014 	/* Reading general device stats */
1015 	hw->eth_stats_base.ierrors =
1016 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_ERRORS);
1017 
1018 	hw->eth_stats_base.oerrors =
1019 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_TX_ERRORS);
1020 
1021 	/* RX ring mbuf allocation failures */
1022 	dev->data->rx_mbuf_alloc_failed = 0;
1023 
1024 	hw->eth_stats_base.imissed =
1025 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_DISCARDS);
1026 
1027 	return 0;
1028 }
1029 
1030 uint32_t
1031 nfp_net_xstats_size(const struct rte_eth_dev *dev)
1032 {
1033 	uint32_t count;
1034 	bool vf_flag = false;
1035 	struct nfp_net_hw *hw;
1036 	struct nfp_flower_representor *repr;
1037 	const uint32_t size = RTE_DIM(nfp_net_xstats);
1038 
1039 	if (rte_eth_dev_is_repr(dev)) {
1040 		repr = dev->data->dev_private;
1041 		if (repr->mac_stats == NULL)
1042 			vf_flag = true;
1043 	} else {
1044 		hw = dev->data->dev_private;
1045 		if (hw->mac_stats == NULL)
1046 			vf_flag = true;
1047 	}
1048 
1049 	/* If the device is a VF or VF-repr, then there will be no MAC stats */
1050 	if (vf_flag) {
1051 		for (count = 0; count < size; count++) {
1052 			if (nfp_net_xstats[count].group == NFP_XSTAT_GROUP_MAC)
1053 				break;
1054 		}
1055 
1056 		return count;
1057 	}
1058 
1059 	return size;
1060 }
1061 
1062 static const struct nfp_xstat *
1063 nfp_net_xstats_info(const struct rte_eth_dev *dev,
1064 		uint32_t index)
1065 {
1066 	if (index >= nfp_net_xstats_size(dev)) {
1067 		PMD_DRV_LOG(ERR, "xstat index out of bounds");
1068 		return NULL;
1069 	}
1070 
1071 	return &nfp_net_xstats[index];
1072 }
1073 
1074 static uint64_t
1075 nfp_net_xstats_value(const struct rte_eth_dev *dev,
1076 		uint32_t index,
1077 		bool raw)
1078 {
1079 	uint64_t value;
1080 	uint8_t *mac_stats;
1081 	struct nfp_net_hw *hw;
1082 	struct nfp_xstat xstat;
1083 	struct rte_eth_xstat *xstats_base;
1084 	struct nfp_flower_representor *repr;
1085 
1086 	if (rte_eth_dev_is_repr(dev)) {
1087 		repr = dev->data->dev_private;
1088 		hw = repr->app_fw_flower->pf_hw;
1089 
1090 		mac_stats = repr->mac_stats;
1091 		xstats_base = repr->repr_xstats_base;
1092 	} else {
1093 		hw = dev->data->dev_private;
1094 
1095 		mac_stats = hw->mac_stats;
1096 		xstats_base = hw->eth_xstats_base;
1097 	}
1098 
1099 	xstat = nfp_net_xstats[index];
1100 
1101 	if (xstat.group == NFP_XSTAT_GROUP_MAC)
1102 		value = nn_readq(mac_stats + xstat.offset);
1103 	else
1104 		value = nn_cfg_readq(&hw->super, xstat.offset);
1105 
1106 	if (raw)
1107 		return value;
1108 
1109 	/*
1110 	 * A baseline value of each statistic counter is recorded when stats are "reset".
1111 	 * Thus, the value returned by this function needs to be decremented by this
1112 	 * baseline value. The result is the count of this statistic since the last time
1113 	 * it was "reset".
1114 	 */
1115 	return value - xstats_base[index].value;
1116 }
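
/*
 * Application-side sketch (illustrative) of how these values are consumed
 * via the generic xstats API; the reported values already have the baseline
 * recorded at reset time subtracted:
 *
 *	int nb = rte_eth_xstats_get(port_id, NULL, 0);
 *	struct rte_eth_xstat *xs = calloc(nb, sizeof(*xs));
 *	struct rte_eth_xstat_name *names = calloc(nb, sizeof(*names));
 *
 *	rte_eth_xstats_get_names(port_id, names, nb);
 *	rte_eth_xstats_get(port_id, xs, nb);
 */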
1117 
1118 /* NOTE: All callers ensure dev is always set. */
1119 int
1120 nfp_net_xstats_get_names(struct rte_eth_dev *dev,
1121 		struct rte_eth_xstat_name *xstats_names,
1122 		unsigned int size)
1123 {
1124 	uint32_t id;
1125 	uint32_t nfp_size;
1126 	uint32_t read_size;
1127 
1128 	nfp_size = nfp_net_xstats_size(dev);
1129 
1130 	if (xstats_names == NULL)
1131 		return nfp_size;
1132 
1133 	/* Read at most NFP xstats number of names. */
1134 	read_size = RTE_MIN(size, nfp_size);
1135 
1136 	for (id = 0; id < read_size; id++)
1137 		rte_strlcpy(xstats_names[id].name, nfp_net_xstats[id].name,
1138 				RTE_ETH_XSTATS_NAME_SIZE);
1139 
1140 	return read_size;
1141 }
1142 
1143 /* NOTE: All callers ensure dev is always set. */
1144 int
1145 nfp_net_xstats_get(struct rte_eth_dev *dev,
1146 		struct rte_eth_xstat *xstats,
1147 		unsigned int n)
1148 {
1149 	uint32_t id;
1150 	uint32_t nfp_size;
1151 	uint32_t read_size;
1152 
1153 	nfp_size = nfp_net_xstats_size(dev);
1154 
1155 	if (xstats == NULL)
1156 		return nfp_size;
1157 
1158 	/* Read at most NFP xstats number of values. */
1159 	read_size = RTE_MIN(n, nfp_size);
1160 
1161 	for (id = 0; id < read_size; id++) {
1162 		xstats[id].id = id;
1163 		xstats[id].value = nfp_net_xstats_value(dev, id, false);
1164 	}
1165 
1166 	return read_size;
1167 }
1168 
1169 /*
1170  * NOTE: The only caller rte_eth_xstats_get_names_by_id() ensures dev,
1171  * ids, xstats_names and size are valid, and non-NULL.
1172  */
1173 int
1174 nfp_net_xstats_get_names_by_id(struct rte_eth_dev *dev,
1175 		const uint64_t *ids,
1176 		struct rte_eth_xstat_name *xstats_names,
1177 		unsigned int size)
1178 {
1179 	uint32_t i;
1180 	uint32_t read_size;
1181 
1182 	/* Read at most NFP xstats number of names. */
1183 	read_size = RTE_MIN(size, nfp_net_xstats_size(dev));
1184 
1185 	for (i = 0; i < read_size; i++) {
1186 		const struct nfp_xstat *xstat;
1187 
1188 		/* Make sure ID is valid for device. */
1189 		xstat = nfp_net_xstats_info(dev, ids[i]);
1190 		if (xstat == NULL)
1191 			return -EINVAL;
1192 
1193 		rte_strlcpy(xstats_names[i].name, xstat->name,
1194 				RTE_ETH_XSTATS_NAME_SIZE);
1195 	}
1196 
1197 	return read_size;
1198 }
1199 
1200 /*
1201  * NOTE: The only caller rte_eth_xstats_get_by_id() ensures dev,
1202  * ids, values and n are valid, and non-NULL.
1203  */
1204 int
1205 nfp_net_xstats_get_by_id(struct rte_eth_dev *dev,
1206 		const uint64_t *ids,
1207 		uint64_t *values,
1208 		unsigned int n)
1209 {
1210 	uint32_t i;
1211 	uint32_t read_size;
1212 
1213 	/* Read at most NFP xstats number of values. */
1214 	read_size = RTE_MIN(n, nfp_net_xstats_size(dev));
1215 
1216 	for (i = 0; i < read_size; i++) {
1217 		const struct nfp_xstat *xstat;
1218 
1219 		/* Make sure index is valid for device. */
1220 		xstat = nfp_net_xstats_info(dev, ids[i]);
1221 		if (xstat == NULL)
1222 			return -EINVAL;
1223 
1224 		values[i] = nfp_net_xstats_value(dev, ids[i], false);
1225 	}
1226 
1227 	return read_size;
1228 }
1229 
1230 int
1231 nfp_net_xstats_reset(struct rte_eth_dev *dev)
1232 {
1233 	uint32_t id;
1234 	uint32_t read_size;
1235 	struct nfp_net_hw *hw;
1236 	struct rte_eth_xstat *xstats_base;
1237 	struct nfp_flower_representor *repr;
1238 
1239 	read_size = nfp_net_xstats_size(dev);
1240 
1241 	if (rte_eth_dev_is_repr(dev)) {
1242 		repr = dev->data->dev_private;
1243 		xstats_base = repr->repr_xstats_base;
1244 	} else {
1245 		hw = dev->data->dev_private;
1246 		xstats_base = hw->eth_xstats_base;
1247 	}
1248 
1249 	for (id = 0; id < read_size; id++) {
1250 		xstats_base[id].id = id;
1251 		xstats_base[id].value = nfp_net_xstats_value(dev, id, true);
1252 	}
1253 
1254 	/* Successfully reset xstats, now call function to reset basic stats. */
1255 	if (rte_eth_dev_is_repr(dev))
1256 		return nfp_flower_repr_stats_reset(dev);
1257 	else
1258 		return nfp_net_stats_reset(dev);
1259 }
1260 
1261 void
1262 nfp_net_rx_desc_limits(struct nfp_net_hw_priv *hw_priv,
1263 		uint16_t *min_rx_desc,
1264 		uint16_t *max_rx_desc)
1265 {
1266 	*max_rx_desc = hw_priv->dev_info->max_qc_size;
1267 	*min_rx_desc = hw_priv->dev_info->min_qc_size;
1268 }
1269 
1270 void
1271 nfp_net_tx_desc_limits(struct nfp_net_hw_priv *hw_priv,
1272 		uint16_t *min_tx_desc,
1273 		uint16_t *max_tx_desc)
1274 {
1275 	uint16_t tx_dpp;
1276 
1277 	if (hw_priv->pf_dev->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3)
1278 		tx_dpp = NFD3_TX_DESC_PER_PKT;
1279 	else
1280 		tx_dpp = NFDK_TX_DESC_PER_SIMPLE_PKT;
1281 
1282 	*max_tx_desc = hw_priv->dev_info->max_qc_size / tx_dpp;
1283 	*min_tx_desc = hw_priv->dev_info->min_qc_size / tx_dpp;
1284 }
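
/*
 * Worked example (hypothetical numbers): if the device info reported
 * max_qc_size = 32768 and the active datapath consumed 2 descriptors per
 * simple packet, the advertised TX ring limit would be 32768 / 2 = 16384
 * descriptors; a datapath using 1 descriptor per packet would keep the
 * full 32768.
 */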
1285 
1286 int
1287 nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1288 {
1289 	uint32_t cap;
1290 	uint32_t cap_extend;
1291 	uint16_t min_rx_desc;
1292 	uint16_t max_rx_desc;
1293 	uint16_t min_tx_desc;
1294 	uint16_t max_tx_desc;
1295 	struct nfp_net_hw *hw;
1296 	struct nfp_net_hw_priv *hw_priv;
1297 
1298 	hw = nfp_net_get_hw(dev);
1299 	hw_priv = dev->process_private;
1300 	if (hw_priv == NULL)
1301 		return -EINVAL;
1302 
1303 	nfp_net_rx_desc_limits(hw_priv, &min_rx_desc, &max_rx_desc);
1304 	nfp_net_tx_desc_limits(hw_priv, &min_tx_desc, &max_tx_desc);
1305 
1306 	dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
1307 	dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
1308 	dev_info->min_rx_bufsize = RTE_ETHER_MIN_MTU;
1309 	/*
1310 	 * The maximum rx packet length is set to the maximum layer 3 MTU,
1311 	 * plus layer 2, CRC and VLAN headers.
1312 	 * The maximum layer 3 MTU (max_mtu) is read from hardware,
1313 	 * which was set by the firmware loaded onto the card.
1314 	 */
1315 	dev_info->max_rx_pktlen = hw->max_mtu + NFP_ETH_OVERHEAD;
1316 	dev_info->max_mtu = hw->max_mtu;
1317 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
1318 	/* Next should change when PF support is implemented */
1319 	dev_info->max_mac_addrs = 1;
1320 
1321 	cap = hw->super.cap;
1322 
1323 	if ((cap & (NFP_NET_CFG_CTRL_RXVLAN | NFP_NET_CFG_CTRL_RXVLAN_V2)) != 0)
1324 		dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
1325 
1326 	if ((cap & NFP_NET_CFG_CTRL_RXQINQ) != 0)
1327 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
1328 
1329 	if ((cap & NFP_NET_CFG_CTRL_RXCSUM) != 0)
1330 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
1331 				RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
1332 				RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
1333 
1334 	if ((cap & (NFP_NET_CFG_CTRL_TXVLAN | NFP_NET_CFG_CTRL_TXVLAN_V2)) != 0)
1335 		dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
1336 
1337 	if ((cap & NFP_NET_CFG_CTRL_TXCSUM) != 0)
1338 		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
1339 				RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
1340 				RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
1341 
1342 	if ((cap & NFP_NET_CFG_CTRL_LSO_ANY) != 0) {
1343 		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
1344 		if ((cap & NFP_NET_CFG_CTRL_USO) != 0)
1345 			dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_UDP_TSO;
1346 		if ((cap & NFP_NET_CFG_CTRL_VXLAN) != 0)
1347 			dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO;
1348 	}
1349 
1350 	if ((cap & NFP_NET_CFG_CTRL_GATHER) != 0)
1351 		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
1352 
1353 	cap_extend = hw->super.cap_ext;
1354 	if ((cap_extend & NFP_NET_CFG_CTRL_IPSEC) != 0) {
1355 		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_SECURITY;
1356 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_SECURITY;
1357 	}
1358 
1359 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
1360 		.rx_thresh = {
1361 			.pthresh = DEFAULT_RX_PTHRESH,
1362 			.hthresh = DEFAULT_RX_HTHRESH,
1363 			.wthresh = DEFAULT_RX_WTHRESH,
1364 		},
1365 		.rx_free_thresh = DEFAULT_RX_FREE_THRESH,
1366 		.rx_drop_en = 0,
1367 	};
1368 
1369 	dev_info->default_txconf = (struct rte_eth_txconf) {
1370 		.tx_thresh = {
1371 			.pthresh = DEFAULT_TX_PTHRESH,
1372 			.hthresh = DEFAULT_TX_HTHRESH,
1373 			.wthresh = DEFAULT_TX_WTHRESH,
1374 		},
1375 		.tx_free_thresh = DEFAULT_TX_FREE_THRESH,
1376 		.tx_rs_thresh = DEFAULT_TX_RSBIT_THRESH,
1377 	};
1378 
1379 	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
1380 		.nb_max = max_rx_desc,
1381 		.nb_min = min_rx_desc,
1382 		.nb_align = NFP_ALIGN_RING_DESC,
1383 	};
1384 
1385 	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
1386 		.nb_max = max_tx_desc,
1387 		.nb_min = min_tx_desc,
1388 		.nb_align = NFP_ALIGN_RING_DESC,
1389 		.nb_seg_max = NFP_TX_MAX_SEG,
1390 		.nb_mtu_seg_max = NFP_TX_MAX_MTU_SEG,
1391 	};
1392 
1393 	if ((cap & NFP_NET_CFG_CTRL_RSS_ANY) != 0) {
1394 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
1395 		dev_info->flow_type_rss_offloads = NFP_NET_RSS_CAP;
1396 		dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ;
1397 		dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ;
1398 	}
1399 
1400 	/* Only PF supports getting speed capability. */
1401 	if (hw_priv->is_pf)
1402 		dev_info->speed_capa = hw_priv->pf_dev->speed_capa;
1403 
1404 	return 0;
1405 }
1406 
1407 int
1408 nfp_net_common_init(struct nfp_pf_dev *pf_dev,
1409 		struct nfp_net_hw *hw)
1410 {
1411 	const int stride = 4;
1412 	struct rte_pci_device *pci_dev;
1413 
1414 	pci_dev = pf_dev->pci_dev;
1415 	hw->device_id = pci_dev->id.device_id;
1416 	hw->vendor_id = pci_dev->id.vendor_id;
1417 	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
1418 	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
1419 
1420 	hw->max_rx_queues = nn_cfg_readl(&hw->super, NFP_NET_CFG_MAX_RXRINGS);
1421 	hw->max_tx_queues = nn_cfg_readl(&hw->super, NFP_NET_CFG_MAX_TXRINGS);
1422 	if (hw->max_rx_queues == 0 || hw->max_tx_queues == 0) {
1423 		PMD_INIT_LOG(ERR, "Device %s cannot be used, there are no valid queue "
1424 				"pairs for use", pci_dev->name);
1425 		return -ENODEV;
1426 	}
1427 
1428 	if (nfp_net_check_dma_mask(pf_dev, pci_dev->name) != 0)
1429 		return -ENODEV;
1430 
1431 	/* Get some of the read-only fields from the config BAR */
1432 	hw->super.cap = nn_cfg_readl(&hw->super, NFP_NET_CFG_CAP);
1433 	hw->super.cap_ext = nn_cfg_readl(&hw->super, NFP_NET_CFG_CAP_WORD1);
1434 	hw->max_mtu = nn_cfg_readl(&hw->super, NFP_NET_CFG_MAX_MTU);
1435 	hw->flbufsz = DEFAULT_FLBUF_SIZE;
1436 
1437 	nfp_net_meta_init_format(hw, pf_dev);
1438 
1439 	/* Read the Rx offset configured from firmware */
1440 	if (pf_dev->ver.major < 2)
1441 		hw->rx_offset = NFP_NET_RX_OFFSET;
1442 	else
1443 		hw->rx_offset = nn_cfg_readl(&hw->super, NFP_NET_CFG_RX_OFFSET);
1444 
1445 	hw->super.ctrl = 0;
1446 	hw->stride_rx = stride;
1447 	hw->stride_tx = stride;
1448 
1449 	return 0;
1450 }
1451 
1452 const uint32_t *
1453 nfp_net_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements)
1454 {
1455 	struct nfp_net_hw *net_hw;
1456 	static const uint32_t ptypes[] = {
1457 		RTE_PTYPE_L2_ETHER,
1458 		RTE_PTYPE_L3_IPV4,
1459 		RTE_PTYPE_L3_IPV4_EXT,
1460 		RTE_PTYPE_L3_IPV6,
1461 		RTE_PTYPE_L3_IPV6_EXT,
1462 		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1463 		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
1464 		RTE_PTYPE_L4_TCP,
1465 		RTE_PTYPE_L4_UDP,
1466 		RTE_PTYPE_L4_FRAG,
1467 		RTE_PTYPE_L4_NONFRAG,
1468 		RTE_PTYPE_L4_ICMP,
1469 		RTE_PTYPE_L4_SCTP,
1470 		RTE_PTYPE_TUNNEL_VXLAN,
1471 		RTE_PTYPE_TUNNEL_NVGRE,
1472 		RTE_PTYPE_TUNNEL_GENEVE,
1473 		RTE_PTYPE_INNER_L2_ETHER,
1474 		RTE_PTYPE_INNER_L3_IPV4,
1475 		RTE_PTYPE_INNER_L3_IPV4_EXT,
1476 		RTE_PTYPE_INNER_L3_IPV6,
1477 		RTE_PTYPE_INNER_L3_IPV6_EXT,
1478 		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
1479 		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
1480 		RTE_PTYPE_INNER_L4_TCP,
1481 		RTE_PTYPE_INNER_L4_UDP,
1482 		RTE_PTYPE_INNER_L4_FRAG,
1483 		RTE_PTYPE_INNER_L4_NONFRAG,
1484 		RTE_PTYPE_INNER_L4_ICMP,
1485 		RTE_PTYPE_INNER_L4_SCTP,
1486 	};
1487 
1488 	if (dev->rx_pkt_burst == NULL)
1489 		return NULL;
1490 
1491 	net_hw = dev->data->dev_private;
1492 	if ((net_hw->super.cap_ext & NFP_NET_CFG_CTRL_PKT_TYPE) == 0)
1493 		return NULL;
1494 
1495 	*no_of_elements = RTE_DIM(ptypes);
1496 	return ptypes;
1497 }
1498 
1499 int
1500 nfp_net_ptypes_set(struct rte_eth_dev *dev,
1501 		uint32_t ptype_mask)
1502 {
1503 	int ret;
1504 	uint32_t update;
1505 	uint32_t ctrl_ext;
1506 	struct nfp_hw *hw;
1507 	struct nfp_net_hw *net_hw;
1508 
1509 	net_hw = dev->data->dev_private;
1510 	hw = &net_hw->super;
1511 
1512 	if ((hw->cap_ext & NFP_NET_CFG_CTRL_PKT_TYPE) == 0)
1513 		return -ENOTSUP;
1514 
1515 	ctrl_ext = hw->ctrl_ext;
1516 	if (ptype_mask == 0) {
1517 		if ((ctrl_ext & NFP_NET_CFG_CTRL_PKT_TYPE) == 0)
1518 			return 0;
1519 
1520 		ctrl_ext &= ~NFP_NET_CFG_CTRL_PKT_TYPE;
1521 	} else {
1522 		if ((ctrl_ext & NFP_NET_CFG_CTRL_PKT_TYPE) != 0)
1523 			return 0;
1524 
1525 		ctrl_ext |= NFP_NET_CFG_CTRL_PKT_TYPE;
1526 	}
1527 
1528 	update = NFP_NET_CFG_UPDATE_GEN;
1529 
1530 	ret = nfp_ext_reconfig(hw, ctrl_ext, update);
1531 	if (ret != 0)
1532 		return ret;
1533 
1534 	hw->ctrl_ext = ctrl_ext;
1535 
1536 	return 0;
1537 }
1538 
1539 int
1540 nfp_rx_queue_intr_enable(struct rte_eth_dev *dev,
1541 		uint16_t queue_id)
1542 {
1543 	uint16_t base = 0;
1544 	struct nfp_net_hw *hw;
1545 	struct rte_pci_device *pci_dev;
1546 
1547 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1548 	if (rte_intr_type_get(pci_dev->intr_handle) != RTE_INTR_HANDLE_UIO)
1549 		base = 1;
1550 
1551 	/* Make sure all updates are written before un-masking */
1552 	rte_wmb();
1553 
1554 	hw = nfp_net_get_hw(dev);
1555 	nn_cfg_writeb(&hw->super, NFP_NET_CFG_ICR(base + queue_id),
1556 			NFP_NET_CFG_ICR_UNMASKED);
1557 	return 0;
1558 }
1559 
1560 int
1561 nfp_rx_queue_intr_disable(struct rte_eth_dev *dev,
1562 		uint16_t queue_id)
1563 {
1564 	uint16_t base = 0;
1565 	struct nfp_net_hw *hw;
1566 	struct rte_pci_device *pci_dev;
1567 
1568 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1569 	if (rte_intr_type_get(pci_dev->intr_handle) != RTE_INTR_HANDLE_UIO)
1570 		base = 1;
1571 
1572 	/* Make sure all updates are written before un-masking */
1573 	rte_wmb();
1574 
1575 	hw = nfp_net_get_hw(dev);
1576 	nn_cfg_writeb(&hw->super, NFP_NET_CFG_ICR(base + queue_id), NFP_NET_CFG_ICR_RXTX);
1577 
1578 	return 0;
1579 }
1580 
1581 static void
1582 nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
1583 {
1584 	struct rte_eth_link link;
1585 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1586 
1587 	rte_eth_linkstatus_get(dev, &link);
1588 	if (link.link_status != 0)
1589 		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
1590 				dev->data->port_id, link.link_speed,
1591 				link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
1592 				"full-duplex" : "half-duplex");
1593 	else
1594 		PMD_DRV_LOG(INFO, " Port %d: Link Down", dev->data->port_id);
1595 
1596 	PMD_DRV_LOG(INFO, "PCI Address: " PCI_PRI_FMT,
1597 			pci_dev->addr.domain, pci_dev->addr.bus,
1598 			pci_dev->addr.devid, pci_dev->addr.function);
1599 }
1600 
1601 /*
1602  * Unmask an interrupt
1603  *
1604  * If MSI-X auto-masking is enabled clear the mask bit, otherwise
1605  * clear the ICR for the entry.
1606  */
1607 void
1608 nfp_net_irq_unmask(struct rte_eth_dev *dev)
1609 {
1610 	struct nfp_net_hw *hw;
1611 	struct rte_pci_device *pci_dev;
1612 
1613 	hw = nfp_net_get_hw(dev);
1614 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1615 
1616 	/* Make sure all updates are written before un-masking */
1617 	rte_wmb();
1618 
1619 	if ((hw->super.ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) != 0) {
1620 		/* If MSI-X auto-masking is used, clear the entry */
1621 		rte_intr_ack(pci_dev->intr_handle);
1622 	} else {
1623 		nn_cfg_writeb(&hw->super, NFP_NET_CFG_ICR(NFP_NET_IRQ_LSC_IDX),
1624 				NFP_NET_CFG_ICR_UNMASKED);
1625 	}
1626 }
1627 
1628 /**
1629  * Interrupt handler registered as an alarm callback for delayed handling of
1630  * the LSC interrupt, waiting for the NIC state to stabilize. As the NFP
1631  * interrupt state is not stable right after the link goes down, the handler
1632  * waits 4 seconds before reading the stable status.
1633  *
1634  * @param param
1635  *   The address of parameter (struct rte_eth_dev *)
1636  */
1637 void
1638 nfp_net_dev_interrupt_delayed_handler(void *param)
1639 {
1640 	struct rte_eth_dev *dev = param;
1641 
1642 	nfp_net_link_update(dev, 0);
1643 	rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
1644 
1645 	nfp_net_dev_link_status_print(dev);
1646 
1647 	/* Unmasking */
1648 	nfp_net_irq_unmask(dev);
1649 }
1650 
1651 void
1652 nfp_net_dev_interrupt_handler(void *param)
1653 {
1654 	int64_t timeout;
1655 	struct rte_eth_link link;
1656 	struct rte_eth_dev *dev = param;
1657 
1658 	PMD_DRV_LOG(DEBUG, "We got an LSC interrupt!!!");
1659 
1660 	rte_eth_linkstatus_get(dev, &link);
1661 
1662 	nfp_net_link_update(dev, 0);
1663 
1664 	/* Link was down, so it is likely coming up */
1665 	if (link.link_status == 0) {
1666 		/* Handle it 1 sec later, waiting for it to stabilize */
1667 		timeout = NFP_NET_LINK_UP_CHECK_TIMEOUT;
1668 	} else {  /* Link was up, so it is likely going down */
1669 		/* Handle it 4 sec later, waiting for it to stabilize */
1670 		timeout = NFP_NET_LINK_DOWN_CHECK_TIMEOUT;
1671 	}
1672 
1673 	if (rte_eal_alarm_set(timeout * 1000,
1674 			nfp_net_dev_interrupt_delayed_handler,
1675 			(void *)dev) != 0) {
1676 		PMD_INIT_LOG(ERR, "Error setting alarm");
1677 		/* Unmasking */
1678 		nfp_net_irq_unmask(dev);
1679 	}
1680 }
1681 
1682 int
1683 nfp_net_dev_mtu_set(struct rte_eth_dev *dev,
1684 		uint16_t mtu)
1685 {
1686 	struct nfp_net_hw *hw;
1687 
1688 	hw = nfp_net_get_hw(dev);
1689 
1690 	/* MTU setting is forbidden if port is started */
1691 	if (dev->data->dev_started) {
1692 		PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
1693 				dev->data->port_id);
1694 		return -EBUSY;
1695 	}
1696 
1697 	/* MTU larger than current mbufsize not supported */
1698 	if (mtu > hw->flbufsz) {
1699 		PMD_DRV_LOG(ERR, "MTU (%u) larger than current mbufsize (%u) not supported",
1700 				mtu, hw->flbufsz);
1701 		return -ERANGE;
1702 	}
1703 
1704 	/* Writing to configuration space */
1705 	nn_cfg_writel(&hw->super, NFP_NET_CFG_MTU, mtu);
1706 
1707 	hw->mtu = mtu;
1708 
1709 	return 0;
1710 }
1711 
1712 int
1713 nfp_net_vlan_offload_set(struct rte_eth_dev *dev,
1714 		int mask)
1715 {
1716 	int ret;
1717 	uint32_t update;
1718 	uint32_t new_ctrl;
1719 	struct nfp_hw *hw;
1720 	uint64_t rx_offload;
1721 	struct nfp_net_hw *net_hw;
1722 	uint32_t rxvlan_ctrl = 0;
1723 
1724 	net_hw = nfp_net_get_hw(dev);
1725 	hw = &net_hw->super;
1726 	rx_offload = dev->data->dev_conf.rxmode.offloads;
1727 	new_ctrl = hw->ctrl;
1728 
1729 	/* VLAN stripping setting */
1730 	if ((mask & RTE_ETH_VLAN_STRIP_MASK) != 0) {
1731 		nfp_net_enable_rxvlan_cap(net_hw, &rxvlan_ctrl);
1732 		if ((rx_offload & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) != 0)
1733 			new_ctrl |= rxvlan_ctrl;
1734 		else
1735 			new_ctrl &= ~rxvlan_ctrl;
1736 	}
1737 
1738 	/* QinQ stripping setting */
1739 	if ((mask & RTE_ETH_QINQ_STRIP_MASK) != 0) {
1740 		if ((rx_offload & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) != 0)
1741 			new_ctrl |= NFP_NET_CFG_CTRL_RXQINQ;
1742 		else
1743 			new_ctrl &= ~NFP_NET_CFG_CTRL_RXQINQ;
1744 	}
1745 
1746 	if (new_ctrl == hw->ctrl)
1747 		return 0;
1748 
1749 	update = NFP_NET_CFG_UPDATE_GEN;
1750 
1751 	ret = nfp_reconfig(hw, new_ctrl, update);
1752 	if (ret != 0)
1753 		return ret;
1754 
1755 	hw->ctrl = new_ctrl;
1756 
1757 	return 0;
1758 }
1759 
1760 static int
1761 nfp_net_rss_reta_write(struct rte_eth_dev *dev,
1762 		struct rte_eth_rss_reta_entry64 *reta_conf,
1763 		uint16_t reta_size)
1764 {
1765 	uint16_t i;
1766 	uint16_t j;
1767 	uint16_t idx;
1768 	uint8_t mask;
1769 	uint32_t reta;
1770 	uint16_t shift;
1771 	struct nfp_hw *hw;
1772 	struct nfp_net_hw *net_hw;
1773 
1774 	net_hw = nfp_net_get_hw(dev);
1775 	hw = &net_hw->super;
1776 
1777 	if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
1778 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured (%hu)"
1779 				" does not match what the hardware can support (%d)",
1780 				reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
1781 		return -EINVAL;
1782 	}
1783 
1784 	/*
1785 	 * Update the redirection table. There are 128 8-bit entries which can be
1786 	 * managed as 32 32-bit entries.
1787 	 */
1788 	for (i = 0; i < reta_size; i += 4) {
1789 		/* Handling 4 RSS entries per loop */
1790 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
1791 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
1792 		mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
1793 		if (mask == 0)
1794 			continue;
1795 
1796 		reta = 0;
1797 
1798 		/* If all 4 entries were set, there is no need to read the RETA register */
1799 		if (mask != 0xF)
1800 			reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + i);
1801 
1802 		for (j = 0; j < 4; j++) {
1803 			if ((mask & (0x1 << j)) == 0)
1804 				continue;
1805 
1806 			/* Clearing the entry bits */
1807 			if (mask != 0xF)
1808 				reta &= ~(0xFF << (8 * j));
1809 
1810 			reta |= reta_conf[idx].reta[shift + j] << (8 * j);
1811 		}
1812 
1813 		nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift, reta);
1814 	}
1815 
1816 	return 0;
1817 }
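
/*
 * Illustrative sketch, not part of the driver: how four consecutive 8-bit
 * RETA entries are packed into the single 32-bit word written above, with
 * entry j occupying bits [8 * j, 8 * j + 7]. The helper name is an
 * assumption for illustration only; the block is compiled out unless the
 * hypothetical NFP_NET_DOC_EXAMPLES macro is defined.
 */
#ifdef NFP_NET_DOC_EXAMPLES
static uint32_t
nfp_doc_pack_reta_word(const uint8_t entries[4])
{
	uint32_t j;
	uint32_t word = 0;

	for (j = 0; j < 4; j++)
		word |= (uint32_t)entries[j] << (8 * j);

	/* For example, entries {0, 1, 2, 3} pack into 0x03020100. */
	return word;
}
#endif /* NFP_NET_DOC_EXAMPLES */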
1818 
1819 /* Update the Redirection Table (RETA) of the RSS configuration of the Ethernet device */
1820 int
1821 nfp_net_reta_update(struct rte_eth_dev *dev,
1822 		struct rte_eth_rss_reta_entry64 *reta_conf,
1823 		uint16_t reta_size)
1824 {
1825 	int ret;
1826 	uint32_t update;
1827 	struct nfp_hw *hw;
1828 	struct nfp_net_hw *net_hw;
1829 
1830 	net_hw = nfp_net_get_hw(dev);
1831 	hw = &net_hw->super;
1832 
1833 	if ((hw->ctrl & NFP_NET_CFG_CTRL_RSS_ANY) == 0)
1834 		return -EINVAL;
1835 
1836 	ret = nfp_net_rss_reta_write(dev, reta_conf, reta_size);
1837 	if (ret != 0)
1838 		return ret;
1839 
1840 	update = NFP_NET_CFG_UPDATE_RSS;
1841 
1842 	if (nfp_reconfig(hw, hw->ctrl, update) != 0)
1843 		return -EIO;
1844 
1845 	return 0;
1846 }
1847 
1848 /* Query the Redirection Table (RETA) of the RSS configuration of the Ethernet device. */
1849 int
1850 nfp_net_reta_query(struct rte_eth_dev *dev,
1851 		struct rte_eth_rss_reta_entry64 *reta_conf,
1852 		uint16_t reta_size)
1853 {
1854 	uint16_t i;
1855 	uint16_t j;
1856 	uint16_t idx;
1857 	uint8_t mask;
1858 	uint32_t reta;
1859 	uint16_t shift;
1860 	struct nfp_hw *hw;
1861 	struct nfp_net_hw *net_hw;
1862 
1863 	net_hw = nfp_net_get_hw(dev);
1864 	hw = &net_hw->super;
1865 
1866 	if ((hw->ctrl & NFP_NET_CFG_CTRL_RSS_ANY) == 0)
1867 		return -EINVAL;
1868 
1869 	if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
1870 		PMD_DRV_LOG(ERR, "The size of the configured hash lookup table (%d)"
1871 				" does not match the size supported by the hardware (%d)",
1872 				reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
1873 		return -EINVAL;
1874 	}
1875 
1876 	/*
1877 	 * Read the redirection table. There are 128 8-bit entries which can be
1878 	 * managed as 32 32-bit entries.
1879 	 */
1880 	for (i = 0; i < reta_size; i += 4) {
1881 		/* Handling 4 RSS entries per loop */
1882 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
1883 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
1884 		mask = (reta_conf[idx].mask >> shift) & 0xF;
1885 
1886 		if (mask == 0)
1887 			continue;
1888 
1889 		reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift);
1890 		for (j = 0; j < 4; j++) {
1891 			if ((mask & (0x1 << j)) == 0)
1892 				continue;
1893 
1894 			reta_conf[idx].reta[shift + j] =
1895 					(uint8_t)((reta >> (8 * j)) & 0xFF);
1896 		}
1897 	}
1898 
1899 	return 0;
1900 }
1901 
1902 static int
1903 nfp_net_rss_hash_write(struct rte_eth_dev *dev,
1904 		struct rte_eth_rss_conf *rss_conf)
1905 {
1906 	uint8_t i;
1907 	uint8_t key;
1908 	uint64_t rss_hf;
1909 	struct nfp_hw *hw;
1910 	struct nfp_net_hw *net_hw;
1911 	uint32_t cfg_rss_ctrl = 0;
1912 
1913 	net_hw = nfp_net_get_hw(dev);
1914 	hw = &net_hw->super;
1915 
1916 	/* Writing the key byte by byte */
1917 	for (i = 0; i < rss_conf->rss_key_len; i++) {
1918 		memcpy(&key, &rss_conf->rss_key[i], 1);
1919 		nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY + i, key);
1920 	}
1921 
1922 	rss_hf = rss_conf->rss_hf;
1923 
1924 	if ((rss_hf & RTE_ETH_RSS_IPV4) != 0)
1925 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4;
1926 
1927 	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) != 0)
1928 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_TCP;
1929 
1930 	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) != 0)
1931 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_UDP;
1932 
1933 	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_SCTP) != 0)
1934 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_SCTP;
1935 
1936 	if ((rss_hf & RTE_ETH_RSS_IPV6) != 0)
1937 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6;
1938 
1939 	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) != 0)
1940 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_TCP;
1941 
1942 	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) != 0)
1943 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_UDP;
1944 
1945 	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_SCTP) != 0)
1946 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_SCTP;
1947 
1948 	cfg_rss_ctrl |= NFP_NET_CFG_RSS_MASK;
1949 
1950 	if (rte_eth_dev_is_repr(dev))
1951 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_CRC32;
1952 	else
1953 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_TOEPLITZ;
1954 
1955 	/* Configuring where to apply the RSS hash */
1956 	nn_cfg_writel(hw, NFP_NET_CFG_RSS_CTRL, cfg_rss_ctrl);
1957 
1958 	/* Writing the key size */
1959 	nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY_SZ, rss_conf->rss_key_len);
1960 
1961 	return 0;
1962 }
1963 
1964 int
1965 nfp_net_rss_hash_update(struct rte_eth_dev *dev,
1966 		struct rte_eth_rss_conf *rss_conf)
1967 {
1968 	uint32_t update;
1969 	uint64_t rss_hf;
1970 	struct nfp_hw *hw;
1971 	struct nfp_net_hw *net_hw;
1972 
1973 	net_hw = nfp_net_get_hw(dev);
1974 	hw = &net_hw->super;
1975 
1976 	rss_hf = rss_conf->rss_hf;
1977 
1978 	/* Checking if RSS is enabled */
1979 	if ((hw->ctrl & NFP_NET_CFG_CTRL_RSS_ANY) == 0) {
1980 		if (rss_hf != 0) {
1981 			PMD_DRV_LOG(ERR, "RSS unsupported");
1982 			return -EINVAL;
1983 		}
1984 
1985 		return 0; /* Nothing to do */
1986 	}
1987 
1988 	if (rss_conf->rss_key_len > NFP_NET_CFG_RSS_KEY_SZ) {
1989 		PMD_DRV_LOG(ERR, "RSS hash key too long");
1990 		return -EINVAL;
1991 	}
1992 
1993 	nfp_net_rss_hash_write(dev, rss_conf);
1994 
1995 	update = NFP_NET_CFG_UPDATE_RSS;
1996 
1997 	if (nfp_reconfig(hw, hw->ctrl, update) != 0)
1998 		return -EIO;
1999 
2000 	return 0;
2001 }
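
/*
 * Illustrative sketch, not part of the driver: updating the RSS hash key
 * and hash types at runtime through the ethdev API, which reaches
 * nfp_net_rss_hash_update() above. It assumes the key length must equal
 * the hash key size reported by the driver (NFP_NET_CFG_RSS_KEY_SZ). The
 * helper name and key contents are assumptions for illustration only; the
 * block is compiled out unless the hypothetical NFP_NET_DOC_EXAMPLES macro
 * is defined.
 */
#ifdef NFP_NET_DOC_EXAMPLES
static int
nfp_doc_rss_hash_on_ipv4(uint16_t port_id)
{
	/* A throw-away key; real applications choose their own. */
	static uint8_t rss_key[NFP_NET_CFG_RSS_KEY_SZ] = { 0x6d, 0x5a };
	struct rte_eth_rss_conf rss_conf = {
		.rss_key = rss_key,
		.rss_key_len = NFP_NET_CFG_RSS_KEY_SZ,
		.rss_hf = RTE_ETH_RSS_IPV4 |
				RTE_ETH_RSS_NONFRAG_IPV4_TCP |
				RTE_ETH_RSS_NONFRAG_IPV4_UDP,
	};

	return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
}
#endif /* NFP_NET_DOC_EXAMPLES */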
2002 
2003 int
2004 nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev,
2005 		struct rte_eth_rss_conf *rss_conf)
2006 {
2007 	uint8_t i;
2008 	uint8_t key;
2009 	uint64_t rss_hf;
2010 	struct nfp_hw *hw;
2011 	uint32_t cfg_rss_ctrl;
2012 	struct nfp_net_hw *net_hw;
2013 
2014 	net_hw = nfp_net_get_hw(dev);
2015 	hw = &net_hw->super;
2016 
2017 	if ((hw->ctrl & NFP_NET_CFG_CTRL_RSS_ANY) == 0)
2018 		return -EINVAL;
2019 
2020 	rss_hf = rss_conf->rss_hf;
2021 	cfg_rss_ctrl = nn_cfg_readl(hw, NFP_NET_CFG_RSS_CTRL);
2022 
2023 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4) != 0)
2024 		rss_hf |= RTE_ETH_RSS_IPV4;
2025 
2026 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_TCP) != 0)
2027 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
2028 
2029 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_TCP) != 0)
2030 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
2031 
2032 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_UDP) != 0)
2033 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
2034 
2035 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_UDP) != 0)
2036 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
2037 
2038 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6) != 0)
2039 		rss_hf |= RTE_ETH_RSS_IPV6;
2040 
2041 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_SCTP) != 0)
2042 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_SCTP;
2043 
2044 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_SCTP) != 0)
2045 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_SCTP;
2046 
2047 	/* Propagate current RSS hash functions to caller */
2048 	rss_conf->rss_hf = rss_hf;
2049 
2050 	/* Reading the key size */
2051 	rss_conf->rss_key_len = nn_cfg_readl(hw, NFP_NET_CFG_RSS_KEY_SZ);
2052 
2053 	/* Reading the key byte by byte */
2054 	for (i = 0; i < rss_conf->rss_key_len; i++) {
2055 		key = nn_cfg_readb(hw, NFP_NET_CFG_RSS_KEY + i);
2056 		memcpy(&rss_conf->rss_key[i], &key, 1);
2057 	}
2058 
2059 	return 0;
2060 }
2061 
2062 int
2063 nfp_net_rss_config_default(struct rte_eth_dev *dev)
2064 {
2065 	int ret;
2066 	uint8_t i;
2067 	uint8_t j;
2068 	uint16_t queue = 0;
2069 	struct rte_eth_conf *dev_conf;
2070 	struct rte_eth_rss_conf rss_conf;
2071 	uint16_t rx_queues = dev->data->nb_rx_queues;
2072 	struct rte_eth_rss_reta_entry64 nfp_reta_conf[2];
2073 
2074 	nfp_reta_conf[0].mask = ~0x0;
2075 	nfp_reta_conf[1].mask = ~0x0;
2076 
2077 	for (i = 0; i < 0x40; i += 8) {
2078 		for (j = i; j < (i + 8); j++) {
2079 			nfp_reta_conf[0].reta[j] = queue;
2080 			nfp_reta_conf[1].reta[j] = queue++;
2081 			queue %= rx_queues;
2082 		}
2083 	}
2084 
2085 	ret = nfp_net_rss_reta_write(dev, nfp_reta_conf, 0x80);
2086 	if (ret != 0)
2087 		return ret;
2088 
2089 	dev_conf = &dev->data->dev_conf;
2090 	if (dev_conf == NULL) {
2091 		PMD_DRV_LOG(ERR, "Invalid RSS configuration");
2092 		return -EINVAL;
2093 	}
2094 
2095 	rss_conf = dev_conf->rx_adv_conf.rss_conf;
2096 	ret = nfp_net_rss_hash_write(dev, &rss_conf);
2097 
2098 	return ret;
2099 }
2100 
2101 void
2102 nfp_net_stop_rx_queue(struct rte_eth_dev *dev)
2103 {
2104 	uint16_t i;
2105 	struct nfp_net_rxq *this_rx_q;
2106 
2107 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
2108 		this_rx_q = dev->data->rx_queues[i];
2109 		nfp_net_reset_rx_queue(this_rx_q);
2110 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
2111 	}
2112 }
2113 
2114 void
2115 nfp_net_close_rx_queue(struct rte_eth_dev *dev)
2116 {
2117 	uint16_t i;
2118 	struct nfp_net_rxq *this_rx_q;
2119 
2120 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
2121 		this_rx_q = dev->data->rx_queues[i];
2122 		nfp_net_reset_rx_queue(this_rx_q);
2123 		nfp_net_rx_queue_release(dev, i);
2124 	}
2125 }
2126 
2127 void
2128 nfp_net_stop_tx_queue(struct rte_eth_dev *dev)
2129 {
2130 	uint16_t i;
2131 	struct nfp_net_txq *this_tx_q;
2132 
2133 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
2134 		this_tx_q = dev->data->tx_queues[i];
2135 		nfp_net_reset_tx_queue(this_tx_q);
2136 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
2137 	}
2138 }
2139 
2140 void
2141 nfp_net_close_tx_queue(struct rte_eth_dev *dev)
2142 {
2143 	uint16_t i;
2144 	struct nfp_net_txq *this_tx_q;
2145 
2146 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
2147 		this_tx_q = dev->data->tx_queues[i];
2148 		nfp_net_reset_tx_queue(this_tx_q);
2149 		nfp_net_tx_queue_release(dev, i);
2150 	}
2151 }
2152 
2153 int
2154 nfp_net_set_vxlan_port(struct nfp_net_hw *net_hw,
2155 		size_t idx,
2156 		uint16_t port)
2157 {
2158 	int ret;
2159 	uint32_t i;
2160 	struct nfp_hw *hw = &net_hw->super;
2161 
2162 	if (idx >= NFP_NET_N_VXLAN_PORTS) {
2163 		PMD_DRV_LOG(ERR, "The idx value is out of range.");
2164 		return -ERANGE;
2165 	}
2166 
2167 	net_hw->vxlan_ports[idx] = port;
2168 
2169 	for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i += 2) {
2170 		nn_cfg_writel(hw, NFP_NET_CFG_VXLAN_PORT + i * sizeof(port),
2171 				(net_hw->vxlan_ports[i + 1] << 16) | net_hw->vxlan_ports[i]);
2172 	}
2173 
2174 	rte_spinlock_lock(&hw->reconfig_lock);
2175 
2176 	nn_cfg_writel(hw, NFP_NET_CFG_UPDATE, NFP_NET_CFG_UPDATE_VXLAN);
2177 	rte_wmb();
2178 
2179 	ret = nfp_reconfig_real(hw, NFP_NET_CFG_UPDATE_VXLAN);
2180 
2181 	rte_spinlock_unlock(&hw->reconfig_lock);
2182 
2183 	return ret;
2184 }
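
/*
 * Illustrative sketch, not part of the driver: registering a VXLAN UDP port
 * through the generic ethdev tunnel API. It assumes the driver wires the
 * udp_tunnel_port_add operation to nfp_net_set_vxlan_port() above, which
 * packs two 16-bit ports into each 32-bit register. The helper name is an
 * assumption for illustration only; the block is compiled out unless the
 * hypothetical NFP_NET_DOC_EXAMPLES macro is defined.
 */
#ifdef NFP_NET_DOC_EXAMPLES
static int
nfp_doc_add_vxlan_port(uint16_t port_id,
		uint16_t udp_port)
{
	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = udp_port,
		.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
	};

	return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
}
#endif /* NFP_NET_DOC_EXAMPLES */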
2185 
2186 /*
2187  * Firmware using the NFD3 datapath cannot handle DMA addresses requiring
2188  * more than 40 bits.
2189  */
2190 int
2191 nfp_net_check_dma_mask(struct nfp_pf_dev *pf_dev,
2192 		char *name)
2193 {
2194 	if (pf_dev->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3 &&
2195 			rte_mem_check_dma_mask(40) != 0) {
2196 		PMD_DRV_LOG(ERR, "Device %s cannot be used: DMA mask restricted to 40 bits",
2197 				name);
2198 		return -ENODEV;
2199 	}
2200 
2201 	return 0;
2202 }
2203 
2204 int
2205 nfp_net_txrwb_alloc(struct rte_eth_dev *eth_dev)
2206 {
2207 	struct nfp_net_hw *net_hw;
2208 	char mz_name[RTE_MEMZONE_NAMESIZE];
2209 
2210 	net_hw = nfp_net_get_hw(eth_dev);
2211 	snprintf(mz_name, sizeof(mz_name), "%s_TXRWB", eth_dev->data->name);
2212 	net_hw->txrwb_mz = rte_memzone_reserve_aligned(mz_name,
2213 			net_hw->max_tx_queues * sizeof(uint64_t),
2214 			rte_socket_id(),
2215 			RTE_MEMZONE_IOVA_CONTIG, RTE_CACHE_LINE_SIZE);
2216 	if (net_hw->txrwb_mz == NULL) {
2217 		PMD_INIT_LOG(ERR, "Failed to alloc %s for TX ring write back",
2218 				mz_name);
2219 		return -ENOMEM;
2220 	}
2221 
2222 	return 0;
2223 }
2224 
2225 void
2226 nfp_net_txrwb_free(struct rte_eth_dev *eth_dev)
2227 {
2228 	struct nfp_net_hw *net_hw;
2229 
2230 	net_hw = nfp_net_get_hw(eth_dev);
2231 	if (net_hw->txrwb_mz == NULL)
2232 		return;
2233 
2234 	rte_memzone_free(net_hw->txrwb_mz);
2235 	net_hw->txrwb_mz = NULL;
2236 }
2237 
2238 static void
2239 nfp_net_cfg_read_version(struct nfp_hw *hw,
2240 		struct nfp_pf_dev *pf_dev)
2241 {
2242 	union {
2243 		uint32_t whole;
2244 		struct nfp_net_fw_ver split;
2245 	} version;
2246 
2247 	version.whole = nn_cfg_readl(hw, NFP_NET_CFG_VERSION);
2248 	pf_dev->ver = version.split;
2249 }
2250 
2251 bool
2252 nfp_net_version_check(struct nfp_hw *hw,
2253 		struct nfp_pf_dev *pf_dev)
2254 {
2255 	nfp_net_cfg_read_version(hw, pf_dev);
2256 	if (!nfp_net_is_valid_nfd_version(pf_dev->ver))
2257 		return false;
2258 
2259 	if (!nfp_net_is_valid_version_class(pf_dev->ver))
2260 		return false;
2261 
2262 	return true;
2263 }
2264 
2265 static void
2266 nfp_net_get_nsp_info(struct nfp_net_hw_priv *hw_priv,
2267 		char *nsp_version)
2268 {
2269 	struct nfp_nsp *nsp;
2270 
2271 	nsp = nfp_nsp_open(hw_priv->pf_dev->cpp);
2272 	if (nsp == NULL)
2273 		return;
2274 
2275 	snprintf(nsp_version, FW_VER_LEN, "%hu.%hu",
2276 			nfp_nsp_get_abi_ver_major(nsp),
2277 			nfp_nsp_get_abi_ver_minor(nsp));
2278 
2279 	nfp_nsp_close(nsp);
2280 }
2281 
2282 void
2283 nfp_net_get_fw_version(struct nfp_cpp *cpp,
2284 		uint32_t *mip_version)
2285 {
2286 	struct nfp_mip *mip;
2287 
2288 	mip = nfp_mip_open(cpp);
2289 	if (mip == NULL) {
2290 		*mip_version = 0;
2291 		return;
2292 	}
2293 
2294 	*mip_version = nfp_mip_fw_version(mip);
2295 
2296 	nfp_mip_close(mip);
2297 }
2298 
2299 static void
2300 nfp_net_get_mip_name(struct nfp_net_hw_priv *hw_priv,
2301 		char *mip_name)
2302 {
2303 	struct nfp_mip *mip;
2304 
2305 	mip = nfp_mip_open(hw_priv->pf_dev->cpp);
2306 	if (mip == NULL)
2307 		return;
2308 
2309 	snprintf(mip_name, FW_VER_LEN, "%s", nfp_mip_name(mip));
2310 
2311 	nfp_mip_close(mip);
2312 }
2313 
2314 static void
2315 nfp_net_get_app_name(struct nfp_net_hw_priv *hw_priv,
2316 		char *app_name)
2317 {
2318 	switch (hw_priv->pf_dev->app_fw_id) {
2319 	case NFP_APP_FW_CORE_NIC:
2320 		snprintf(app_name, FW_VER_LEN, "%s", "nic");
2321 		break;
2322 	case NFP_APP_FW_FLOWER_NIC:
2323 		snprintf(app_name, FW_VER_LEN, "%s", "flower");
2324 		break;
2325 	default:
2326 		snprintf(app_name, FW_VER_LEN, "%s", "unknown");
2327 		break;
2328 	}
2329 }
2330 
2331 int
2332 nfp_net_firmware_version_get(struct rte_eth_dev *dev,
2333 		char *fw_version,
2334 		size_t fw_size)
2335 {
2336 	struct nfp_net_hw *hw;
2337 	struct nfp_pf_dev *pf_dev;
2338 	struct nfp_net_hw_priv *hw_priv;
2339 	char app_name[FW_VER_LEN] = {0};
2340 	char mip_name[FW_VER_LEN] = {0};
2341 	char nsp_version[FW_VER_LEN] = {0};
2342 	char vnic_version[FW_VER_LEN] = {0};
2343 
2344 	if (fw_size < FW_VER_LEN)
2345 		return FW_VER_LEN;
2346 
2347 	hw = nfp_net_get_hw(dev);
2348 	hw_priv = dev->process_private;
2349 	pf_dev = hw_priv->pf_dev;
2350 
2351 	if (hw->fw_version[0] != 0) {
2352 		snprintf(fw_version, FW_VER_LEN, "%s", hw->fw_version);
2353 		return 0;
2354 	}
2355 
2356 	if (!rte_eth_dev_is_repr(dev)) {
2357 		snprintf(vnic_version, FW_VER_LEN, "%d.%d.%d.%d",
2358 			pf_dev->ver.extend, pf_dev->ver.class,
2359 			pf_dev->ver.major, pf_dev->ver.minor);
2360 	} else {
2361 		snprintf(vnic_version, FW_VER_LEN, "*");
2362 	}
2363 
2364 	nfp_net_get_nsp_info(hw_priv, nsp_version);
2365 	nfp_net_get_mip_name(hw_priv, mip_name);
2366 	nfp_net_get_app_name(hw_priv, app_name);
2367 
2368 	if (nsp_version[0] == 0 || mip_name[0] == 0) {
2369 		snprintf(fw_version, FW_VER_LEN, "%s %s %s %s",
2370 			vnic_version, nsp_version, mip_name, app_name);
2371 		return 0;
2372 	}
2373 
2374 	snprintf(hw->fw_version, FW_VER_LEN, "%s %s %s %s",
2375 			vnic_version, nsp_version, mip_name, app_name);
2376 
2377 	snprintf(fw_version, FW_VER_LEN, "%s", hw->fw_version);
2378 
2379 	return 0;
2380 }
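
/*
 * Illustrative sketch, not part of the driver: retrieving the composite
 * firmware version string built above through the ethdev API. A positive
 * return value is the buffer size the caller should retry with. The helper
 * name is an assumption for illustration only; the block is compiled out
 * unless the hypothetical NFP_NET_DOC_EXAMPLES macro is defined.
 */
#ifdef NFP_NET_DOC_EXAMPLES
static void
nfp_doc_print_fw_version(uint16_t port_id)
{
	int ret;
	char fw_version[FW_VER_LEN];

	ret = rte_eth_dev_fw_version_get(port_id, fw_version, sizeof(fw_version));
	if (ret == 0)
		PMD_DRV_LOG(INFO, "Port %u firmware: %s", port_id, fw_version);
	else if (ret > 0)
		PMD_DRV_LOG(INFO, "Need a %d byte buffer for the firmware version", ret);
}
#endif /* NFP_NET_DOC_EXAMPLES */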
2381 
2382 bool
2383 nfp_net_is_valid_nfd_version(struct nfp_net_fw_ver version)
2384 {
2385 	uint8_t nfd_version = version.extend;
2386 
2387 	if (nfd_version == NFP_NET_CFG_VERSION_DP_NFD3)
2388 		return true;
2389 
2390 	if (nfd_version == NFP_NET_CFG_VERSION_DP_NFDK) {
2391 		if (version.major < 5) {
2392 			PMD_INIT_LOG(ERR, "NFDK must use ABI 5 or newer, found: %d",
2393 					version.major);
2394 			return false;
2395 		}
2396 
2397 		return true;
2398 	}
2399 
2400 	return false;
2401 }
2402 
2403 bool
2404 nfp_net_is_valid_version_class(struct nfp_net_fw_ver version)
2405 {
2406 	switch (version.class) {
2407 	case NFP_NET_CFG_VERSION_CLASS_GENERIC:
2408 		return true;
2409 	case NFP_NET_CFG_VERSION_CLASS_NO_EMEM:
2410 		return true;
2411 	default:
2412 		return false;
2413 	}
2414 }
2415 
2416 void
2417 nfp_net_ctrl_bar_size_set(struct nfp_pf_dev *pf_dev)
2418 {
2419 	if (pf_dev->ver.class == NFP_NET_CFG_VERSION_CLASS_GENERIC)
2420 		pf_dev->ctrl_bar_size = NFP_NET_CFG_BAR_SZ_32K;
2421 	else
2422 		pf_dev->ctrl_bar_size = NFP_NET_CFG_BAR_SZ_8K;
2423 }
2424 
2425 /* Disable rx and tx functions to allow for reconfiguring. */
2426 int
2427 nfp_net_stop(struct rte_eth_dev *dev)
2428 {
2429 	struct nfp_net_hw *hw;
2430 	struct nfp_net_hw_priv *hw_priv;
2431 
2432 	hw = nfp_net_get_hw(dev);
2433 	hw_priv = dev->process_private;
2434 
2435 	nfp_net_disable_queues(dev);
2436 
2437 	/* Clear queues */
2438 	nfp_net_stop_tx_queue(dev);
2439 	nfp_net_stop_rx_queue(dev);
2440 
2441 	nfp_eth_set_configured(hw_priv->pf_dev->cpp, hw->nfp_idx, 0);
2442 
2443 	return 0;
2444 }
2445 
2446 static enum rte_eth_fc_mode
2447 nfp_net_get_pause_mode(struct nfp_eth_table_port *eth_port)
2448 {
2449 	enum rte_eth_fc_mode mode;
2450 
2451 	if (eth_port->rx_pause_enabled) {
2452 		if (eth_port->tx_pause_enabled)
2453 			mode = RTE_ETH_FC_FULL;
2454 		else
2455 			mode = RTE_ETH_FC_RX_PAUSE;
2456 	} else {
2457 		if (eth_port->tx_pause_enabled)
2458 			mode = RTE_ETH_FC_TX_PAUSE;
2459 		else
2460 			mode = RTE_ETH_FC_NONE;
2461 	}
2462 
2463 	return mode;
2464 }
2465 
2466 int
2467 nfp_net_flow_ctrl_get(struct rte_eth_dev *dev,
2468 		struct rte_eth_fc_conf *fc_conf)
2469 {
2470 	struct nfp_net_hw_priv *hw_priv;
2471 	struct nfp_eth_table *nfp_eth_table;
2472 	struct nfp_eth_table_port *eth_port;
2473 
2474 	hw_priv = dev->process_private;
2475 	if (hw_priv == NULL || hw_priv->pf_dev == NULL)
2476 		return -EINVAL;
2477 
2478 	nfp_eth_table = hw_priv->pf_dev->nfp_eth_table;
2479 	eth_port = &nfp_eth_table->ports[dev->data->port_id];
2480 
2481 	/* Currently only reporting the RX/TX pause mode is supported */
2482 	fc_conf->mode = nfp_net_get_pause_mode(eth_port);
2483 
2484 	return 0;
2485 }
2486 
2487 static int
2488 nfp_net_pause_frame_set(struct nfp_net_hw_priv *hw_priv,
2489 		struct nfp_eth_table_port *eth_port,
2490 		enum rte_eth_fc_mode mode)
2491 {
2492 	int err;
2493 	bool flag;
2494 	struct nfp_nsp *nsp;
2495 
2496 	nsp = nfp_eth_config_start(hw_priv->pf_dev->cpp, eth_port->index);
2497 	if (nsp == NULL) {
2498 		PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle.");
2499 		return -EIO;
2500 	}
2501 
2502 	flag = (mode & RTE_ETH_FC_TX_PAUSE) == 0 ? false : true;
2503 	err = nfp_eth_set_tx_pause(nsp, flag);
2504 	if (err != 0) {
2505 		PMD_DRV_LOG(ERR, "Failed to configure TX pause frame.");
2506 		nfp_eth_config_cleanup_end(nsp);
2507 		return err;
2508 	}
2509 
2510 	flag = (mode & RTE_ETH_FC_RX_PAUSE) == 0 ? false : true;
2511 	err = nfp_eth_set_rx_pause(nsp, flag);
2512 	if (err != 0) {
2513 		PMD_DRV_LOG(ERR, "Failed to configure RX pause frame.");
2514 		nfp_eth_config_cleanup_end(nsp);
2515 		return err;
2516 	}
2517 
2518 	err = nfp_eth_config_commit_end(nsp);
2519 	if (err != 0) {
2520 		PMD_DRV_LOG(ERR, "Failed to configure pause frame.");
2521 		return err;
2522 	}
2523 
2524 	return 0;
2525 }
2526 
2527 int
2528 nfp_net_flow_ctrl_set(struct rte_eth_dev *dev,
2529 		struct rte_eth_fc_conf *fc_conf)
2530 {
2531 	int ret;
2532 	uint8_t idx;
2533 	enum rte_eth_fc_mode set_mode;
2534 	struct nfp_net_hw_priv *hw_priv;
2535 	enum rte_eth_fc_mode original_mode;
2536 	struct nfp_eth_table *nfp_eth_table;
2537 	struct nfp_eth_table_port *eth_port;
2538 
2539 	idx = nfp_net_get_idx(dev);
2540 	hw_priv = dev->process_private;
2541 	if (hw_priv == NULL || hw_priv->pf_dev == NULL)
2542 		return -EINVAL;
2543 
2544 	nfp_eth_table = hw_priv->pf_dev->nfp_eth_table;
2545 	eth_port = &nfp_eth_table->ports[idx];
2546 
2547 	original_mode = nfp_net_get_pause_mode(eth_port);
2548 	set_mode = fc_conf->mode;
2549 
2550 	if (set_mode == original_mode)
2551 		return 0;
2552 
2553 	ret = nfp_net_pause_frame_set(hw_priv, eth_port, set_mode);
2554 	if (ret != 0)
2555 		return ret;
2556 
2557 	/* Update eth_table after modifying RX/TX pause frame mode. */
2558 	eth_port->tx_pause_enabled = (set_mode & RTE_ETH_FC_TX_PAUSE) == 0 ? false : true;
2559 	eth_port->rx_pause_enabled = (set_mode & RTE_ETH_FC_RX_PAUSE) == 0 ? false : true;
2560 
2561 	return 0;
2562 }
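
/*
 * Illustrative sketch, not part of the driver: enabling full (RX and TX)
 * pause frame support through the ethdev API, which reaches
 * nfp_net_flow_ctrl_set() above. Only the mode field matters here; the
 * remaining flow control parameters are left at zero. The helper name is
 * an assumption for illustration only; the block is compiled out unless
 * the hypothetical NFP_NET_DOC_EXAMPLES macro is defined.
 */
#ifdef NFP_NET_DOC_EXAMPLES
static int
nfp_doc_enable_full_pause(uint16_t port_id)
{
	struct rte_eth_fc_conf fc_conf = {
		.mode = RTE_ETH_FC_FULL,
	};

	return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}
#endif /* NFP_NET_DOC_EXAMPLES */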
2563 
2564 int
2565 nfp_net_fec_get_capability(struct rte_eth_dev *dev,
2566 		struct rte_eth_fec_capa *speed_fec_capa,
2567 		__rte_unused unsigned int num)
2568 {
2569 	uint8_t idx;
2570 	uint16_t speed;
2571 	uint32_t supported_fec;
2572 	struct nfp_net_hw_priv *hw_priv;
2573 	struct nfp_eth_table *nfp_eth_table;
2574 	struct nfp_eth_table_port *eth_port;
2575 
2576 	idx = nfp_net_get_idx(dev);
2577 	hw_priv = dev->process_private;
2578 	if (hw_priv == NULL || hw_priv->pf_dev == NULL)
2579 		return -EINVAL;
2580 
2581 	nfp_eth_table = hw_priv->pf_dev->nfp_eth_table;
2582 	eth_port = &nfp_eth_table->ports[idx];
2583 
2584 	speed = eth_port->speed;
2585 	supported_fec = nfp_eth_supported_fec_modes(eth_port);
2586 	if (speed == 0 || supported_fec == 0) {
2587 		PMD_DRV_LOG(ERR, "The port speed or the supported FEC modes are invalid.");
2588 		return -EINVAL;
2589 	}
2590 
2591 	if (speed_fec_capa == NULL)
2592 		return NFP_FEC_CAPA_ENTRY_NUM;
2593 
2594 	speed_fec_capa->speed = speed;
2595 
2596 	if ((supported_fec & NFP_FEC_AUTO) != 0)
2597 		speed_fec_capa->capa |= RTE_ETH_FEC_MODE_CAPA_MASK(AUTO);
2598 	if ((supported_fec & NFP_FEC_BASER) != 0)
2599 		speed_fec_capa->capa |= RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
2600 	if ((supported_fec & NFP_FEC_REED_SOLOMON) != 0)
2601 		speed_fec_capa->capa |= RTE_ETH_FEC_MODE_CAPA_MASK(RS);
2602 	if ((supported_fec & NFP_FEC_DISABLED) != 0)
2603 		speed_fec_capa->capa |= RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
2604 
2605 	return NFP_FEC_CAPA_ENTRY_NUM;
2606 }
2607 
2608 static uint32_t
2609 nfp_net_fec_nfp_to_rte(enum nfp_eth_fec fec)
2610 {
2611 	switch (fec) {
2612 	case NFP_FEC_AUTO_BIT:
2613 		return RTE_ETH_FEC_MODE_CAPA_MASK(AUTO);
2614 	case NFP_FEC_BASER_BIT:
2615 		return RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
2616 	case NFP_FEC_REED_SOLOMON_BIT:
2617 		return RTE_ETH_FEC_MODE_CAPA_MASK(RS);
2618 	case NFP_FEC_DISABLED_BIT:
2619 		return RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
2620 	default:
2621 		PMD_DRV_LOG(ERR, "FEC mode is invalid.");
2622 		return 0;
2623 	}
2624 }
2625 
2626 int
2627 nfp_net_fec_get(struct rte_eth_dev *dev,
2628 		uint32_t *fec_capa)
2629 {
2630 	uint8_t idx;
2631 	struct nfp_net_hw_priv *hw_priv;
2632 	struct nfp_eth_table *nfp_eth_table;
2633 	struct nfp_eth_table_port *eth_port;
2634 
2635 	idx = nfp_net_get_idx(dev);
2636 	hw_priv = dev->process_private;
2637 	if (hw_priv == NULL || hw_priv->pf_dev == NULL)
2638 		return -EINVAL;
2639 
2640 	if (dev->data->dev_link.link_status == RTE_ETH_LINK_DOWN) {
2641 		nfp_eth_table = nfp_eth_read_ports(hw_priv->pf_dev->cpp);
		if (nfp_eth_table == NULL) {
			PMD_DRV_LOG(ERR, "Failed to read the NFP port table.");
			return -EIO;
		}

2642 		hw_priv->pf_dev->nfp_eth_table->ports[idx] = nfp_eth_table->ports[idx];
2643 		free(nfp_eth_table);
2644 	}
2645 
2646 	nfp_eth_table = hw_priv->pf_dev->nfp_eth_table;
2647 	eth_port = &nfp_eth_table->ports[idx];
2648 
2649 	if (!nfp_eth_can_support_fec(eth_port)) {
2650 		PMD_DRV_LOG(ERR, "The NFP port does not support FEC.");
2651 		return -ENOTSUP;
2652 	}
2653 
2654 	/*
2655 	 * If the link is down, the configured FEC mode is returned
2656 	 * (AUTO when FEC auto-negotiation is enabled).
2657 	 * If the link is up, the currently active FEC mode is returned.
2658 	 */
2659 	if (dev->data->dev_link.link_status == RTE_ETH_LINK_DOWN)
2660 		*fec_capa = nfp_net_fec_nfp_to_rte(eth_port->fec);
2661 	else
2662 		*fec_capa = nfp_net_fec_nfp_to_rte(eth_port->act_fec);
2663 
2664 	if (*fec_capa == 0)
2665 		return -EINVAL;
2666 
2667 	return 0;
2668 }
2669 
2670 static enum nfp_eth_fec
2671 nfp_net_fec_rte_to_nfp(uint32_t fec)
2672 {
2673 	switch (fec) {
2674 	case RTE_BIT32(RTE_ETH_FEC_AUTO):
2675 		return NFP_FEC_AUTO_BIT;
2676 	case RTE_BIT32(RTE_ETH_FEC_NOFEC):
2677 		return NFP_FEC_DISABLED_BIT;
2678 	case RTE_BIT32(RTE_ETH_FEC_RS):
2679 		return NFP_FEC_REED_SOLOMON_BIT;
2680 	case RTE_BIT32(RTE_ETH_FEC_BASER):
2681 		return NFP_FEC_BASER_BIT;
2682 	default:
2683 		return NFP_FEC_INVALID_BIT;
2684 	}
2685 }
2686 
2687 int
2688 nfp_net_fec_set(struct rte_eth_dev *dev,
2689 		uint32_t fec_capa)
2690 {
2691 	uint8_t idx;
2692 	enum nfp_eth_fec fec;
2693 	uint32_t supported_fec;
2694 	struct nfp_net_hw_priv *hw_priv;
2695 	struct nfp_eth_table *nfp_eth_table;
2696 	struct nfp_eth_table_port *eth_port;
2697 
2698 	idx = nfp_net_get_idx(dev);
2699 	hw_priv = dev->process_private;
2700 	if (hw_priv == NULL || hw_priv->pf_dev == NULL)
2701 		return -EINVAL;
2702 
2703 	nfp_eth_table = hw_priv->pf_dev->nfp_eth_table;
2704 	eth_port = &nfp_eth_table->ports[idx];
2705 
2706 	supported_fec = nfp_eth_supported_fec_modes(eth_port);
2707 	if (supported_fec == 0) {
2708 		PMD_DRV_LOG(ERR, "The NFP port does not support FEC.");
2709 		return -ENOTSUP;
2710 	}
2711 
2712 	fec = nfp_net_fec_rte_to_nfp(fec_capa);
2713 	if (fec == NFP_FEC_INVALID_BIT) {
2714 		PMD_DRV_LOG(ERR, "The FEC mode is invalid.");
2715 		return -EINVAL;
2716 	}
2717 
2718 	if ((RTE_BIT32(fec) & supported_fec) == 0) {
2719 		PMD_DRV_LOG(ERR, "Unsupported FEC mode is set.");
2720 		return -EIO;
2721 	}
2722 
2723 	return nfp_eth_set_fec(hw_priv->pf_dev->cpp, eth_port->index, fec);
2724 }
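
/*
 * Illustrative sketch, not part of the driver: requesting Reed-Solomon FEC
 * through the ethdev API, which reaches nfp_net_fec_set() above. Exactly
 * one FEC capability bit must be set, since nfp_net_fec_rte_to_nfp() only
 * accepts single-bit values. The helper name is an assumption for
 * illustration only; the block is compiled out unless the hypothetical
 * NFP_NET_DOC_EXAMPLES macro is defined.
 */
#ifdef NFP_NET_DOC_EXAMPLES
static int
nfp_doc_request_rs_fec(uint16_t port_id)
{
	/* RTE_ETH_FEC_MODE_TO_CAPA() turns the mode enum into a capability bit. */
	return rte_eth_fec_set(port_id, RTE_ETH_FEC_MODE_TO_CAPA(RTE_ETH_FEC_RS));
}
#endif /* NFP_NET_DOC_EXAMPLES */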
2725 
2726 uint32_t
2727 nfp_net_get_port_num(struct nfp_pf_dev *pf_dev,
2728 		struct nfp_eth_table *nfp_eth_table)
2729 {
2730 	if (pf_dev->multi_pf.enabled)
2731 		return 1;
2732 	else
2733 		return nfp_eth_table->count;
2734 }
2735 
2736 uint8_t
2737 nfp_function_id_get(const struct nfp_pf_dev *pf_dev,
2738 		uint8_t port_id)
2739 {
2740 	if (pf_dev->multi_pf.enabled)
2741 		return pf_dev->multi_pf.function_id;
2742 
2743 	return port_id;
2744 }
2745 
2746 static int
2747 nfp_net_sriov_check(struct nfp_pf_dev *pf_dev,
2748 		uint16_t cap)
2749 {
2750 	uint16_t cap_vf;
2751 
2752 	cap_vf = nn_readw(pf_dev->vf_cfg_tbl_bar + NFP_NET_VF_CFG_MB_CAP);
2753 	if ((cap_vf & cap) != cap)
2754 		return -ENOTSUP;
2755 
2756 	return 0;
2757 }
2758 
2759 static int
2760 nfp_net_sriov_update(struct nfp_net_hw *net_hw,
2761 		struct nfp_pf_dev *pf_dev,
2762 		uint16_t update)
2763 {
2764 	int ret;
2765 
2766 	/* Reuse NFP_NET_VF_CFG_MB_VF_NUM to pass vf_base_id to FW. */
2767 	ret = nfp_net_vf_reconfig(net_hw, pf_dev, update, pf_dev->vf_base_id,
2768 			NFP_NET_VF_CFG_MB_VF_NUM);
2769 	if (ret != 0) {
2770 		PMD_INIT_LOG(ERR, "NFP VF reconfig failed");
2771 		return ret;
2772 	}
2773 
2774 	return 0;
2775 }
2776 
2777 static int
2778 nfp_net_vf_queues_config(struct nfp_net_hw *net_hw,
2779 		struct nfp_pf_dev *pf_dev)
2780 {
2781 	int ret;
2782 	uint32_t i;
2783 	uint32_t offset;
2784 
2785 	ret = nfp_net_sriov_check(pf_dev, NFP_NET_VF_CFG_MB_CAP_QUEUE_CONFIG);
2786 	if (ret != 0) {
2787 		if (ret == -ENOTSUP) {
2788 			PMD_INIT_LOG(WARNING, "Set VF max queue not supported");
2789 			return 0;
2790 		}
2791 
2792 		PMD_INIT_LOG(ERR, "Set VF max queue failed");
2793 		return ret;
2794 	}
2795 
2796 	offset = NFP_NET_VF_CFG_MB_SZ + pf_dev->max_vfs * NFP_NET_VF_CFG_SZ;
2797 	for (i = 0; i < pf_dev->sriov_vf; i++) {
2798 		ret = nfp_net_vf_reconfig(net_hw, pf_dev, NFP_NET_VF_CFG_MB_UPD_QUEUE_CONFIG,
2799 				pf_dev->queue_per_vf, pf_dev->vf_base_id + offset + i);
2800 		if (ret != 0) {
2801 			PMD_INIT_LOG(ERR, "Set VF max_queue failed");
2802 			return ret;
2803 		}
2804 	}
2805 
2806 	return 0;
2807 }
2808 
2809 static int
2810 nfp_net_sriov_init(struct nfp_net_hw *net_hw,
2811 		struct nfp_pf_dev *pf_dev)
2812 {
2813 	int ret;
2814 
2815 	ret = nfp_net_sriov_check(pf_dev, NFP_NET_VF_CFG_MB_CAP_SPLIT);
2816 	if (ret != 0) {
2817 		if (ret == -ENOTSUP) {
2818 			PMD_INIT_LOG(WARNING, "Set VF split not supported");
2819 			return 0;
2820 		}
2821 
2822 		PMD_INIT_LOG(ERR, "Set VF split failed");
2823 		return ret;
2824 	}
2825 
2826 	nn_writeb(pf_dev->sriov_vf, pf_dev->vf_cfg_tbl_bar + NFP_NET_VF_CFG_MB_VF_CNT);
2827 
2828 	ret = nfp_net_sriov_update(net_hw, pf_dev, NFP_NET_VF_CFG_MB_UPD_SPLIT);
2829 	if (ret != 0) {
2830 		PMD_INIT_LOG(ERR, "The NFP SR-IOV split update failed");
2831 		return ret;
2832 	}
2833 
2834 	return 0;
2835 }
2836 
2837 int
2838 nfp_net_vf_config_app_init(struct nfp_net_hw *net_hw,
2839 		struct nfp_pf_dev *pf_dev)
2840 {
2841 	int ret;
2842 
2843 	if (pf_dev->sriov_vf == 0)
2844 		return 0;
2845 
2846 	ret = nfp_net_sriov_init(net_hw, pf_dev);
2847 	if (ret != 0) {
2848 		PMD_INIT_LOG(ERR, "Failed to init the SR-IOV module");
2849 		return ret;
2850 	}
2851 
2852 	ret = nfp_net_vf_queues_config(net_hw, pf_dev);
2853 	if (ret != 0) {
2854 		PMD_INIT_LOG(ERR, "Failed to configure the VF queues");
2855 		return ret;
2856 	}
2857 
2858 	return 0;
2859 }
2860