xref: /dpdk/drivers/net/nfp/nfp_net_common.c (revision c43d2aab426bdc6e22142b4c5667d6d1634248de)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2014-2018 Netronome Systems, Inc.
3  * All rights reserved.
4  *
5  * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
6  */
7 
8 #include "nfp_net_common.h"
9 
10 #include <rte_alarm.h>
11 
12 #include "flower/nfp_flower_cmsg.h"
13 #include "flower/nfp_flower_representor.h"
14 #include "nfd3/nfp_nfd3.h"
15 #include "nfdk/nfp_nfdk.h"
16 #include "nfpcore/nfp_mip.h"
17 #include "nfpcore/nfp_nsp.h"
18 #include "nfpcore/nfp_rtsym.h"
19 #include "nfp_logs.h"
20 #include "nfp_net_meta.h"
21 
22 #define NFP_TX_MAX_SEG       UINT8_MAX
23 #define NFP_TX_MAX_MTU_SEG   8
24 
25 #define NFP_NET_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
26 #define NFP_NET_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
27 
28 #define DEFAULT_FLBUF_SIZE        9216
29 #define NFP_ETH_OVERHEAD \
30 	(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + RTE_VLAN_HLEN * 2)
31 
32 /* Only show FEC capability supported by the current speed. */
33 #define NFP_FEC_CAPA_ENTRY_NUM  1
34 
35 enum nfp_xstat_group {
36 	NFP_XSTAT_GROUP_NET,
37 	NFP_XSTAT_GROUP_MAC
38 };
39 
40 struct nfp_xstat {
41 	char name[RTE_ETH_XSTATS_NAME_SIZE];
42 	int offset;
43 	enum nfp_xstat_group group;
44 };
45 
46 #define NFP_XSTAT_NET(_name, _offset) {                 \
47 	.name = _name,                                  \
48 	.offset = NFP_NET_CFG_STATS_##_offset,          \
49 	.group = NFP_XSTAT_GROUP_NET,                   \
50 }
51 
52 #define NFP_XSTAT_MAC(_name, _offset) {                 \
53 	.name = _name,                                  \
54 	.offset = NFP_MAC_STATS_##_offset,              \
55 	.group = NFP_XSTAT_GROUP_MAC,                   \
56 }
57 
58 static const struct nfp_xstat nfp_net_xstats[] = {
59 	/*
60 	 * Basic xstats available on both VF and PF.
61 	 * Note that in case new statistics of group NFP_XSTAT_GROUP_NET
62 	 * are added to this array, they must appear before any statistics
63 	 * of group NFP_XSTAT_GROUP_MAC.
64 	 */
65 	NFP_XSTAT_NET("rx_good_packets_mc", RX_MC_FRAMES),
66 	NFP_XSTAT_NET("tx_good_packets_mc", TX_MC_FRAMES),
67 	NFP_XSTAT_NET("rx_good_packets_bc", RX_BC_FRAMES),
68 	NFP_XSTAT_NET("tx_good_packets_bc", TX_BC_FRAMES),
69 	NFP_XSTAT_NET("rx_good_bytes_uc", RX_UC_OCTETS),
70 	NFP_XSTAT_NET("tx_good_bytes_uc", TX_UC_OCTETS),
71 	NFP_XSTAT_NET("rx_good_bytes_mc", RX_MC_OCTETS),
72 	NFP_XSTAT_NET("tx_good_bytes_mc", TX_MC_OCTETS),
73 	NFP_XSTAT_NET("rx_good_bytes_bc", RX_BC_OCTETS),
74 	NFP_XSTAT_NET("tx_good_bytes_bc", TX_BC_OCTETS),
75 	NFP_XSTAT_NET("tx_missed_errors", TX_DISCARDS),
76 	NFP_XSTAT_NET("bpf_pass_pkts", APP0_FRAMES),
77 	NFP_XSTAT_NET("bpf_pass_bytes", APP0_BYTES),
78 	NFP_XSTAT_NET("bpf_app1_pkts", APP1_FRAMES),
79 	NFP_XSTAT_NET("bpf_app1_bytes", APP1_BYTES),
80 	NFP_XSTAT_NET("bpf_app2_pkts", APP2_FRAMES),
81 	NFP_XSTAT_NET("bpf_app2_bytes", APP2_BYTES),
82 	NFP_XSTAT_NET("bpf_app3_pkts", APP3_FRAMES),
83 	NFP_XSTAT_NET("bpf_app3_bytes", APP3_BYTES),
84 	/*
85 	 * MAC xstats available only on PF. These statistics are not available for VFs because
86 	 * the PF is not initialized when a VF is initialized, as it is still bound to the kernel
87 	 * driver. As such, the PMD cannot obtain a CPP handle and access the rtsym_table in order
88 	 * to get the pointer to the start of the MAC statistics counters.
89 	 */
90 	NFP_XSTAT_MAC("mac.rx_octets", RX_IN_OCTS),
91 	NFP_XSTAT_MAC("mac.rx_frame_too_long_errors", RX_FRAME_TOO_LONG_ERRORS),
92 	NFP_XSTAT_MAC("mac.rx_range_length_errors", RX_RANGE_LENGTH_ERRORS),
93 	NFP_XSTAT_MAC("mac.rx_vlan_received_ok", RX_VLAN_RECEIVED_OK),
94 	NFP_XSTAT_MAC("mac.rx_errors", RX_IN_ERRORS),
95 	NFP_XSTAT_MAC("mac.rx_broadcast_pkts", RX_IN_BROADCAST_PKTS),
96 	NFP_XSTAT_MAC("mac.rx_drop_events", RX_DROP_EVENTS),
97 	NFP_XSTAT_MAC("mac.rx_alignment_errors", RX_ALIGNMENT_ERRORS),
98 	NFP_XSTAT_MAC("mac.rx_pause_mac_ctrl_frames", RX_PAUSE_MAC_CTRL_FRAMES),
99 	NFP_XSTAT_MAC("mac.rx_frames_received_ok", RX_FRAMES_RECEIVED_OK),
100 	NFP_XSTAT_MAC("mac.rx_frame_check_sequence_errors", RX_FRAME_CHECK_SEQ_ERRORS),
101 	NFP_XSTAT_MAC("mac.rx_unicast_pkts", RX_UNICAST_PKTS),
102 	NFP_XSTAT_MAC("mac.rx_multicast_pkts", RX_MULTICAST_PKTS),
103 	NFP_XSTAT_MAC("mac.rx_pkts", RX_PKTS),
104 	NFP_XSTAT_MAC("mac.rx_undersize_pkts", RX_UNDERSIZE_PKTS),
105 	NFP_XSTAT_MAC("mac.rx_pkts_64_octets", RX_PKTS_64_OCTS),
106 	NFP_XSTAT_MAC("mac.rx_pkts_65_to_127_octets", RX_PKTS_65_TO_127_OCTS),
107 	NFP_XSTAT_MAC("mac.rx_pkts_128_to_255_octets", RX_PKTS_128_TO_255_OCTS),
108 	NFP_XSTAT_MAC("mac.rx_pkts_256_to_511_octets", RX_PKTS_256_TO_511_OCTS),
109 	NFP_XSTAT_MAC("mac.rx_pkts_512_to_1023_octets", RX_PKTS_512_TO_1023_OCTS),
110 	NFP_XSTAT_MAC("mac.rx_pkts_1024_to_1518_octets", RX_PKTS_1024_TO_1518_OCTS),
111 	NFP_XSTAT_MAC("mac.rx_pkts_1519_to_max_octets", RX_PKTS_1519_TO_MAX_OCTS),
112 	NFP_XSTAT_MAC("mac.rx_jabbers", RX_JABBERS),
113 	NFP_XSTAT_MAC("mac.rx_fragments", RX_FRAGMENTS),
114 	NFP_XSTAT_MAC("mac.rx_oversize_pkts", RX_OVERSIZE_PKTS),
115 	NFP_XSTAT_MAC("mac.rx_pause_frames_class0", RX_PAUSE_FRAMES_CLASS0),
116 	NFP_XSTAT_MAC("mac.rx_pause_frames_class1", RX_PAUSE_FRAMES_CLASS1),
117 	NFP_XSTAT_MAC("mac.rx_pause_frames_class2", RX_PAUSE_FRAMES_CLASS2),
118 	NFP_XSTAT_MAC("mac.rx_pause_frames_class3", RX_PAUSE_FRAMES_CLASS3),
119 	NFP_XSTAT_MAC("mac.rx_pause_frames_class4", RX_PAUSE_FRAMES_CLASS4),
120 	NFP_XSTAT_MAC("mac.rx_pause_frames_class5", RX_PAUSE_FRAMES_CLASS5),
121 	NFP_XSTAT_MAC("mac.rx_pause_frames_class6", RX_PAUSE_FRAMES_CLASS6),
122 	NFP_XSTAT_MAC("mac.rx_pause_frames_class7", RX_PAUSE_FRAMES_CLASS7),
123 	NFP_XSTAT_MAC("mac.rx_mac_ctrl_frames_received", RX_MAC_CTRL_FRAMES_REC),
124 	NFP_XSTAT_MAC("mac.rx_mac_head_drop", RX_MAC_HEAD_DROP),
125 	NFP_XSTAT_MAC("mac.tx_queue_drop", TX_QUEUE_DROP),
126 	NFP_XSTAT_MAC("mac.tx_octets", TX_OUT_OCTS),
127 	NFP_XSTAT_MAC("mac.tx_vlan_transmitted_ok", TX_VLAN_TRANSMITTED_OK),
128 	NFP_XSTAT_MAC("mac.tx_errors", TX_OUT_ERRORS),
129 	NFP_XSTAT_MAC("mac.tx_broadcast_pkts", TX_BROADCAST_PKTS),
130 	NFP_XSTAT_MAC("mac.tx_pause_mac_ctrl_frames", TX_PAUSE_MAC_CTRL_FRAMES),
131 	NFP_XSTAT_MAC("mac.tx_frames_transmitted_ok", TX_FRAMES_TRANSMITTED_OK),
132 	NFP_XSTAT_MAC("mac.tx_unicast_pkts", TX_UNICAST_PKTS),
133 	NFP_XSTAT_MAC("mac.tx_multicast_pkts", TX_MULTICAST_PKTS),
134 	NFP_XSTAT_MAC("mac.tx_pkts_64_octets", TX_PKTS_64_OCTS),
135 	NFP_XSTAT_MAC("mac.tx_pkts_65_to_127_octets", TX_PKTS_65_TO_127_OCTS),
136 	NFP_XSTAT_MAC("mac.tx_pkts_128_to_255_octets", TX_PKTS_128_TO_255_OCTS),
137 	NFP_XSTAT_MAC("mac.tx_pkts_256_to_511_octets", TX_PKTS_256_TO_511_OCTS),
138 	NFP_XSTAT_MAC("mac.tx_pkts_512_to_1023_octets", TX_PKTS_512_TO_1023_OCTS),
139 	NFP_XSTAT_MAC("mac.tx_pkts_1024_to_1518_octets", TX_PKTS_1024_TO_1518_OCTS),
140 	NFP_XSTAT_MAC("mac.tx_pkts_1519_to_max_octets", TX_PKTS_1519_TO_MAX_OCTS),
141 	NFP_XSTAT_MAC("mac.tx_pause_frames_class0", TX_PAUSE_FRAMES_CLASS0),
142 	NFP_XSTAT_MAC("mac.tx_pause_frames_class1", TX_PAUSE_FRAMES_CLASS1),
143 	NFP_XSTAT_MAC("mac.tx_pause_frames_class2", TX_PAUSE_FRAMES_CLASS2),
144 	NFP_XSTAT_MAC("mac.tx_pause_frames_class3", TX_PAUSE_FRAMES_CLASS3),
145 	NFP_XSTAT_MAC("mac.tx_pause_frames_class4", TX_PAUSE_FRAMES_CLASS4),
146 	NFP_XSTAT_MAC("mac.tx_pause_frames_class5", TX_PAUSE_FRAMES_CLASS5),
147 	NFP_XSTAT_MAC("mac.tx_pause_frames_class6", TX_PAUSE_FRAMES_CLASS6),
148 	NFP_XSTAT_MAC("mac.tx_pause_frames_class7", TX_PAUSE_FRAMES_CLASS7),
149 };
150 
151 static const uint32_t nfp_net_link_speed_nfp2rte[] = {
152 	[NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = RTE_ETH_SPEED_NUM_NONE,
153 	[NFP_NET_CFG_STS_LINK_RATE_UNKNOWN]     = RTE_ETH_SPEED_NUM_NONE,
154 	[NFP_NET_CFG_STS_LINK_RATE_1G]          = RTE_ETH_SPEED_NUM_1G,
155 	[NFP_NET_CFG_STS_LINK_RATE_10G]         = RTE_ETH_SPEED_NUM_10G,
156 	[NFP_NET_CFG_STS_LINK_RATE_25G]         = RTE_ETH_SPEED_NUM_25G,
157 	[NFP_NET_CFG_STS_LINK_RATE_40G]         = RTE_ETH_SPEED_NUM_40G,
158 	[NFP_NET_CFG_STS_LINK_RATE_50G]         = RTE_ETH_SPEED_NUM_50G,
159 	[NFP_NET_CFG_STS_LINK_RATE_100G]        = RTE_ETH_SPEED_NUM_100G,
160 };
161 
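/*
 * Check whether the device belongs to a physical function. For representor
 * ports this is delegated to the flower representor logic; otherwise the PF
 * flag stored in the process private data is used.
 */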
162 static bool
163 nfp_net_is_pf(struct rte_eth_dev *dev)
164 {
165 	if (rte_eth_dev_is_repr(dev))
166 		return nfp_flower_repr_is_pf(dev);
167 
168 	return ((struct nfp_net_hw_priv *)dev->process_private)->is_pf;
169 }
170 
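/*
 * Map an RTE link speed back to the NFP link rate index by scanning the
 * nfp2rte table. Unknown speeds map to NFP_NET_CFG_STS_LINK_RATE_UNKNOWN.
 */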
171 static size_t
172 nfp_net_link_speed_rte2nfp(uint32_t speed)
173 {
174 	size_t i;
175 
176 	for (i = 0; i < RTE_DIM(nfp_net_link_speed_nfp2rte); i++) {
177 		if (speed == nfp_net_link_speed_nfp2rte[i])
178 			return i;
179 	}
180 
181 	return NFP_NET_CFG_STS_LINK_RATE_UNKNOWN;
182 }
183 
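/*
 * Validate a speed value against the nfp2rte table and return it unchanged
 * when it is known, or RTE_ETH_SPEED_NUM_NONE otherwise.
 */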
184 static uint32_t
185 nfp_net_link_speed_nfp2rte_check(uint32_t speed)
186 {
187 	size_t i;
188 
189 	for (i = 0; i < RTE_DIM(nfp_net_link_speed_nfp2rte); i++) {
190 		if (speed == nfp_net_link_speed_nfp2rte[i])
191 			return nfp_net_link_speed_nfp2rte[i];
192 	}
193 
194 	return RTE_ETH_SPEED_NUM_NONE;
195 }
196 
197 void
198 nfp_net_notify_port_speed(struct nfp_net_hw *hw,
199 		struct rte_eth_link *link)
200 {
201 	/*
202 	 * Read the link status from NFP_NET_CFG_STS. If the link is down
203 	 * then write the link speed NFP_NET_CFG_STS_LINK_RATE_UNKNOWN to
204 	 * NFP_NET_CFG_STS_NSP_LINK_RATE.
205 	 */
206 	if (link->link_status == RTE_ETH_LINK_DOWN) {
207 		nn_cfg_writew(&hw->super, NFP_NET_CFG_STS_NSP_LINK_RATE,
208 				NFP_NET_CFG_STS_LINK_RATE_UNKNOWN);
209 		return;
210 	}
211 
212 	/*
213 	 * Link is up so write the link speed from the eth_table to
214 	 * NFP_NET_CFG_STS_NSP_LINK_RATE.
215 	 */
216 	nn_cfg_writew(&hw->super, NFP_NET_CFG_STS_NSP_LINK_RATE,
217 			nfp_net_link_speed_rte2nfp(link->link_speed));
218 }
219 
220 /**
221  * Reconfigure the firmware via the VF config mailbox
222  *
223  * @param net_hw
224  *   Device to reconfigure
225  * @param pf_dev
226  *   PF device holding the VF config table BAR
227  * @param update
228  *   The value for the mailbox VF command
229  * @param value
230  *   The value to write for the update
231  * @param offset
232  *   The offset in the VF config table
233  *
234  * @return
235  *   - (0) if the VF reconfiguration succeeded.
236  *   - (-EIO) if an I/O error occurred and the VF could not be reconfigured
237  */
238 static int
239 nfp_net_vf_reconfig(struct nfp_net_hw *net_hw,
240 		struct nfp_pf_dev *pf_dev,
241 		uint16_t update,
242 		uint8_t value,
243 		uint32_t offset)
244 {
245 	int ret;
246 	struct nfp_hw *hw;
247 
248 	hw = &net_hw->super;
249 	rte_spinlock_lock(&hw->reconfig_lock);
250 
251 	/* Write update info to mailbox in VF config symbol */
252 	nn_writeb(value, pf_dev->vf_cfg_tbl_bar + offset);
253 	nn_writew(update, pf_dev->vf_cfg_tbl_bar + NFP_NET_VF_CFG_MB_UPD);
254 	nn_cfg_writel(hw, NFP_NET_CFG_UPDATE, NFP_NET_CFG_UPDATE_VF);
255 
256 	rte_wmb();
257 
258 	ret = nfp_reconfig_real(hw, NFP_NET_CFG_UPDATE_VF);
259 
260 	rte_spinlock_unlock(&hw->reconfig_lock);
261 
262 	if (ret != 0)
263 		return -EIO;
264 
265 	return nn_readw(pf_dev->vf_cfg_tbl_bar + NFP_NET_VF_CFG_MB_RET);
266 }
267 
268 /**
269  * Reconfigure the firmware via the mailbox
270  *
271  * @param net_hw
272  *   Device to reconfigure
273  * @param mbox_cmd
274  *   The value for the mailbox command
275  *
276  * @return
277  *   - (0) if OK to reconfigure by the mailbox.
278  *   - (-EIO) if I/O err and fail to reconfigure by the mailbox
279  */
280 int
281 nfp_net_mbox_reconfig(struct nfp_net_hw *net_hw,
282 		uint32_t mbox_cmd)
283 {
284 	int ret;
285 	uint32_t mbox;
286 
287 	mbox = net_hw->tlv_caps.mbox_off;
288 
289 	rte_spinlock_lock(&net_hw->super.reconfig_lock);
290 
291 	nn_cfg_writeq(&net_hw->super, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd);
292 	nn_cfg_writel(&net_hw->super, NFP_NET_CFG_UPDATE, NFP_NET_CFG_UPDATE_MBOX);
293 
294 	rte_wmb();
295 
296 	ret = nfp_reconfig_real(&net_hw->super, NFP_NET_CFG_UPDATE_MBOX);
297 
298 	rte_spinlock_unlock(&net_hw->super.reconfig_lock);
299 
300 	if (ret != 0) {
301 		PMD_DRV_LOG(ERR, "Error nfp net mailbox reconfig: mbox=%#08x update=%#08x.",
302 				mbox_cmd, NFP_NET_CFG_UPDATE_MBOX);
303 		return -EIO;
304 	}
305 
306 	return nn_cfg_readl(&net_hw->super, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET);
307 }
308 
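/*
 * Get the nfp_net_hw of a device. Representor ports resolve to the PF
 * hardware structure of their flower application firmware.
 */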
309 struct nfp_net_hw *
310 nfp_net_get_hw(const struct rte_eth_dev *dev)
311 {
312 	struct nfp_net_hw *hw;
313 
314 	if (rte_eth_dev_is_repr(dev)) {
315 		struct nfp_flower_representor *repr;
316 		repr = dev->data->dev_private;
317 		hw = repr->app_fw_flower->pf_hw;
318 	} else {
319 		hw = dev->data->dev_private;
320 	}
321 
322 	return hw;
323 }
324 
325 uint8_t
326 nfp_net_get_idx(const struct rte_eth_dev *dev)
327 {
328 	uint8_t idx;
329 
330 	if (rte_eth_dev_is_repr(dev)) {
331 		struct nfp_flower_representor *repr;
332 		repr = dev->data->dev_private;
333 		idx = repr->idx;
334 	} else {
335 		struct nfp_net_hw *hw;
336 		hw = dev->data->dev_private;
337 		idx = hw->idx;
338 	}
339 
340 	return idx;
341 }
342 
343 /*
344  * Configure an Ethernet device.
345  *
346  * This function must be invoked first before any other function in the Ethernet API.
347  * This function can also be re-invoked when a device is in the stopped state.
348  *
349  * A DPDK app sends info about how many queues to use and how those queues
350  * need to be configured. This is used by the DPDK core, which makes sure no
351  * more queues than those advertised by the driver are requested.
352  * This function is called after that internal process.
353  */
354 int
355 nfp_net_configure(struct rte_eth_dev *dev)
356 {
357 	struct nfp_net_hw *hw;
358 	struct rte_eth_conf *dev_conf;
359 	struct rte_eth_rxmode *rxmode;
360 	struct rte_eth_txmode *txmode;
361 
362 	hw = nfp_net_get_hw(dev);
363 	dev_conf = &dev->data->dev_conf;
364 	rxmode = &dev_conf->rxmode;
365 	txmode = &dev_conf->txmode;
366 
367 	if ((rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) != 0)
368 		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
369 
370 	/* Checking TX mode */
371 	if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
372 		PMD_DRV_LOG(ERR, "TX mq_mode DCB and VMDq not supported.");
373 		return -EINVAL;
374 	}
375 
376 	/* Checking RX mode */
377 	if ((rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) != 0 &&
378 			(hw->super.cap & NFP_NET_CFG_CTRL_RSS_ANY) == 0) {
379 		PMD_DRV_LOG(ERR, "RSS not supported.");
380 		return -EINVAL;
381 	}
382 
383 	/* Checking MTU set */
384 	if (rxmode->mtu > hw->max_mtu + NFP_ETH_OVERHEAD) {
385 		PMD_DRV_LOG(ERR, "MTU (%u) larger than the maximum possible frame size (%u).",
386 				rxmode->mtu, hw->max_mtu + NFP_ETH_OVERHEAD);
387 		return -ERANGE;
388 	}
389 
390 	return 0;
391 }
392 
393 void
394 nfp_net_log_device_information(const struct nfp_net_hw *hw,
395 		struct nfp_pf_dev *pf_dev)
396 {
397 	uint32_t cap = hw->super.cap;
398 	uint32_t cap_ext = hw->super.cap_ext;
399 
400 	PMD_INIT_LOG(INFO, "VER: %u.%u, Maximum supported MTU: %d.",
401 			pf_dev->ver.major, pf_dev->ver.minor, hw->max_mtu);
402 
403 	PMD_INIT_LOG(INFO, "CAP: %#x.", cap);
404 	PMD_INIT_LOG(INFO, "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
405 			cap & NFP_NET_CFG_CTRL_ENABLE        ? "ENABLE "      : "",
406 			cap & NFP_NET_CFG_CTRL_PROMISC       ? "PROMISC "     : "",
407 			cap & NFP_NET_CFG_CTRL_L2BC          ? "L2BCFILT "    : "",
408 			cap & NFP_NET_CFG_CTRL_L2MC          ? "L2MCFILT "    : "",
409 			cap & NFP_NET_CFG_CTRL_RXCSUM        ? "RXCSUM "      : "",
410 			cap & NFP_NET_CFG_CTRL_TXCSUM        ? "TXCSUM "      : "",
411 			cap & NFP_NET_CFG_CTRL_RXVLAN        ? "RXVLAN "      : "",
412 			cap & NFP_NET_CFG_CTRL_TXVLAN        ? "TXVLAN "      : "",
413 			cap & NFP_NET_CFG_CTRL_SCATTER       ? "SCATTER "     : "",
414 			cap & NFP_NET_CFG_CTRL_GATHER        ? "GATHER "      : "",
415 			cap & NFP_NET_CFG_CTRL_LSO           ? "TSO "         : "",
416 			cap & NFP_NET_CFG_CTRL_RXQINQ        ? "RXQINQ "      : "",
417 			cap & NFP_NET_CFG_CTRL_RXVLAN_V2     ? "RXVLANv2 "    : "",
418 			cap & NFP_NET_CFG_CTRL_RINGCFG       ? "RINGCFG "     : "",
419 			cap & NFP_NET_CFG_CTRL_RSS           ? "RSS "         : "",
420 			cap & NFP_NET_CFG_CTRL_IRQMOD        ? "IRQMOD "      : "",
421 			cap & NFP_NET_CFG_CTRL_RINGPRIO      ? "RINGPRIO "    : "",
422 			cap & NFP_NET_CFG_CTRL_MSIXAUTO      ? "MSIXAUTO "    : "",
423 			cap & NFP_NET_CFG_CTRL_TXRWB         ? "TXRWB "       : "",
424 			cap & NFP_NET_CFG_CTRL_L2SWITCH      ? "L2SWITCH "    : "",
425 			cap & NFP_NET_CFG_CTRL_TXVLAN_V2     ? "TXVLANv2 "    : "",
426 			cap & NFP_NET_CFG_CTRL_VXLAN         ? "VXLAN "       : "",
427 			cap & NFP_NET_CFG_CTRL_NVGRE         ? "NVGRE "       : "",
428 			cap & NFP_NET_CFG_CTRL_MSIX_TX_OFF   ? "MSIX_TX_OFF " : "",
429 			cap & NFP_NET_CFG_CTRL_LSO2          ? "TSOv2 "       : "",
430 			cap & NFP_NET_CFG_CTRL_RSS2          ? "RSSv2 "       : "",
431 			cap & NFP_NET_CFG_CTRL_CSUM_COMPLETE ? "CSUM "        : "",
432 			cap & NFP_NET_CFG_CTRL_LIVE_ADDR     ? "LIVE_ADDR "   : "",
433 			cap & NFP_NET_CFG_CTRL_USO           ? "USO"          : "");
434 
435 	PMD_INIT_LOG(INFO, "CAP_WORD1: %#x.", cap_ext);
436 	PMD_INIT_LOG(INFO, "%s%s%s%s%s%s%s",
437 			cap_ext & NFP_NET_CFG_CTRL_PKT_TYPE        ? "PKT_TYPE "        : "",
438 			cap_ext & NFP_NET_CFG_CTRL_IPSEC           ? "IPSEC "           : "",
439 			cap_ext & NFP_NET_CFG_CTRL_IPSEC_SM_LOOKUP ? "IPSEC_SM "        : "",
440 			cap_ext & NFP_NET_CFG_CTRL_IPSEC_LM_LOOKUP ? "IPSEC_LM "        : "",
441 			cap_ext & NFP_NET_CFG_CTRL_MULTI_PF        ? "MULTI_PF "        : "",
442 			cap_ext & NFP_NET_CFG_CTRL_FLOW_STEER      ? "FLOW_STEER "      : "",
443 			cap_ext & NFP_NET_CFG_CTRL_IN_ORDER        ? "VIRTIO_IN_ORDER " : "");
444 
445 	PMD_INIT_LOG(INFO, "The max_rx_queues: %u, max_tx_queues: %u.",
446 			hw->max_rx_queues, hw->max_tx_queues);
447 }
448 
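/*
 * Select which RX VLAN strip control bit to use, preferring the v2
 * capability when the firmware advertises it.
 */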
449 static inline void
450 nfp_net_enable_rxvlan_cap(struct nfp_net_hw *hw,
451 		uint32_t *ctrl)
452 {
453 	if ((hw->super.cap & NFP_NET_CFG_CTRL_RXVLAN_V2) != 0)
454 		*ctrl |= NFP_NET_CFG_CTRL_RXVLAN_V2;
455 	else if ((hw->super.cap & NFP_NET_CFG_CTRL_RXVLAN) != 0)
456 		*ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
457 }
458 
459 void
460 nfp_net_enable_queues(struct rte_eth_dev *dev)
461 {
462 	struct nfp_net_hw *hw;
463 
464 	hw = nfp_net_get_hw(dev);
465 
466 	nfp_enable_queues(&hw->super, dev->data->nb_rx_queues,
467 			dev->data->nb_tx_queues);
468 }
469 
470 void
471 nfp_net_disable_queues(struct rte_eth_dev *dev)
472 {
473 	struct nfp_net_hw *net_hw;
474 
475 	net_hw = nfp_net_get_hw(dev);
476 
477 	nfp_disable_queues(&net_hw->super);
478 }
479 
480 void
481 nfp_net_params_setup(struct nfp_net_hw *hw)
482 {
483 	nn_cfg_writel(&hw->super, NFP_NET_CFG_MTU, hw->mtu);
484 	nn_cfg_writel(&hw->super, NFP_NET_CFG_FLBUFSZ, hw->flbufsz);
485 }
486 
487 void
488 nfp_net_cfg_queue_setup(struct nfp_net_hw *hw)
489 {
490 	hw->super.qcp_cfg = hw->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
491 }
492 
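/*
 * Set the MAC address of the device. Changing the address of an enabled port
 * is only allowed when the firmware advertises the LIVE_ADDR capability.
 */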
493 int
494 nfp_net_set_mac_addr(struct rte_eth_dev *dev,
495 		struct rte_ether_addr *mac_addr)
496 {
497 	uint32_t update;
498 	uint32_t new_ctrl;
499 	struct nfp_hw *hw;
500 	struct nfp_net_hw *net_hw;
501 
502 	net_hw = nfp_net_get_hw(dev);
503 	hw = &net_hw->super;
504 	if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) != 0 &&
505 			(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR) == 0) {
506 		PMD_DRV_LOG(ERR, "MAC address cannot be changed while the port is enabled.");
507 		return -EBUSY;
508 	}
509 
510 	if (rte_is_valid_assigned_ether_addr(mac_addr) == 0) {
511 		PMD_DRV_LOG(ERR, "Invalid MAC address.");
512 		return -EINVAL;
513 	}
514 
515 	/* Writing new MAC to the specific port BAR address */
516 	nfp_write_mac(hw, (uint8_t *)mac_addr);
517 
518 	update = NFP_NET_CFG_UPDATE_MACADDR;
519 	new_ctrl = hw->ctrl;
520 	if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) != 0 &&
521 			(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR) != 0)
522 		new_ctrl |= NFP_NET_CFG_CTRL_LIVE_ADDR;
523 
524 	/* Signal the NIC about the change */
525 	if (nfp_reconfig(hw, new_ctrl, update) != 0) {
526 		PMD_DRV_LOG(ERR, "MAC address update failed.");
527 		return -EIO;
528 	}
529 
530 	hw->ctrl = new_ctrl;
531 
532 	return 0;
533 }
534 
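/*
 * Map RX queues to interrupt vectors. With UIO only a single queue and
 * vector 0 can be used; with VFIO queue i is mapped to vector i + 1, since
 * the first MSI-X vector is reserved for non-EFD interrupts.
 */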
535 int
536 nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
537 		struct rte_intr_handle *intr_handle)
538 {
539 	uint16_t i;
540 	struct nfp_net_hw *hw;
541 
542 	if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
543 				dev->data->nb_rx_queues) != 0) {
544 		PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues intr_vec.",
545 				dev->data->nb_rx_queues);
546 		return -ENOMEM;
547 	}
548 
549 	hw = nfp_net_get_hw(dev);
550 
551 	if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_UIO) {
552 		PMD_DRV_LOG(INFO, "VF: enabling RX interrupt with UIO.");
553 		/* UIO just supports one queue and no LSC */
554 		nn_cfg_writeb(&hw->super, NFP_NET_CFG_RXR_VEC(0), 0);
555 		if (rte_intr_vec_list_index_set(intr_handle, 0, 0) != 0)
556 			return -1;
557 	} else {
558 		PMD_DRV_LOG(INFO, "VF: enabling RX interrupt with VFIO.");
559 		for (i = 0; i < dev->data->nb_rx_queues; i++) {
560 			/*
561 			 * The first MSI-X vector is reserved for
562 			 * non-EFD interrupts.
563 			 */
564 			nn_cfg_writeb(&hw->super, NFP_NET_CFG_RXR_VEC(i), i + 1);
565 			if (rte_intr_vec_list_index_set(intr_handle, i, i + 1) != 0)
566 				return -1;
567 		}
568 	}
569 
570 	/* Avoiding TX interrupts */
571 	hw->super.ctrl |= NFP_NET_CFG_CTRL_MSIX_TX_OFF;
572 	return 0;
573 }
574 
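/*
 * Translate the configured RX/TX offloads into control word bits, setting a
 * bit only when the corresponding capability is advertised by the firmware.
 */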
575 uint32_t
576 nfp_check_offloads(struct rte_eth_dev *dev)
577 {
578 	uint32_t cap;
579 	uint32_t ctrl = 0;
580 	uint64_t rx_offload;
581 	uint64_t tx_offload;
582 	struct nfp_net_hw *hw;
583 	struct rte_eth_conf *dev_conf;
584 
585 	hw = nfp_net_get_hw(dev);
586 	cap = hw->super.cap;
587 
588 	dev_conf = &dev->data->dev_conf;
589 	rx_offload = dev_conf->rxmode.offloads;
590 	tx_offload = dev_conf->txmode.offloads;
591 
592 	if ((rx_offload & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM) != 0) {
593 		if ((cap & NFP_NET_CFG_CTRL_RXCSUM) != 0)
594 			ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
595 	}
596 
597 	if ((rx_offload & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) != 0)
598 		nfp_net_enable_rxvlan_cap(hw, &ctrl);
599 
600 	if ((rx_offload & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) != 0) {
601 		if ((cap & NFP_NET_CFG_CTRL_RXQINQ) != 0)
602 			ctrl |= NFP_NET_CFG_CTRL_RXQINQ;
603 	}
604 
605 	hw->mtu = dev->data->mtu;
606 
607 	if ((tx_offload & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) != 0) {
608 		if ((cap & NFP_NET_CFG_CTRL_TXVLAN_V2) != 0)
609 			ctrl |= NFP_NET_CFG_CTRL_TXVLAN_V2;
610 		else if ((cap & NFP_NET_CFG_CTRL_TXVLAN) != 0)
611 			ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
612 	}
613 
614 	/* L2 broadcast */
615 	if ((cap & NFP_NET_CFG_CTRL_L2BC) != 0)
616 		ctrl |= NFP_NET_CFG_CTRL_L2BC;
617 
618 	/* L2 multicast */
619 	if ((cap & NFP_NET_CFG_CTRL_L2MC) != 0)
620 		ctrl |= NFP_NET_CFG_CTRL_L2MC;
621 
622 	/* TX checksum offload */
623 	if ((tx_offload & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) != 0 ||
624 			(tx_offload & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) != 0 ||
625 			(tx_offload & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) != 0)
626 		ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
627 
628 	/* LSO offload */
629 	if ((tx_offload & RTE_ETH_TX_OFFLOAD_TCP_TSO) != 0 ||
630 			(tx_offload & RTE_ETH_TX_OFFLOAD_UDP_TSO) != 0 ||
631 			(tx_offload & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO) != 0) {
632 		if ((cap & NFP_NET_CFG_CTRL_LSO) != 0)
633 			ctrl |= NFP_NET_CFG_CTRL_LSO;
634 		else if ((cap & NFP_NET_CFG_CTRL_LSO2) != 0)
635 			ctrl |= NFP_NET_CFG_CTRL_LSO2;
636 	}
637 
638 	/* TX gather */
639 	if ((tx_offload & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) != 0)
640 		ctrl |= NFP_NET_CFG_CTRL_GATHER;
641 
642 	return ctrl;
643 }
644 
645 int
646 nfp_net_promisc_enable(struct rte_eth_dev *dev)
647 {
648 	int ret;
649 	uint32_t update;
650 	uint32_t new_ctrl;
651 	struct nfp_hw *hw;
652 	struct nfp_net_hw *net_hw;
653 
654 	net_hw = nfp_net_get_hw(dev);
655 
656 	hw = &net_hw->super;
657 	if ((hw->cap & NFP_NET_CFG_CTRL_PROMISC) == 0) {
658 		PMD_DRV_LOG(ERR, "Promiscuous mode not supported.");
659 		return -ENOTSUP;
660 	}
661 
662 	if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) != 0) {
663 		PMD_DRV_LOG(INFO, "Promiscuous mode already enabled.");
664 		return 0;
665 	}
666 
667 	new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_PROMISC;
668 	update = NFP_NET_CFG_UPDATE_GEN;
669 
670 	ret = nfp_reconfig(hw, new_ctrl, update);
671 	if (ret != 0)
672 		return ret;
673 
674 	hw->ctrl = new_ctrl;
675 
676 	return 0;
677 }
678 
679 int
680 nfp_net_promisc_disable(struct rte_eth_dev *dev)
681 {
682 	int ret;
683 	uint32_t update;
684 	uint32_t new_ctrl;
685 	struct nfp_hw *hw;
686 	struct nfp_net_hw *net_hw;
687 
688 	net_hw = nfp_net_get_hw(dev);
689 	hw = &net_hw->super;
690 
691 	if ((hw->cap & NFP_NET_CFG_CTRL_PROMISC) == 0) {
692 		PMD_DRV_LOG(ERR, "Promiscuous mode not supported.");
693 		return -ENOTSUP;
694 	}
695 
696 	if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) == 0) {
697 		PMD_DRV_LOG(INFO, "Promiscuous mode already disabled.");
698 		return 0;
699 	}
700 
701 	new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_PROMISC;
702 	update = NFP_NET_CFG_UPDATE_GEN;
703 
704 	ret = nfp_reconfig(hw, new_ctrl, update);
705 	if (ret != 0)
706 		return ret;
707 
708 	hw->ctrl = new_ctrl;
709 
710 	return 0;
711 }
712 
713 static int
714 nfp_net_set_allmulticast_mode(struct rte_eth_dev *dev,
715 		bool enable)
716 {
717 	int ret;
718 	uint32_t update;
719 	struct nfp_hw *hw;
720 	uint32_t cap_extend;
721 	uint32_t ctrl_extend;
722 	uint32_t new_ctrl_extend;
723 	struct nfp_net_hw *net_hw;
724 
725 	net_hw = nfp_net_get_hw(dev);
726 	hw = &net_hw->super;
727 
728 	cap_extend = hw->cap_ext;
729 	if ((cap_extend & NFP_NET_CFG_CTRL_MCAST_FILTER) == 0) {
730 		PMD_DRV_LOG(DEBUG, "Allmulticast mode not supported.");
731 		return -ENOTSUP;
732 	}
733 
734 	/*
735 	 * Allmulticast mode enabled when NFP_NET_CFG_CTRL_MCAST_FILTER bit is 0.
736 	 * Allmulticast mode disabled when NFP_NET_CFG_CTRL_MCAST_FILTER bit is 1.
737 	 */
738 	ctrl_extend = hw->ctrl_ext;
739 	if (enable) {
740 		if ((ctrl_extend & NFP_NET_CFG_CTRL_MCAST_FILTER) == 0)
741 			return 0;
742 
743 		new_ctrl_extend = ctrl_extend & ~NFP_NET_CFG_CTRL_MCAST_FILTER;
744 	} else {
745 		if ((ctrl_extend & NFP_NET_CFG_CTRL_MCAST_FILTER) != 0)
746 			return 0;
747 
748 		new_ctrl_extend = ctrl_extend | NFP_NET_CFG_CTRL_MCAST_FILTER;
749 	}
750 
751 	update = NFP_NET_CFG_UPDATE_GEN;
752 
753 	ret = nfp_ext_reconfig(hw, new_ctrl_extend, update);
754 	if (ret != 0)
755 		return ret;
756 
757 	hw->ctrl_ext = new_ctrl_extend;
758 	return 0;
759 }
760 
761 int
762 nfp_net_allmulticast_enable(struct rte_eth_dev *dev)
763 {
764 	return nfp_net_set_allmulticast_mode(dev, true);
765 }
766 
767 int
768 nfp_net_allmulticast_disable(struct rte_eth_dev *dev)
769 {
770 	return nfp_net_set_allmulticast_mode(dev, false);
771 }
772 
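/*
 * Refresh the cached eth_table entry from the NSP when autoneg is used or a
 * speed update is pending, then fill in the link speed and autoneg fields.
 */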
773 static void
774 nfp_net_pf_speed_update(struct rte_eth_dev *dev,
775 		struct nfp_net_hw_priv *hw_priv,
776 		struct rte_eth_link *link)
777 {
778 	uint8_t idx;
779 	enum nfp_eth_aneg aneg;
780 	struct nfp_pf_dev *pf_dev;
781 	struct nfp_eth_table *nfp_eth_table;
782 	struct nfp_eth_table_port *eth_port;
783 
784 	pf_dev = hw_priv->pf_dev;
785 	idx = nfp_net_get_idx(dev);
786 	aneg = pf_dev->nfp_eth_table->ports[idx].aneg;
787 
788 	/* Check whether the current status has changed. */
789 	if (pf_dev->speed_updated || aneg == NFP_ANEG_AUTO) {
790 		nfp_eth_table = nfp_eth_read_ports(pf_dev->cpp);
791 		if (nfp_eth_table == NULL) {
792 			PMD_DRV_LOG(DEBUG, "Failed to get nfp_eth_table.");
793 		} else {
794 			pf_dev->nfp_eth_table->ports[idx] = nfp_eth_table->ports[idx];
795 			free(nfp_eth_table);
796 			pf_dev->speed_updated = false;
797 		}
798 	}
799 
800 	nfp_eth_table = pf_dev->nfp_eth_table;
801 	eth_port = &nfp_eth_table->ports[idx];
802 
803 	link->link_speed = nfp_net_link_speed_nfp2rte_check(eth_port->speed);
804 
805 	if (dev->data->dev_conf.link_speeds == RTE_ETH_LINK_SPEED_AUTONEG &&
806 			eth_port->supp_aneg)
807 		link->link_autoneg = RTE_ETH_LINK_AUTONEG;
808 }
809 
810 static void
811 nfp_net_vf_speed_update(struct rte_eth_link *link,
812 		uint32_t link_status)
813 {
814 	size_t link_rate_index;
815 
816 	/*
817 	 * Shift and mask link_status so that it is effectively the value
818 	 * at offset NFP_NET_CFG_STS_NSP_LINK_RATE.
819 	 */
820 	link_rate_index = (link_status >> NFP_NET_CFG_STS_LINK_RATE_SHIFT) &
821 			NFP_NET_CFG_STS_LINK_RATE_MASK;
822 	if (link_rate_index < RTE_DIM(nfp_net_link_speed_nfp2rte))
823 		link->link_speed = nfp_net_link_speed_nfp2rte[link_rate_index];
824 	else
825 		link->link_speed = RTE_ETH_SPEED_NUM_NONE;
826 }
827 
828 int
829 nfp_net_link_update_common(struct rte_eth_dev *dev,
830 		struct rte_eth_link *link,
831 		uint32_t link_status)
832 {
833 	int ret;
834 	struct nfp_net_hw_priv *hw_priv;
835 
836 	hw_priv = dev->process_private;
837 	if (link->link_status == RTE_ETH_LINK_UP) {
838 		if (nfp_net_is_pf(dev))
839 			nfp_net_pf_speed_update(dev, hw_priv, link);
840 		else
841 			nfp_net_vf_speed_update(link, link_status);
842 	}
843 
844 	ret = rte_eth_linkstatus_set(dev, link);
845 	if (ret == 0) {
846 		if (link->link_status == RTE_ETH_LINK_UP)
847 			PMD_DRV_LOG(INFO, "NIC Link is Up.");
848 		else
849 			PMD_DRV_LOG(INFO, "NIC Link is Down.");
850 	}
851 
852 	return ret;
853 }
854 
855 /*
856  * Returns 0 if the link status changed, -1 if it did not change.
857  *
858  * Wait to complete is needed as it can take up to 9 seconds to get the Link
859  * status.
860  */
861 int
862 nfp_net_link_update(struct rte_eth_dev *dev,
863 		__rte_unused int wait_to_complete)
864 {
865 	int ret;
866 	struct nfp_net_hw *hw;
867 	uint32_t nn_link_status;
868 	struct rte_eth_link link;
869 	struct nfp_net_hw_priv *hw_priv;
870 
871 	hw = nfp_net_get_hw(dev);
872 	hw_priv = dev->process_private;
873 
874 	memset(&link, 0, sizeof(struct rte_eth_link));
875 
876 	/* Read link status */
877 	nn_link_status = nn_cfg_readw(&hw->super, NFP_NET_CFG_STS);
878 	if ((nn_link_status & NFP_NET_CFG_STS_LINK) != 0)
879 		link.link_status = RTE_ETH_LINK_UP;
880 
881 	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
882 
883 	ret = nfp_net_link_update_common(dev, &link, nn_link_status);
884 	if (ret == -EIO)
885 		return ret;
886 
887 	/*
888 	 * Notify the port to update the speed value in the CTRL BAR from NSP.
889 	 * Not applicable for VFs as the associated PF is still attached to the
890 	 * kernel driver.
891 	 */
892 	if (hw_priv != NULL && hw_priv->is_pf)
893 		nfp_net_notify_port_speed(hw, &link);
894 
895 	return ret;
896 }
897 
898 int
899 nfp_net_stats_get(struct rte_eth_dev *dev,
900 		struct rte_eth_stats *stats)
901 {
902 	uint16_t i;
903 	struct nfp_net_hw *hw;
904 	struct rte_eth_stats nfp_dev_stats;
905 
906 	if (stats == NULL)
907 		return -EINVAL;
908 
909 	hw = nfp_net_get_hw(dev);
910 
911 	memset(&nfp_dev_stats, 0, sizeof(nfp_dev_stats));
912 
913 	/* Reading per RX ring stats */
914 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
915 		if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
916 			break;
917 
918 		nfp_dev_stats.q_ipackets[i] =
919 				nn_cfg_readq(&hw->super, NFP_NET_CFG_RXR_STATS(i));
920 		nfp_dev_stats.q_ipackets[i] -=
921 				hw->eth_stats_base.q_ipackets[i];
922 
923 		nfp_dev_stats.q_ibytes[i] =
924 				nn_cfg_readq(&hw->super, NFP_NET_CFG_RXR_STATS(i) + 0x8);
925 		nfp_dev_stats.q_ibytes[i] -=
926 				hw->eth_stats_base.q_ibytes[i];
927 	}
928 
929 	/* Reading per TX ring stats */
930 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
931 		if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
932 			break;
933 
934 		nfp_dev_stats.q_opackets[i] =
935 				nn_cfg_readq(&hw->super, NFP_NET_CFG_TXR_STATS(i));
936 		nfp_dev_stats.q_opackets[i] -= hw->eth_stats_base.q_opackets[i];
937 
938 		nfp_dev_stats.q_obytes[i] =
939 				nn_cfg_readq(&hw->super, NFP_NET_CFG_TXR_STATS(i) + 0x8);
940 		nfp_dev_stats.q_obytes[i] -= hw->eth_stats_base.q_obytes[i];
941 	}
942 
943 	nfp_dev_stats.ipackets = nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_FRAMES);
944 	nfp_dev_stats.ipackets -= hw->eth_stats_base.ipackets;
945 
946 	nfp_dev_stats.ibytes = nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_OCTETS);
947 	nfp_dev_stats.ibytes -= hw->eth_stats_base.ibytes;
948 
949 	nfp_dev_stats.opackets =
950 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_TX_FRAMES);
951 	nfp_dev_stats.opackets -= hw->eth_stats_base.opackets;
952 
953 	nfp_dev_stats.obytes =
954 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_TX_OCTETS);
955 	nfp_dev_stats.obytes -= hw->eth_stats_base.obytes;
956 
957 	/* Reading general device stats */
958 	nfp_dev_stats.ierrors =
959 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_ERRORS);
960 	nfp_dev_stats.ierrors -= hw->eth_stats_base.ierrors;
961 
962 	nfp_dev_stats.oerrors =
963 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_TX_ERRORS);
964 	nfp_dev_stats.oerrors -= hw->eth_stats_base.oerrors;
965 
966 	/* RX ring mbuf allocation failures */
967 	nfp_dev_stats.rx_nombuf = dev->data->rx_mbuf_alloc_failed;
968 
969 	nfp_dev_stats.imissed =
970 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_DISCARDS);
971 	nfp_dev_stats.imissed -= hw->eth_stats_base.imissed;
972 
973 	memcpy(stats, &nfp_dev_stats, sizeof(*stats));
974 	return 0;
975 }
976 
977 /*
978  * hw->eth_stats_base records the per-counter starting point.
979  * Let's update it now.
980  */
981 int
982 nfp_net_stats_reset(struct rte_eth_dev *dev)
983 {
984 	uint16_t i;
985 	struct nfp_net_hw *hw;
986 
987 	hw = nfp_net_get_hw(dev);
988 
989 	/* Reading per RX ring stats */
990 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
991 		if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
992 			break;
993 
994 		hw->eth_stats_base.q_ipackets[i] =
995 				nn_cfg_readq(&hw->super, NFP_NET_CFG_RXR_STATS(i));
996 
997 		hw->eth_stats_base.q_ibytes[i] =
998 				nn_cfg_readq(&hw->super, NFP_NET_CFG_RXR_STATS(i) + 0x8);
999 	}
1000 
1001 	/* Reading per TX ring stats */
1002 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
1003 		if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
1004 			break;
1005 
1006 		hw->eth_stats_base.q_opackets[i] =
1007 				nn_cfg_readq(&hw->super, NFP_NET_CFG_TXR_STATS(i));
1008 
1009 		hw->eth_stats_base.q_obytes[i] =
1010 				nn_cfg_readq(&hw->super, NFP_NET_CFG_TXR_STATS(i) + 0x8);
1011 	}
1012 
1013 	hw->eth_stats_base.ipackets =
1014 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_FRAMES);
1015 
1016 	hw->eth_stats_base.ibytes =
1017 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_OCTETS);
1018 
1019 	hw->eth_stats_base.opackets =
1020 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_TX_FRAMES);
1021 
1022 	hw->eth_stats_base.obytes =
1023 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_TX_OCTETS);
1024 
1025 	/* Reading general device stats */
1026 	hw->eth_stats_base.ierrors =
1027 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_ERRORS);
1028 
1029 	hw->eth_stats_base.oerrors =
1030 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_TX_ERRORS);
1031 
1032 	/* RX ring mbuf allocation failures */
1033 	dev->data->rx_mbuf_alloc_failed = 0;
1034 
1035 	hw->eth_stats_base.imissed =
1036 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_DISCARDS);
1037 
1038 	return 0;
1039 }
1040 
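/*
 * Return the number of xstats exposed by the device. MAC statistics are
 * excluded for VFs and VF representors, which cannot access them.
 */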
1041 uint32_t
1042 nfp_net_xstats_size(const struct rte_eth_dev *dev)
1043 {
1044 	uint32_t count;
1045 	bool vf_flag = false;
1046 	struct nfp_net_hw *hw;
1047 	struct nfp_flower_representor *repr;
1048 	const uint32_t size = RTE_DIM(nfp_net_xstats);
1049 
1050 	if (rte_eth_dev_is_repr(dev)) {
1051 		repr = dev->data->dev_private;
1052 		if (nfp_flower_repr_is_vf(repr))
1053 			vf_flag = true;
1054 	} else {
1055 		hw = dev->data->dev_private;
1056 		if (hw->mac_stats == NULL)
1057 			vf_flag = true;
1058 	}
1059 
1060 	/* If the device is a VF or VF-repr, then there will be no MAC stats */
1061 	if (vf_flag) {
1062 		for (count = 0; count < size; count++) {
1063 			if (nfp_net_xstats[count].group == NFP_XSTAT_GROUP_MAC)
1064 				break;
1065 		}
1066 
1067 		return count;
1068 	}
1069 
1070 	return size;
1071 }
1072 
1073 static const struct nfp_xstat *
1074 nfp_net_xstats_info(const struct rte_eth_dev *dev,
1075 		uint32_t index)
1076 {
1077 	if (index >= nfp_net_xstats_size(dev)) {
1078 		PMD_DRV_LOG(ERR, "The xstat index is out of bounds.");
1079 		return NULL;
1080 	}
1081 
1082 	return &nfp_net_xstats[index];
1083 }
1084 
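/*
 * Read one xstat counter, either from the MAC statistics area or from the
 * config BAR. Unless a raw value is requested, the baseline recorded at the
 * last reset is subtracted.
 */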
1085 static uint64_t
1086 nfp_net_xstats_value(const struct rte_eth_dev *dev,
1087 		uint32_t index,
1088 		bool raw)
1089 {
1090 	uint64_t value;
1091 	uint8_t *mac_stats;
1092 	struct nfp_net_hw *hw;
1093 	struct nfp_xstat xstat;
1094 	struct rte_eth_xstat *xstats_base;
1095 	struct nfp_flower_representor *repr;
1096 
1097 	if (rte_eth_dev_is_repr(dev)) {
1098 		repr = dev->data->dev_private;
1099 		hw = repr->app_fw_flower->pf_hw;
1100 
1101 		mac_stats = repr->mac_stats;
1102 		xstats_base = repr->repr_xstats_base;
1103 	} else {
1104 		hw = dev->data->dev_private;
1105 
1106 		mac_stats = hw->mac_stats;
1107 		xstats_base = hw->eth_xstats_base;
1108 	}
1109 
1110 	xstat = nfp_net_xstats[index];
1111 
1112 	if (xstat.group == NFP_XSTAT_GROUP_MAC)
1113 		value = nn_readq(mac_stats + xstat.offset);
1114 	else
1115 		value = nn_cfg_readq(&hw->super, xstat.offset);
1116 
1117 	if (raw)
1118 		return value;
1119 
1120 	/*
1121 	 * A baseline value of each statistic counter is recorded when stats are "reset".
1122 	 * Thus, the value returned by this function needs to be decremented by this
1123 	 * baseline value. The result is the count of this statistic since the last time
1124 	 * it was "reset".
1125 	 */
1126 	return value - xstats_base[index].value;
1127 }
1128 
1129 /* NOTE: All callers ensure dev is always set. */
1130 int
1131 nfp_net_xstats_get_names(struct rte_eth_dev *dev,
1132 		struct rte_eth_xstat_name *xstats_names,
1133 		unsigned int size)
1134 {
1135 	uint32_t id;
1136 	uint32_t nfp_size;
1137 	uint32_t read_size;
1138 
1139 	nfp_size = nfp_net_xstats_size(dev);
1140 
1141 	if (xstats_names == NULL)
1142 		return nfp_size;
1143 
1144 	/* Read at most NFP xstats number of names. */
1145 	read_size = RTE_MIN(size, nfp_size);
1146 
1147 	for (id = 0; id < read_size; id++)
1148 		rte_strlcpy(xstats_names[id].name, nfp_net_xstats[id].name,
1149 				RTE_ETH_XSTATS_NAME_SIZE);
1150 
1151 	return read_size;
1152 }
1153 
1154 /* NOTE: All callers ensure dev is always set. */
1155 int
1156 nfp_net_xstats_get(struct rte_eth_dev *dev,
1157 		struct rte_eth_xstat *xstats,
1158 		unsigned int n)
1159 {
1160 	uint32_t id;
1161 	uint32_t nfp_size;
1162 	uint32_t read_size;
1163 
1164 	nfp_size = nfp_net_xstats_size(dev);
1165 
1166 	if (xstats == NULL)
1167 		return nfp_size;
1168 
1169 	/* Read at most NFP xstats number of values. */
1170 	read_size = RTE_MIN(n, nfp_size);
1171 
1172 	for (id = 0; id < read_size; id++) {
1173 		xstats[id].id = id;
1174 		xstats[id].value = nfp_net_xstats_value(dev, id, false);
1175 	}
1176 
1177 	return read_size;
1178 }
1179 
1180 /*
1181  * NOTE: The only caller rte_eth_xstats_get_names_by_id() ensures dev,
1182  * ids, xstats_names and size are valid, and non-NULL.
1183  */
1184 int
1185 nfp_net_xstats_get_names_by_id(struct rte_eth_dev *dev,
1186 		const uint64_t *ids,
1187 		struct rte_eth_xstat_name *xstats_names,
1188 		unsigned int size)
1189 {
1190 	uint32_t i;
1191 	uint32_t read_size;
1192 
1193 	/* Read at most NFP xstats number of names. */
1194 	read_size = RTE_MIN(size, nfp_net_xstats_size(dev));
1195 
1196 	for (i = 0; i < read_size; i++) {
1197 		const struct nfp_xstat *xstat;
1198 
1199 		/* Make sure ID is valid for device. */
1200 		xstat = nfp_net_xstats_info(dev, ids[i]);
1201 		if (xstat == NULL)
1202 			return -EINVAL;
1203 
1204 		rte_strlcpy(xstats_names[i].name, xstat->name,
1205 				RTE_ETH_XSTATS_NAME_SIZE);
1206 	}
1207 
1208 	return read_size;
1209 }
1210 
1211 /*
1212  * NOTE: The only caller rte_eth_xstats_get_by_id() ensures dev,
1213  * ids, values and n are valid, and non-NULL.
1214  */
1215 int
1216 nfp_net_xstats_get_by_id(struct rte_eth_dev *dev,
1217 		const uint64_t *ids,
1218 		uint64_t *values,
1219 		unsigned int n)
1220 {
1221 	uint32_t i;
1222 	uint32_t read_size;
1223 
1224 	/* Read at most NFP xstats number of values. */
1225 	read_size = RTE_MIN(n, nfp_net_xstats_size(dev));
1226 
1227 	for (i = 0; i < read_size; i++) {
1228 		const struct nfp_xstat *xstat;
1229 
1230 		/* Make sure index is valid for device. */
1231 		xstat = nfp_net_xstats_info(dev, ids[i]);
1232 		if (xstat == NULL)
1233 			return -EINVAL;
1234 
1235 		values[i] = nfp_net_xstats_value(dev, ids[i], false);
1236 	}
1237 
1238 	return read_size;
1239 }
1240 
1241 int
1242 nfp_net_xstats_reset(struct rte_eth_dev *dev)
1243 {
1244 	uint32_t id;
1245 	uint32_t read_size;
1246 	struct nfp_net_hw *hw;
1247 	struct rte_eth_xstat *xstats_base;
1248 	struct nfp_flower_representor *repr;
1249 
1250 	read_size = nfp_net_xstats_size(dev);
1251 
1252 	if (rte_eth_dev_is_repr(dev)) {
1253 		repr = dev->data->dev_private;
1254 		xstats_base = repr->repr_xstats_base;
1255 	} else {
1256 		hw = dev->data->dev_private;
1257 		xstats_base = hw->eth_xstats_base;
1258 	}
1259 
1260 	for (id = 0; id < read_size; id++) {
1261 		xstats_base[id].id = id;
1262 		xstats_base[id].value = nfp_net_xstats_value(dev, id, true);
1263 	}
1264 
1265 	/* Successfully reset xstats, now call function to reset basic stats. */
1266 	if (rte_eth_dev_is_repr(dev))
1267 		return nfp_flower_repr_stats_reset(dev);
1268 	else
1269 		return nfp_net_stats_reset(dev);
1270 }
1271 
1272 void
1273 nfp_net_rx_desc_limits(struct nfp_net_hw_priv *hw_priv,
1274 		uint16_t *min_rx_desc,
1275 		uint16_t *max_rx_desc)
1276 {
1277 	*max_rx_desc = hw_priv->dev_info->max_qc_size;
1278 	*min_rx_desc = hw_priv->dev_info->min_qc_size;
1279 }
1280 
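/*
 * TX descriptor limits depend on the datapath: NFD3 and NFDk use a different
 * number of descriptors per packet, so the queue controller size is divided
 * by the per-packet descriptor count.
 */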
1281 void
1282 nfp_net_tx_desc_limits(struct nfp_net_hw_priv *hw_priv,
1283 		uint16_t *min_tx_desc,
1284 		uint16_t *max_tx_desc)
1285 {
1286 	uint16_t tx_dpp;
1287 
1288 	if (hw_priv->pf_dev->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3)
1289 		tx_dpp = NFD3_TX_DESC_PER_PKT;
1290 	else
1291 		tx_dpp = NFDK_TX_DESC_PER_SIMPLE_PKT;
1292 
1293 	*max_tx_desc = hw_priv->dev_info->max_qc_size / tx_dpp;
1294 	*min_tx_desc = hw_priv->dev_info->min_qc_size / tx_dpp;
1295 }
1296 
1297 int
1298 nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1299 {
1300 	uint32_t cap;
1301 	uint32_t cap_extend;
1302 	uint16_t min_rx_desc;
1303 	uint16_t max_rx_desc;
1304 	uint16_t min_tx_desc;
1305 	uint16_t max_tx_desc;
1306 	struct nfp_net_hw *hw;
1307 	struct nfp_net_hw_priv *hw_priv;
1308 
1309 	hw = nfp_net_get_hw(dev);
1310 	hw_priv = dev->process_private;
1311 	if (hw_priv == NULL)
1312 		return -EINVAL;
1313 
1314 	nfp_net_rx_desc_limits(hw_priv, &min_rx_desc, &max_rx_desc);
1315 	nfp_net_tx_desc_limits(hw_priv, &min_tx_desc, &max_tx_desc);
1316 
1317 	dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
1318 	dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
1319 	dev_info->min_rx_bufsize = RTE_ETHER_MIN_MTU;
1320 	/*
1321 	 * The maximum rx packet length is set to the maximum layer 3 MTU,
1322 	 * plus layer 2, CRC and VLAN headers.
1323 	 * The maximum layer 3 MTU (max_mtu) is read from hardware,
1324 	 * which was set by the firmware loaded onto the card.
1325 	 */
1326 	dev_info->max_rx_pktlen = hw->max_mtu + NFP_ETH_OVERHEAD;
1327 	dev_info->max_mtu = hw->max_mtu;
1328 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
1329 	/* Next should change when PF support is implemented */
1330 	dev_info->max_mac_addrs = 1;
1331 
1332 	cap = hw->super.cap;
1333 
1334 	if ((cap & (NFP_NET_CFG_CTRL_RXVLAN | NFP_NET_CFG_CTRL_RXVLAN_V2)) != 0)
1335 		dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
1336 
1337 	if ((cap & NFP_NET_CFG_CTRL_RXQINQ) != 0)
1338 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
1339 
1340 	if ((cap & NFP_NET_CFG_CTRL_RXCSUM) != 0)
1341 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
1342 				RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
1343 				RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
1344 
1345 	if ((cap & (NFP_NET_CFG_CTRL_TXVLAN | NFP_NET_CFG_CTRL_TXVLAN_V2)) != 0)
1346 		dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
1347 
1348 	if ((cap & NFP_NET_CFG_CTRL_TXCSUM) != 0)
1349 		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
1350 				RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
1351 				RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
1352 
1353 	if ((cap & NFP_NET_CFG_CTRL_LSO_ANY) != 0) {
1354 		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
1355 		if ((cap & NFP_NET_CFG_CTRL_USO) != 0)
1356 			dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_UDP_TSO;
1357 		if ((cap & NFP_NET_CFG_CTRL_VXLAN) != 0)
1358 			dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO;
1359 	}
1360 
1361 	if ((cap & NFP_NET_CFG_CTRL_GATHER) != 0)
1362 		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
1363 
1364 	cap_extend = hw->super.cap_ext;
1365 	if ((cap_extend & NFP_NET_CFG_CTRL_IPSEC) != 0) {
1366 		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_SECURITY;
1367 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_SECURITY;
1368 	}
1369 
1370 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
1371 		.rx_thresh = {
1372 			.pthresh = DEFAULT_RX_PTHRESH,
1373 			.hthresh = DEFAULT_RX_HTHRESH,
1374 			.wthresh = DEFAULT_RX_WTHRESH,
1375 		},
1376 		.rx_free_thresh = DEFAULT_RX_FREE_THRESH,
1377 		.rx_drop_en = 0,
1378 	};
1379 
1380 	dev_info->default_txconf = (struct rte_eth_txconf) {
1381 		.tx_thresh = {
1382 			.pthresh = DEFAULT_TX_PTHRESH,
1383 			.hthresh = DEFAULT_TX_HTHRESH,
1384 			.wthresh = DEFAULT_TX_WTHRESH,
1385 		},
1386 		.tx_free_thresh = DEFAULT_TX_FREE_THRESH,
1387 		.tx_rs_thresh = DEFAULT_TX_RSBIT_THRESH,
1388 	};
1389 
1390 	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
1391 		.nb_max = max_rx_desc,
1392 		.nb_min = min_rx_desc,
1393 		.nb_align = NFP_ALIGN_RING_DESC,
1394 	};
1395 
1396 	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
1397 		.nb_max = max_tx_desc,
1398 		.nb_min = min_tx_desc,
1399 		.nb_align = NFP_ALIGN_RING_DESC,
1400 		.nb_seg_max = NFP_TX_MAX_SEG,
1401 		.nb_mtu_seg_max = NFP_TX_MAX_MTU_SEG,
1402 	};
1403 
1404 	if ((cap & NFP_NET_CFG_CTRL_RSS_ANY) != 0) {
1405 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
1406 		dev_info->flow_type_rss_offloads = NFP_NET_RSS_CAP;
1407 		dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ;
1408 		dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ;
1409 	}
1410 
1411 	/* Only PF supports getting speed capability. */
1412 	if (hw_priv->is_pf)
1413 		dev_info->speed_capa = hw_priv->pf_dev->speed_capa;
1414 
1415 	return 0;
1416 }
1417 
1418 int
1419 nfp_net_common_init(struct nfp_pf_dev *pf_dev,
1420 		struct nfp_net_hw *hw)
1421 {
1422 	const int stride = 4;
1423 	struct rte_pci_device *pci_dev;
1424 
1425 	pci_dev = pf_dev->pci_dev;
1426 	hw->device_id = pci_dev->id.device_id;
1427 	hw->vendor_id = pci_dev->id.vendor_id;
1428 	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
1429 	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
1430 
1431 	hw->max_rx_queues = nn_cfg_readl(&hw->super, NFP_NET_CFG_MAX_RXRINGS);
1432 	hw->max_tx_queues = nn_cfg_readl(&hw->super, NFP_NET_CFG_MAX_TXRINGS);
1433 	if (hw->max_rx_queues == 0 || hw->max_tx_queues == 0) {
1434 		PMD_INIT_LOG(ERR, "Device %s cannot be used, there are no valid queue "
1435 				"pairs for use.", pci_dev->name);
1436 		return -ENODEV;
1437 	}
1438 
1439 	if (nfp_net_check_dma_mask(pf_dev, pci_dev->name) != 0)
1440 		return -ENODEV;
1441 
1442 	/* Get some of the read-only fields from the config BAR */
1443 	hw->super.cap = nn_cfg_readl(&hw->super, NFP_NET_CFG_CAP);
1444 	hw->super.cap_ext = nn_cfg_readl(&hw->super, NFP_NET_CFG_CAP_WORD1);
1445 	hw->max_mtu = nn_cfg_readl(&hw->super, NFP_NET_CFG_MAX_MTU);
1446 	hw->flbufsz = DEFAULT_FLBUF_SIZE;
1447 
1448 	nfp_net_meta_init_format(hw, pf_dev);
1449 
1450 	/* Read the Rx offset configured from firmware */
1451 	if (pf_dev->ver.major < 2)
1452 		hw->rx_offset = NFP_NET_RX_OFFSET;
1453 	else
1454 		hw->rx_offset = nn_cfg_readl(&hw->super, NFP_NET_CFG_RX_OFFSET);
1455 
1456 	hw->super.ctrl = 0;
1457 	hw->stride_rx = stride;
1458 	hw->stride_tx = stride;
1459 
1460 	return 0;
1461 }
1462 
1463 const uint32_t *
1464 nfp_net_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements)
1465 {
1466 	struct nfp_net_hw *net_hw;
1467 	static const uint32_t ptypes[] = {
1468 		RTE_PTYPE_L2_ETHER,
1469 		RTE_PTYPE_L3_IPV4,
1470 		RTE_PTYPE_L3_IPV4_EXT,
1471 		RTE_PTYPE_L3_IPV6,
1472 		RTE_PTYPE_L3_IPV6_EXT,
1473 		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1474 		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
1475 		RTE_PTYPE_L4_TCP,
1476 		RTE_PTYPE_L4_UDP,
1477 		RTE_PTYPE_L4_FRAG,
1478 		RTE_PTYPE_L4_NONFRAG,
1479 		RTE_PTYPE_L4_ICMP,
1480 		RTE_PTYPE_L4_SCTP,
1481 		RTE_PTYPE_TUNNEL_VXLAN,
1482 		RTE_PTYPE_TUNNEL_NVGRE,
1483 		RTE_PTYPE_TUNNEL_GENEVE,
1484 		RTE_PTYPE_INNER_L2_ETHER,
1485 		RTE_PTYPE_INNER_L3_IPV4,
1486 		RTE_PTYPE_INNER_L3_IPV4_EXT,
1487 		RTE_PTYPE_INNER_L3_IPV6,
1488 		RTE_PTYPE_INNER_L3_IPV6_EXT,
1489 		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
1490 		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
1491 		RTE_PTYPE_INNER_L4_TCP,
1492 		RTE_PTYPE_INNER_L4_UDP,
1493 		RTE_PTYPE_INNER_L4_FRAG,
1494 		RTE_PTYPE_INNER_L4_NONFRAG,
1495 		RTE_PTYPE_INNER_L4_ICMP,
1496 		RTE_PTYPE_INNER_L4_SCTP,
1497 	};
1498 
1499 	if (dev->rx_pkt_burst == NULL)
1500 		return NULL;
1501 
1502 	net_hw = dev->data->dev_private;
1503 	if ((net_hw->super.cap_ext & NFP_NET_CFG_CTRL_PKT_TYPE) == 0)
1504 		return NULL;
1505 
1506 	*no_of_elements = RTE_DIM(ptypes);
1507 	return ptypes;
1508 }
1509 
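/*
 * Enable or disable packet type offload reporting by toggling the PKT_TYPE
 * bit in the extended control word, reconfiguring only when the state changes.
 */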
1510 int
1511 nfp_net_ptypes_set(struct rte_eth_dev *dev,
1512 		uint32_t ptype_mask)
1513 {
1514 	int ret;
1515 	uint32_t update;
1516 	uint32_t ctrl_ext;
1517 	struct nfp_hw *hw;
1518 	struct nfp_net_hw *net_hw;
1519 
1520 	net_hw = dev->data->dev_private;
1521 	hw = &net_hw->super;
1522 
1523 	if ((hw->cap_ext & NFP_NET_CFG_CTRL_PKT_TYPE) == 0)
1524 		return -ENOTSUP;
1525 
1526 	ctrl_ext = hw->ctrl_ext;
1527 	if (ptype_mask == 0) {
1528 		if ((ctrl_ext & NFP_NET_CFG_CTRL_PKT_TYPE) == 0)
1529 			return 0;
1530 
1531 		ctrl_ext &= ~NFP_NET_CFG_CTRL_PKT_TYPE;
1532 	} else {
1533 		if ((ctrl_ext & NFP_NET_CFG_CTRL_PKT_TYPE) != 0)
1534 			return 0;
1535 
1536 		ctrl_ext |= NFP_NET_CFG_CTRL_PKT_TYPE;
1537 	}
1538 
1539 	update = NFP_NET_CFG_UPDATE_GEN;
1540 
1541 	ret = nfp_ext_reconfig(hw, ctrl_ext, update);
1542 	if (ret != 0)
1543 		return ret;
1544 
1545 	hw->ctrl_ext = ctrl_ext;
1546 
1547 	return 0;
1548 }
1549 
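/*
 * Unmask the interrupt for an RX queue. When VFIO is used the queue vectors
 * start at 1 because vector 0 is reserved, so the ICR index is offset
 * accordingly.
 */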
1550 int
1551 nfp_rx_queue_intr_enable(struct rte_eth_dev *dev,
1552 		uint16_t queue_id)
1553 {
1554 	uint16_t base = 0;
1555 	struct nfp_net_hw *hw;
1556 	struct rte_pci_device *pci_dev;
1557 
1558 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1559 	if (rte_intr_type_get(pci_dev->intr_handle) != RTE_INTR_HANDLE_UIO)
1560 		base = 1;
1561 
1562 	/* Make sure all updates are written before un-masking */
1563 	rte_wmb();
1564 
1565 	hw = nfp_net_get_hw(dev);
1566 	nn_cfg_writeb(&hw->super, NFP_NET_CFG_ICR(base + queue_id),
1567 			NFP_NET_CFG_ICR_UNMASKED);
1568 	return 0;
1569 }
1570 
1571 int
1572 nfp_rx_queue_intr_disable(struct rte_eth_dev *dev,
1573 		uint16_t queue_id)
1574 {
1575 	uint16_t base = 0;
1576 	struct nfp_net_hw *hw;
1577 	struct rte_pci_device *pci_dev;
1578 
1579 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1580 	if (rte_intr_type_get(pci_dev->intr_handle) != RTE_INTR_HANDLE_UIO)
1581 		base = 1;
1582 
1583 	/* Make sure all updates are written before un-masking */
1584 	rte_wmb();
1585 
1586 	hw = nfp_net_get_hw(dev);
1587 	nn_cfg_writeb(&hw->super, NFP_NET_CFG_ICR(base + queue_id), NFP_NET_CFG_ICR_RXTX);
1588 
1589 	return 0;
1590 }
1591 
1592 static void
1593 nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
1594 {
1595 	struct rte_eth_link link;
1596 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1597 
1598 	rte_eth_linkstatus_get(dev, &link);
1599 	if (link.link_status != 0)
1600 		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s.",
1601 				dev->data->port_id, link.link_speed,
1602 				link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
1603 				"full-duplex" : "half-duplex");
1604 	else
1605 		PMD_DRV_LOG(INFO, "Port %d: Link Down.", dev->data->port_id);
1606 
1607 	PMD_DRV_LOG(INFO, "PCI Address: " PCI_PRI_FMT,
1608 			pci_dev->addr.domain, pci_dev->addr.bus,
1609 			pci_dev->addr.devid, pci_dev->addr.function);
1610 }
1611 
1612 /*
1613  * Unmask an interrupt
1614  *
1615  * If MSI-X auto-masking is enabled clear the mask bit, otherwise
1616  * clear the ICR for the entry.
1617  */
1618 void
1619 nfp_net_irq_unmask(struct rte_eth_dev *dev)
1620 {
1621 	struct nfp_net_hw *hw;
1622 	struct rte_pci_device *pci_dev;
1623 
1624 	hw = nfp_net_get_hw(dev);
1625 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1626 
1627 	/* Make sure all updates are written before un-masking */
1628 	rte_wmb();
1629 
1630 	if ((hw->super.ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) != 0) {
1631 		/* If MSI-X auto-masking is used, clear the entry */
1632 		rte_intr_ack(pci_dev->intr_handle);
1633 	} else {
1634 		nn_cfg_writeb(&hw->super, NFP_NET_CFG_ICR(NFP_NET_IRQ_LSC_IDX),
1635 				NFP_NET_CFG_ICR_UNMASKED);
1636 	}
1637 }
1638 
1639 /**
1640  * Interrupt handler registered as an alarm callback for delayed handling of a
1641  * specific interrupt, waiting for the NIC state to become stable. As the NFP
1642  * interrupt state is not stable right after the link goes down, it needs to
1643  * wait 4 seconds to get the stable status.
1644  *
1645  * @param param
1646  *   The address of parameter (struct rte_eth_dev *)
1647  */
1648 void
1649 nfp_net_dev_interrupt_delayed_handler(void *param)
1650 {
1651 	struct rte_eth_dev *dev = param;
1652 
1653 	nfp_net_link_update(dev, 0);
1654 	rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
1655 
1656 	nfp_net_dev_link_status_print(dev);
1657 
1658 	/* Unmasking */
1659 	nfp_net_irq_unmask(dev);
1660 }
1661 
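/*
 * LSC interrupt handler. Schedules the delayed handler with a 1 second delay
 * when the link is likely coming up and a 4 second delay when it is likely
 * going down, giving the link state time to stabilize.
 */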
1662 void
1663 nfp_net_dev_interrupt_handler(void *param)
1664 {
1665 	int64_t timeout;
1666 	struct rte_eth_link link;
1667 	struct rte_eth_dev *dev = param;
1668 
1669 	PMD_DRV_LOG(DEBUG, "We got an LSC interrupt!");
1670 
1671 	rte_eth_linkstatus_get(dev, &link);
1672 
1673 	nfp_net_link_update(dev, 0);
1674 
1675 	/* Link was down, so it is likely coming up */
1676 	if (link.link_status == 0) {
1677 		/* Handle it 1 sec later, waiting for it to become stable */
1678 		timeout = NFP_NET_LINK_UP_CHECK_TIMEOUT;
1679 	} else {  /* Likely going down */
1680 		/* Handle it 4 sec later, waiting for it to become stable */
1681 		timeout = NFP_NET_LINK_DOWN_CHECK_TIMEOUT;
1682 	}
1682 	}
1683 
1684 	if (rte_eal_alarm_set(timeout * 1000,
1685 			nfp_net_dev_interrupt_delayed_handler,
1686 			(void *)dev) != 0) {
1687 		PMD_INIT_LOG(ERR, "Error setting alarm.");
1688 		/* Unmasking */
1689 		nfp_net_irq_unmask(dev);
1690 	}
1691 }
1692 
1693 int
1694 nfp_net_dev_mtu_set(struct rte_eth_dev *dev,
1695 		uint16_t mtu)
1696 {
1697 	struct nfp_net_hw *hw;
1698 
1699 	hw = nfp_net_get_hw(dev);
1700 
1701 	/* MTU setting is forbidden if port is started */
1702 	if (dev->data->dev_started) {
1703 		PMD_DRV_LOG(ERR, "Port %d must be stopped before configuration.",
1704 				dev->data->port_id);
1705 		return -EBUSY;
1706 	}
1707 
1708 	/* MTU larger than current mbufsize not supported */
1709 	if (mtu > hw->flbufsz) {
1710 		PMD_DRV_LOG(ERR, "MTU (%u) larger than current mbufsize (%u) not supported.",
1711 				mtu, hw->flbufsz);
1712 		return -ERANGE;
1713 	}
1714 
1715 	/* Writing to configuration space */
1716 	nn_cfg_writel(&hw->super, NFP_NET_CFG_MTU, mtu);
1717 
1718 	hw->mtu = mtu;
1719 
1720 	return 0;
1721 }
1722 
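/*
 * Apply the VLAN and QinQ stripping settings from the RX offload flags,
 * issuing a reconfig only when the control word actually changes.
 */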
1723 int
1724 nfp_net_vlan_offload_set(struct rte_eth_dev *dev,
1725 		int mask)
1726 {
1727 	int ret;
1728 	uint32_t update;
1729 	uint32_t new_ctrl;
1730 	struct nfp_hw *hw;
1731 	uint64_t rx_offload;
1732 	struct nfp_net_hw *net_hw;
1733 	uint32_t rxvlan_ctrl = 0;
1734 
1735 	net_hw = nfp_net_get_hw(dev);
1736 	hw = &net_hw->super;
1737 	rx_offload = dev->data->dev_conf.rxmode.offloads;
1738 	new_ctrl = hw->ctrl;
1739 
1740 	/* VLAN stripping setting */
1741 	if ((mask & RTE_ETH_VLAN_STRIP_MASK) != 0) {
1742 		nfp_net_enable_rxvlan_cap(net_hw, &rxvlan_ctrl);
1743 		if ((rx_offload & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) != 0)
1744 			new_ctrl |= rxvlan_ctrl;
1745 		else
1746 			new_ctrl &= ~rxvlan_ctrl;
1747 	}
1748 
1749 	/* QinQ stripping setting */
1750 	if ((mask & RTE_ETH_QINQ_STRIP_MASK) != 0) {
1751 		if ((rx_offload & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) != 0)
1752 			new_ctrl |= NFP_NET_CFG_CTRL_RXQINQ;
1753 		else
1754 			new_ctrl &= ~NFP_NET_CFG_CTRL_RXQINQ;
1755 	}
1756 
1757 	if (new_ctrl == hw->ctrl)
1758 		return 0;
1759 
1760 	update = NFP_NET_CFG_UPDATE_GEN;
1761 
1762 	ret = nfp_reconfig(hw, new_ctrl, update);
1763 	if (ret != 0)
1764 		return ret;
1765 
1766 	hw->ctrl = new_ctrl;
1767 
1768 	return 0;
1769 }
1770 
1771 static int
1772 nfp_net_rss_reta_write(struct rte_eth_dev *dev,
1773 		struct rte_eth_rss_reta_entry64 *reta_conf,
1774 		uint16_t reta_size)
1775 {
1776 	uint16_t i;
1777 	uint16_t j;
1778 	uint16_t idx;
1779 	uint8_t mask;
1780 	uint32_t reta;
1781 	uint16_t shift;
1782 	struct nfp_hw *hw;
1783 	struct nfp_net_hw *net_hw;
1784 
1785 	net_hw = nfp_net_get_hw(dev);
1786 	hw = &net_hw->super;
1787 
1788 	if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
1789 		PMD_DRV_LOG(ERR, "The size of the configured hash lookup table (%hu)"
1790 				" does not match the size supported by the hardware (%d).",
1791 				reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
1792 		return -EINVAL;
1793 	}
1794 
1795 	/*
1796 	 * Update the Redirection Table. There are 128 8-bit entries which can be
1797 	 * managed as 32 32-bit entries.
1798 	 */
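	/*
	 * For example, with RTE_ETH_RETA_GROUP_SIZE being 64, entry i = 68 maps
	 * to reta_conf[1] (68 / 64) with shift 4 (68 % 64): the 4-bit mask taken
	 * from reta_conf[1].mask selects which of the 4 consecutive 8-bit entries
	 * of that 32-bit RETA word are actually rewritten.
	 */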
1799 	for (i = 0; i < reta_size; i += 4) {
1800 		/* Handling 4 RSS entries per loop */
1801 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
1802 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
1803 		mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
1804 		if (mask == 0)
1805 			continue;
1806 
1807 		reta = 0;
1808 
1809 		/* If all 4 entries are being set, there is no need to read the RETA register */
1810 		if (mask != 0xF)
1811 			reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + i);
1812 
1813 		for (j = 0; j < 4; j++) {
1814 			if ((mask & (0x1 << j)) == 0)
1815 				continue;
1816 
1817 			/* Clearing the entry bits */
1818 			if (mask != 0xF)
1819 				reta &= ~(0xFF << (8 * j));
1820 
1821 			reta |= reta_conf[idx].reta[shift + j] << (8 * j);
1822 		}
1823 
1824 		nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift, reta);
1825 	}
1826 
1827 	return 0;
1828 }
1829 
1830 /* Update Redirection Table(RETA) of Receive Side Scaling of Ethernet device */
1831 int
1832 nfp_net_reta_update(struct rte_eth_dev *dev,
1833 		struct rte_eth_rss_reta_entry64 *reta_conf,
1834 		uint16_t reta_size)
1835 {
1836 	int ret;
1837 	uint32_t update;
1838 	struct nfp_hw *hw;
1839 	struct nfp_net_hw *net_hw;
1840 
1841 	net_hw = nfp_net_get_hw(dev);
1842 	hw = &net_hw->super;
1843 
1844 	if ((hw->ctrl & NFP_NET_CFG_CTRL_RSS_ANY) == 0)
1845 		return -EINVAL;
1846 
1847 	ret = nfp_net_rss_reta_write(dev, reta_conf, reta_size);
1848 	if (ret != 0)
1849 		return ret;
1850 
1851 	update = NFP_NET_CFG_UPDATE_RSS;
1852 
1853 	if (nfp_reconfig(hw, hw->ctrl, update) != 0)
1854 		return -EIO;
1855 
1856 	return 0;
1857 }
1858 
1859 /* Query Redirection Table(RETA) of Receive Side Scaling of Ethernet device. */
1860 int
1861 nfp_net_reta_query(struct rte_eth_dev *dev,
1862 		struct rte_eth_rss_reta_entry64 *reta_conf,
1863 		uint16_t reta_size)
1864 {
1865 	uint16_t i;
1866 	uint16_t j;
1867 	uint16_t idx;
1868 	uint8_t mask;
1869 	uint32_t reta;
1870 	uint16_t shift;
1871 	struct nfp_hw *hw;
1872 	struct nfp_net_hw *net_hw;
1873 
1874 	net_hw = nfp_net_get_hw(dev);
1875 	hw = &net_hw->super;
1876 
1877 	if ((hw->ctrl & NFP_NET_CFG_CTRL_RSS_ANY) == 0)
1878 		return -EINVAL;
1879 
1880 	if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
1881 		PMD_DRV_LOG(ERR, "The size of the configured hash lookup table (%hu)"
1882 				" does not match the size supported by the hardware (%d).",
1883 				reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
1884 		return -EINVAL;
1885 	}
1886 
1887 	/*
1888 	 * Read the Redirection Table. There are 128 8-bit entries which can be
1889 	 * managed as 32 32-bit entries.
1890 	 */
1891 	for (i = 0; i < reta_size; i += 4) {
1892 		/* Handling 4 RSS entries per loop */
1893 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
1894 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
1895 		mask = (reta_conf[idx].mask >> shift) & 0xF;
1896 
1897 		if (mask == 0)
1898 			continue;
1899 
1900 		reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift);
1901 		for (j = 0; j < 4; j++) {
1902 			if ((mask & (0x1 << j)) == 0)
1903 				continue;
1904 
1905 			reta_conf[idx].reta[shift + j] =
1906 					(uint8_t)((reta >> (8 * j)) & 0xFF);
1907 		}
1908 	}
1909 
1910 	return 0;
1911 }
1912 
1913 static int
1914 nfp_net_rss_hash_write(struct rte_eth_dev *dev,
1915 		struct rte_eth_rss_conf *rss_conf)
1916 {
1917 	uint8_t i;
1918 	uint8_t key;
1919 	uint64_t rss_hf;
1920 	struct nfp_hw *hw;
1921 	struct nfp_net_hw *net_hw;
1922 	uint32_t cfg_rss_ctrl = 0;
1923 
1924 	net_hw = nfp_net_get_hw(dev);
1925 	hw = &net_hw->super;
1926 
1927 	/* Writing the key byte by byte */
1928 	for (i = 0; i < rss_conf->rss_key_len; i++) {
1929 		memcpy(&key, &rss_conf->rss_key[i], 1);
1930 		nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY + i, key);
1931 	}
1932 
1933 	rss_hf = rss_conf->rss_hf;
1934 
1935 	if ((rss_hf & RTE_ETH_RSS_IPV4) != 0)
1936 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4;
1937 
1938 	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) != 0)
1939 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_TCP;
1940 
1941 	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) != 0)
1942 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_UDP;
1943 
1944 	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_SCTP) != 0)
1945 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_SCTP;
1946 
1947 	if ((rss_hf & RTE_ETH_RSS_IPV6) != 0)
1948 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6;
1949 
1950 	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) != 0)
1951 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_TCP;
1952 
1953 	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) != 0)
1954 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_UDP;
1955 
1956 	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_SCTP) != 0)
1957 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_SCTP;
1958 
1959 	cfg_rss_ctrl |= NFP_NET_CFG_RSS_MASK;
1960 
1961 	if (rte_eth_dev_is_repr(dev))
1962 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_CRC32;
1963 	else
1964 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_TOEPLITZ;
1965 
1966 	/* Configuring where to apply the RSS hash */
1967 	nn_cfg_writel(hw, NFP_NET_CFG_RSS_CTRL, cfg_rss_ctrl);
1968 
1969 	/* Writing the key size */
1970 	nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY_SZ, rss_conf->rss_key_len);
1971 
1972 	return 0;
1973 }
1974 
1975 int
1976 nfp_net_rss_hash_update(struct rte_eth_dev *dev,
1977 		struct rte_eth_rss_conf *rss_conf)
1978 {
1979 	uint32_t update;
1980 	uint64_t rss_hf;
1981 	struct nfp_hw *hw;
1982 	struct nfp_net_hw *net_hw;
1983 
1984 	net_hw = nfp_net_get_hw(dev);
1985 	hw = &net_hw->super;
1986 
1987 	rss_hf = rss_conf->rss_hf;
1988 
1989 	/* Checking if RSS is enabled */
1990 	if ((hw->ctrl & NFP_NET_CFG_CTRL_RSS_ANY) == 0) {
1991 		if (rss_hf != 0) {
1992 			PMD_DRV_LOG(ERR, "RSS unsupported.");
1993 			return -EINVAL;
1994 		}
1995 
1996 		return 0; /* Nothing to do */
1997 	}
1998 
1999 	if (rss_conf->rss_key_len > NFP_NET_CFG_RSS_KEY_SZ) {
2000 		PMD_DRV_LOG(ERR, "RSS hash key too long.");
2001 		return -EINVAL;
2002 	}
2003 
2004 	nfp_net_rss_hash_write(dev, rss_conf);
2005 
2006 	update = NFP_NET_CFG_UPDATE_RSS;
2007 
2008 	if (nfp_reconfig(hw, hw->ctrl, update) != 0)
2009 		return -EIO;
2010 
2011 	return 0;
2012 }
2013 
2014 int
2015 nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev,
2016 		struct rte_eth_rss_conf *rss_conf)
2017 {
2018 	uint8_t i;
2019 	uint8_t key;
2020 	uint64_t rss_hf;
2021 	struct nfp_hw *hw;
2022 	uint32_t cfg_rss_ctrl;
2023 	struct nfp_net_hw *net_hw;
2024 
2025 	net_hw = nfp_net_get_hw(dev);
2026 	hw = &net_hw->super;
2027 
2028 	if ((hw->ctrl & NFP_NET_CFG_CTRL_RSS_ANY) == 0)
2029 		return -EINVAL;
2030 
2031 	rss_hf = rss_conf->rss_hf;
2032 	cfg_rss_ctrl = nn_cfg_readl(hw, NFP_NET_CFG_RSS_CTRL);
2033 
2034 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4) != 0)
2035 		rss_hf |= RTE_ETH_RSS_IPV4;
2036 
2037 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_TCP) != 0)
2038 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
2039 
2040 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_TCP) != 0)
2041 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
2042 
2043 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_UDP) != 0)
2044 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
2045 
2046 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_UDP) != 0)
2047 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
2048 
2049 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6) != 0)
2050 		rss_hf |= RTE_ETH_RSS_IPV6;
2051 
2052 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_SCTP) != 0)
2053 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_SCTP;
2054 
2055 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_SCTP) != 0)
2056 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_SCTP;
2057 
2058 	/* Propagate current RSS hash functions to caller */
2059 	rss_conf->rss_hf = rss_hf;
2060 
2061 	/* Reading the key size */
2062 	rss_conf->rss_key_len = nn_cfg_readl(hw, NFP_NET_CFG_RSS_KEY_SZ);
2063 
2064 	/* Reading the key byte by byte */
2065 	for (i = 0; i < rss_conf->rss_key_len; i++) {
2066 		key = nn_cfg_readb(hw, NFP_NET_CFG_RSS_KEY + i);
2067 		memcpy(&rss_conf->rss_key[i], &key, 1);
2068 	}
2069 
2070 	return 0;
2071 }
2072 
2073 int
2074 nfp_net_rss_config_default(struct rte_eth_dev *dev)
2075 {
2076 	int ret;
2077 	uint8_t i;
2078 	uint8_t j;
2079 	uint16_t queue = 0;
2080 	struct rte_eth_conf *dev_conf;
2081 	struct rte_eth_rss_conf rss_conf;
2082 	uint16_t rx_queues = dev->data->nb_rx_queues;
2083 	struct rte_eth_rss_reta_entry64 nfp_reta_conf[2];
2084 
2085 	nfp_reta_conf[0].mask = ~0x0;
2086 	nfp_reta_conf[1].mask = ~0x0;
2087 
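	/*
	 * Spread the RX queues round-robin over the 128 redirection table
	 * entries, e.g. with 4 RX queues the table becomes 0,1,2,3,0,1,2,3,...
	 */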
2088 	for (i = 0; i < 0x40; i += 8) {
2089 		for (j = i; j < (i + 8); j++) {
2090 			nfp_reta_conf[0].reta[j] = queue;
2091 			nfp_reta_conf[1].reta[j] = queue++;
2092 			queue %= rx_queues;
2093 		}
2094 	}
2095 
2096 	ret = nfp_net_rss_reta_write(dev, nfp_reta_conf, 0x80);
2097 	if (ret != 0)
2098 		return ret;
2099 
2100 	dev_conf = &dev->data->dev_conf;
2101 	if (dev_conf == NULL) {
2102 		PMD_DRV_LOG(ERR, "Wrong rss conf.");
2103 		return -EINVAL;
2104 	}
2105 
2106 	rss_conf = dev_conf->rx_adv_conf.rss_conf;
2107 	ret = nfp_net_rss_hash_write(dev, &rss_conf);
2108 
2109 	return ret;
2110 }
2111 
2112 void
2113 nfp_net_stop_rx_queue(struct rte_eth_dev *dev)
2114 {
2115 	uint16_t i;
2116 	struct nfp_net_rxq *this_rx_q;
2117 
2118 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
2119 		this_rx_q = dev->data->rx_queues[i];
2120 		nfp_net_reset_rx_queue(this_rx_q);
2121 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
2122 	}
2123 }
2124 
2125 void
2126 nfp_net_close_rx_queue(struct rte_eth_dev *dev)
2127 {
2128 	uint16_t i;
2129 	struct nfp_net_rxq *this_rx_q;
2130 
2131 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
2132 		this_rx_q = dev->data->rx_queues[i];
2133 		nfp_net_reset_rx_queue(this_rx_q);
2134 		nfp_net_rx_queue_release(dev, i);
2135 	}
2136 }
2137 
2138 void
2139 nfp_net_stop_tx_queue(struct rte_eth_dev *dev)
2140 {
2141 	uint16_t i;
2142 	struct nfp_net_txq *this_tx_q;
2143 
2144 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
2145 		this_tx_q = dev->data->tx_queues[i];
2146 		nfp_net_reset_tx_queue(this_tx_q);
2147 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
2148 	}
2149 }
2150 
2151 void
2152 nfp_net_close_tx_queue(struct rte_eth_dev *dev)
2153 {
2154 	uint16_t i;
2155 	struct nfp_net_txq *this_tx_q;
2156 
2157 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
2158 		this_tx_q = dev->data->tx_queues[i];
2159 		nfp_net_reset_tx_queue(this_tx_q);
2160 		nfp_net_tx_queue_release(dev, i);
2161 	}
2162 }
2163 
2164 int
2165 nfp_net_set_vxlan_port(struct nfp_net_hw *net_hw,
2166 		size_t idx,
2167 		uint16_t port,
2168 		uint32_t ctrl)
2169 {
2170 	uint32_t i;
2171 	struct nfp_hw *hw = &net_hw->super;
2172 
2173 	if (idx >= NFP_NET_N_VXLAN_PORTS) {
2174 		PMD_DRV_LOG(ERR, "The idx value is out of range.");
2175 		return -ERANGE;
2176 	}
2177 
2178 	net_hw->vxlan_ports[idx] = port;
2179 
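	/*
	 * Two 16-bit ports are packed into each 32-bit VXLAN port register:
	 * the even-indexed port goes into the low half and the odd-indexed
	 * one into the high half.
	 */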
2180 	for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i += 2) {
2181 		nn_cfg_writel(hw, NFP_NET_CFG_VXLAN_PORT + i * sizeof(port),
2182 				(net_hw->vxlan_ports[i + 1] << 16) | net_hw->vxlan_ports[i]);
2183 	}
2184 
2185 	return nfp_reconfig(hw, ctrl, NFP_NET_CFG_UPDATE_VXLAN);
2186 }
2187 
2188 /*
2189  * The firmware with NFD3 can not handle DMA addresses requiring more
2190  * than 40 bits.
2191  */
2192 int
2193 nfp_net_check_dma_mask(struct nfp_pf_dev *pf_dev,
2194 		char *name)
2195 {
2196 	if (pf_dev->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3 &&
2197 			rte_mem_check_dma_mask(40) != 0) {
2198 		PMD_DRV_LOG(ERR, "Device %s can not be used: the DMA mask is restricted to 40 bits!",
2199 				name);
2200 		return -ENODEV;
2201 	}
2202 
2203 	return 0;
2204 }
2205 
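/*
 * Reserve an IOVA-contiguous memzone holding one 64-bit TX ring write-back
 * slot per TX queue. The memzone is released again by nfp_net_txrwb_free().
 */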
2206 int
2207 nfp_net_txrwb_alloc(struct rte_eth_dev *eth_dev)
2208 {
2209 	struct nfp_net_hw *net_hw;
2210 	char mz_name[RTE_MEMZONE_NAMESIZE];
2211 
2212 	net_hw = nfp_net_get_hw(eth_dev);
2213 	snprintf(mz_name, sizeof(mz_name), "%s_TXRWB", eth_dev->data->name);
2214 	net_hw->txrwb_mz = rte_memzone_reserve_aligned(mz_name,
2215 			net_hw->max_tx_queues * sizeof(uint64_t),
2216 			rte_socket_id(),
2217 			RTE_MEMZONE_IOVA_CONTIG, RTE_CACHE_LINE_SIZE);
2218 	if (net_hw->txrwb_mz == NULL) {
2219 		PMD_INIT_LOG(ERR, "Failed to alloc %s for TX ring write back.",
2220 				mz_name);
2221 		return -ENOMEM;
2222 	}
2223 
2224 	return 0;
2225 }
2226 
2227 void
2228 nfp_net_txrwb_free(struct rte_eth_dev *eth_dev)
2229 {
2230 	struct nfp_net_hw *net_hw;
2231 
2232 	net_hw = nfp_net_get_hw(eth_dev);
2233 	if (net_hw->txrwb_mz == NULL)
2234 		return;
2235 
2236 	rte_memzone_free(net_hw->txrwb_mz);
2237 	net_hw->txrwb_mz = NULL;
2238 }
2239 
2240 static void
2241 nfp_net_cfg_read_version(struct nfp_hw *hw,
2242 		struct nfp_pf_dev *pf_dev)
2243 {
2244 	union {
2245 		uint32_t whole;
2246 		struct nfp_net_fw_ver split;
2247 	} version;
2248 
2249 	version.whole = nn_cfg_readl(hw, NFP_NET_CFG_VERSION);
2250 	pf_dev->ver = version.split;
2251 }
2252 
2253 bool
2254 nfp_net_version_check(struct nfp_hw *hw,
2255 		struct nfp_pf_dev *pf_dev)
2256 {
2257 	nfp_net_cfg_read_version(hw, pf_dev);
2258 	if (!nfp_net_is_valid_nfd_version(pf_dev->ver))
2259 		return false;
2260 
2261 	if (!nfp_net_is_valid_version_class(pf_dev->ver))
2262 		return false;
2263 
2264 	return true;
2265 }
2266 
2267 static void
2268 nfp_net_get_nsp_info(struct nfp_net_hw_priv *hw_priv,
2269 		char *nsp_version)
2270 {
2271 	struct nfp_nsp *nsp;
2272 
2273 	nsp = nfp_nsp_open(hw_priv->pf_dev->cpp);
2274 	if (nsp == NULL)
2275 		return;
2276 
2277 	snprintf(nsp_version, FW_VER_LEN, "%hu.%hu",
2278 			nfp_nsp_get_abi_ver_major(nsp),
2279 			nfp_nsp_get_abi_ver_minor(nsp));
2280 
2281 	nfp_nsp_close(nsp);
2282 }
2283 
2284 void
2285 nfp_net_get_fw_version(struct nfp_cpp *cpp,
2286 		uint32_t *mip_version)
2287 {
2288 	struct nfp_mip *mip;
2289 
2290 	mip = nfp_mip_open(cpp);
2291 	if (mip == NULL) {
2292 		*mip_version = 0;
2293 		return;
2294 	}
2295 
2296 	*mip_version = nfp_mip_fw_version(mip);
2297 
2298 	nfp_mip_close(mip);
2299 }
2300 
2301 static void
2302 nfp_net_get_mip_name(struct nfp_net_hw_priv *hw_priv,
2303 		char *mip_name)
2304 {
2305 	struct nfp_mip *mip;
2306 
2307 	mip = nfp_mip_open(hw_priv->pf_dev->cpp);
2308 	if (mip == NULL)
2309 		return;
2310 
2311 	strlcpy(mip_name, nfp_mip_name(mip), FW_VER_LEN);
2312 
2313 	nfp_mip_close(mip);
2314 }
2315 
2316 static void
2317 nfp_net_get_app_name(struct nfp_net_hw_priv *hw_priv,
2318 		char *app_name)
2319 {
2320 	switch (hw_priv->pf_dev->app_fw_id) {
2321 	case NFP_APP_FW_CORE_NIC:
2322 		strlcpy(app_name, "nic", FW_VER_LEN);
2323 		break;
2324 	case NFP_APP_FW_FLOWER_NIC:
2325 		strlcpy(app_name, "flower", FW_VER_LEN);
2326 		break;
2327 	default:
2328 		strlcpy(app_name, "unknown", FW_VER_LEN);
2329 		break;
2330 	}
2331 }
2332 
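/*
 * Report the firmware version as "<vnic> <nsp> <mip> <app>". The composed
 * string is cached in hw->fw_version once both the NSP version and the MIP
 * name could be read, so later calls can skip the NSP/MIP accesses.
 */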
2333 int
2334 nfp_net_firmware_version_get(struct rte_eth_dev *dev,
2335 		char *fw_version,
2336 		size_t fw_size)
2337 {
2338 	struct nfp_net_hw *hw;
2339 	struct nfp_pf_dev *pf_dev;
2340 	struct nfp_net_hw_priv *hw_priv;
2341 	char app_name[FW_VER_LEN] = {0};
2342 	char mip_name[FW_VER_LEN] = {0};
2343 	char nsp_version[FW_VER_LEN] = {0};
2344 	char vnic_version[FW_VER_LEN] = {0};
2345 
2346 	if (fw_size < FW_VER_LEN)
2347 		return FW_VER_LEN;
2348 
2349 	hw = nfp_net_get_hw(dev);
2350 	hw_priv = dev->process_private;
2351 	pf_dev = hw_priv->pf_dev;
2352 
2353 	if (hw->fw_version[0] != 0) {
2354 		snprintf(fw_version, FW_VER_LEN, "%s", hw->fw_version);
2355 		return 0;
2356 	}
2357 
2358 	if (!rte_eth_dev_is_repr(dev)) {
2359 		snprintf(vnic_version, FW_VER_LEN, "%d.%d.%d.%d",
2360 			pf_dev->ver.extend, pf_dev->ver.class,
2361 			pf_dev->ver.major, pf_dev->ver.minor);
2362 	} else {
2363 		snprintf(vnic_version, FW_VER_LEN, "*");
2364 	}
2365 
2366 	nfp_net_get_nsp_info(hw_priv, nsp_version);
2367 	nfp_net_get_mip_name(hw_priv, mip_name);
2368 	nfp_net_get_app_name(hw_priv, app_name);
2369 
2370 	if (nsp_version[0] == 0 || mip_name[0] == 0) {
2371 		snprintf(fw_version, FW_VER_LEN, "%s %s %s %s",
2372 			vnic_version, nsp_version, mip_name, app_name);
2373 		return 0;
2374 	}
2375 
2376 	snprintf(hw->fw_version, FW_VER_LEN, "%s %s %s %s",
2377 			vnic_version, nsp_version, mip_name, app_name);
2378 
2379 	snprintf(fw_version, FW_VER_LEN, "%s", hw->fw_version);
2380 
2381 	return 0;
2382 }
2383 
2384 bool
2385 nfp_net_is_valid_nfd_version(struct nfp_net_fw_ver version)
2386 {
2387 	uint8_t nfd_version = version.extend;
2388 
2389 	if (nfd_version == NFP_NET_CFG_VERSION_DP_NFD3)
2390 		return true;
2391 
2392 	if (nfd_version == NFP_NET_CFG_VERSION_DP_NFDK) {
2393 		if (version.major < 5) {
2394 			PMD_INIT_LOG(ERR, "NFDK must use ABI 5 or newer, found: %d.",
2395 					version.major);
2396 			return false;
2397 		}
2398 
2399 		return true;
2400 	}
2401 
2402 	return false;
2403 }
2404 
2405 bool
2406 nfp_net_is_valid_version_class(struct nfp_net_fw_ver version)
2407 {
2408 	switch (version.class) {
2409 	case NFP_NET_CFG_VERSION_CLASS_GENERIC:
2410 		return true;
2411 	case NFP_NET_CFG_VERSION_CLASS_NO_EMEM:
2412 		return true;
2413 	default:
2414 		return false;
2415 	}
2416 }
2417 
2418 void
2419 nfp_net_ctrl_bar_size_set(struct nfp_pf_dev *pf_dev)
2420 {
2421 	if (pf_dev->ver.class == NFP_NET_CFG_VERSION_CLASS_GENERIC)
2422 		pf_dev->ctrl_bar_size = NFP_NET_CFG_BAR_SZ_32K;
2423 	else
2424 		pf_dev->ctrl_bar_size = NFP_NET_CFG_BAR_SZ_8K;
2425 }
2426 
2427 /* Disable rx and tx functions to allow for reconfiguring. */
2428 int
2429 nfp_net_stop(struct rte_eth_dev *dev)
2430 {
2431 	int ret;
2432 	struct nfp_net_hw *hw;
2433 	struct nfp_net_hw_priv *hw_priv;
2434 
2435 	hw = nfp_net_get_hw(dev);
2436 	hw_priv = dev->process_private;
2437 
2438 	nfp_net_disable_queues(dev);
2439 
2440 	/* Clear queues */
2441 	nfp_net_stop_tx_queue(dev);
2442 	nfp_net_stop_rx_queue(dev);
2443 
2444 	ret = nfp_eth_set_configured(hw_priv->pf_dev->cpp, hw->nfp_idx, 0);
2445 	if (ret < 0)
2446 		return ret;
2447 
2448 	return 0;
2449 }
2450 
2451 static enum rte_eth_fc_mode
2452 nfp_net_get_pause_mode(struct nfp_eth_table_port *eth_port)
2453 {
2454 	enum rte_eth_fc_mode mode;
2455 
2456 	if (eth_port->rx_pause_enabled) {
2457 		if (eth_port->tx_pause_enabled)
2458 			mode = RTE_ETH_FC_FULL;
2459 		else
2460 			mode = RTE_ETH_FC_RX_PAUSE;
2461 	} else {
2462 		if (eth_port->tx_pause_enabled)
2463 			mode = RTE_ETH_FC_TX_PAUSE;
2464 		else
2465 			mode = RTE_ETH_FC_NONE;
2466 	}
2467 
2468 	return mode;
2469 }
2470 
2471 int
2472 nfp_net_flow_ctrl_get(struct rte_eth_dev *dev,
2473 		struct rte_eth_fc_conf *fc_conf)
2474 {
2475 	struct nfp_net_hw_priv *hw_priv;
2476 	struct nfp_eth_table *nfp_eth_table;
2477 	struct nfp_eth_table_port *eth_port;
2478 
2479 	hw_priv = dev->process_private;
2480 	if (hw_priv == NULL || hw_priv->pf_dev == NULL)
2481 		return -EINVAL;
2482 
2483 	nfp_eth_table = hw_priv->pf_dev->nfp_eth_table;
2484 	eth_port = &nfp_eth_table->ports[dev->data->port_id];
2485 
2486 	/* Currently only the RX/TX pause switch is supported */
2487 	fc_conf->mode = nfp_net_get_pause_mode(eth_port);
2488 
2489 	return 0;
2490 }
2491 
2492 static int
2493 nfp_net_pause_frame_set(struct nfp_net_hw_priv *hw_priv,
2494 		struct nfp_eth_table_port *eth_port,
2495 		enum rte_eth_fc_mode mode)
2496 {
2497 	int err;
2498 	bool flag;
2499 	struct nfp_nsp *nsp;
2500 
2501 	nsp = nfp_eth_config_start(hw_priv->pf_dev->cpp, eth_port->index);
2502 	if (nsp == NULL) {
2503 		PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle.");
2504 		return -EIO;
2505 	}
2506 
2507 	flag = (mode & RTE_ETH_FC_TX_PAUSE) == 0 ? false : true;
2508 	err = nfp_eth_set_tx_pause(nsp, flag);
2509 	if (err != 0) {
2510 		PMD_DRV_LOG(ERR, "Failed to configure TX pause frame.");
2511 		nfp_eth_config_cleanup_end(nsp);
2512 		return err;
2513 	}
2514 
2515 	flag = (mode & RTE_ETH_FC_RX_PAUSE) == 0 ? false : true;
2516 	err = nfp_eth_set_rx_pause(nsp, flag);
2517 	if (err != 0) {
2518 		PMD_DRV_LOG(ERR, "Failed to configure RX pause frame.");
2519 		nfp_eth_config_cleanup_end(nsp);
2520 		return err;
2521 	}
2522 
2523 	err = nfp_eth_config_commit_end(nsp);
2524 	if (err < 0) {
2525 		PMD_DRV_LOG(ERR, "Failed to configure pause frame.");
2526 		return err;
2527 	}
2528 
2529 	return 0;
2530 }
2531 
2532 int
2533 nfp_net_flow_ctrl_set(struct rte_eth_dev *dev,
2534 		struct rte_eth_fc_conf *fc_conf)
2535 {
2536 	int ret;
2537 	uint8_t idx;
2538 	enum rte_eth_fc_mode set_mode;
2539 	struct nfp_net_hw_priv *hw_priv;
2540 	enum rte_eth_fc_mode original_mode;
2541 	struct nfp_eth_table *nfp_eth_table;
2542 	struct nfp_eth_table_port *eth_port;
2543 
2544 	idx = nfp_net_get_idx(dev);
2545 	hw_priv = dev->process_private;
2546 	if (hw_priv == NULL || hw_priv->pf_dev == NULL)
2547 		return -EINVAL;
2548 
2549 	nfp_eth_table = hw_priv->pf_dev->nfp_eth_table;
2550 	eth_port = &nfp_eth_table->ports[idx];
2551 
2552 	original_mode = nfp_net_get_pause_mode(eth_port);
2553 	set_mode = fc_conf->mode;
2554 
2555 	if (set_mode == original_mode)
2556 		return 0;
2557 
2558 	ret = nfp_net_pause_frame_set(hw_priv, eth_port, set_mode);
2559 	if (ret != 0)
2560 		return ret;
2561 
2562 	/* Update eth_table after modifying RX/TX pause frame mode. */
2563 	eth_port->tx_pause_enabled = (set_mode & RTE_ETH_FC_TX_PAUSE) == 0 ? false : true;
2564 	eth_port->rx_pause_enabled = (set_mode & RTE_ETH_FC_RX_PAUSE) == 0 ? false : true;
2565 
2566 	return 0;
2567 }
2568 
2569 int
2570 nfp_net_fec_get_capability(struct rte_eth_dev *dev,
2571 		struct rte_eth_fec_capa *speed_fec_capa,
2572 		__rte_unused unsigned int num)
2573 {
2574 	uint8_t idx;
2575 	uint16_t speed;
2576 	uint32_t supported_fec;
2577 	struct nfp_net_hw_priv *hw_priv;
2578 	struct nfp_eth_table *nfp_eth_table;
2579 	struct nfp_eth_table_port *eth_port;
2580 
2581 	idx = nfp_net_get_idx(dev);
2582 	hw_priv = dev->process_private;
2583 	if (hw_priv == NULL || hw_priv->pf_dev == NULL)
2584 		return -EINVAL;
2585 
2586 	nfp_eth_table = hw_priv->pf_dev->nfp_eth_table;
2587 	eth_port = &nfp_eth_table->ports[idx];
2588 
2589 	speed = eth_port->speed;
2590 	supported_fec = nfp_eth_supported_fec_modes(eth_port);
2591 	if (speed == 0 || supported_fec == 0) {
2592 		PMD_DRV_LOG(ERR, "The speed or the supported FEC modes are invalid.");
2593 		return -EINVAL;
2594 	}
2595 
2596 	if (speed_fec_capa == NULL)
2597 		return NFP_FEC_CAPA_ENTRY_NUM;
2598 
2599 	speed_fec_capa->speed = speed;
2600 
2601 	if ((supported_fec & NFP_FEC_AUTO) != 0)
2602 		speed_fec_capa->capa |= RTE_ETH_FEC_MODE_CAPA_MASK(AUTO);
2603 	if ((supported_fec & NFP_FEC_BASER) != 0)
2604 		speed_fec_capa->capa |= RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
2605 	if ((supported_fec & NFP_FEC_REED_SOLOMON) != 0)
2606 		speed_fec_capa->capa |= RTE_ETH_FEC_MODE_CAPA_MASK(RS);
2607 	if ((supported_fec & NFP_FEC_DISABLED) != 0)
2608 		speed_fec_capa->capa |= RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
2609 
2610 	return NFP_FEC_CAPA_ENTRY_NUM;
2611 }
2612 
2613 static uint32_t
2614 nfp_net_fec_nfp_to_rte(enum nfp_eth_fec fec)
2615 {
2616 	switch (fec) {
2617 	case NFP_FEC_AUTO_BIT:
2618 		return RTE_ETH_FEC_MODE_CAPA_MASK(AUTO);
2619 	case NFP_FEC_BASER_BIT:
2620 		return RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
2621 	case NFP_FEC_REED_SOLOMON_BIT:
2622 		return RTE_ETH_FEC_MODE_CAPA_MASK(RS);
2623 	case NFP_FEC_DISABLED_BIT:
2624 		return RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
2625 	default:
2626 		PMD_DRV_LOG(ERR, "FEC mode is invalid.");
2627 		return 0;
2628 	}
2629 }
2630 
2631 int
2632 nfp_net_fec_get(struct rte_eth_dev *dev,
2633 		uint32_t *fec_capa)
2634 {
2635 	uint8_t idx;
2636 	struct nfp_net_hw_priv *hw_priv;
2637 	struct nfp_eth_table *nfp_eth_table;
2638 	struct nfp_eth_table_port *eth_port;
2639 
2640 	idx = nfp_net_get_idx(dev);
2641 	hw_priv = dev->process_private;
2642 	if (hw_priv == NULL || hw_priv->pf_dev == NULL)
2643 		return -EINVAL;
2644 
2645 	if (dev->data->dev_link.link_status == RTE_ETH_LINK_DOWN) {
2646 		nfp_eth_table = nfp_eth_read_ports(hw_priv->pf_dev->cpp);
		if (nfp_eth_table == NULL)
			return -EIO;
2647 		hw_priv->pf_dev->nfp_eth_table->ports[idx] = nfp_eth_table->ports[idx];
2648 		free(nfp_eth_table);
2649 	}
2650 
2651 	nfp_eth_table = hw_priv->pf_dev->nfp_eth_table;
2652 	eth_port = &nfp_eth_table->ports[idx];
2653 
2654 	if (!nfp_eth_can_support_fec(eth_port)) {
2655 		PMD_DRV_LOG(ERR, "NFP can not support FEC.");
2656 		return -ENOTSUP;
2657 	}
2658 
2659 	/*
2660 	 * If link is down and AUTO is enabled, AUTO is returned, otherwise,
2661 	 * configured FEC mode is returned.
2662 	 * If link is up, current FEC mode is returned.
2663 	 */
2664 	if (dev->data->dev_link.link_status == RTE_ETH_LINK_DOWN)
2665 		*fec_capa = nfp_net_fec_nfp_to_rte(eth_port->fec);
2666 	else
2667 		*fec_capa = nfp_net_fec_nfp_to_rte(eth_port->act_fec);
2668 
2669 	if (*fec_capa == 0)
2670 		return -EINVAL;
2671 
2672 	return 0;
2673 }
2674 
2675 static enum nfp_eth_fec
2676 nfp_net_fec_rte_to_nfp(uint32_t fec)
2677 {
2678 	switch (fec) {
2679 	case RTE_BIT32(RTE_ETH_FEC_AUTO):
2680 		return NFP_FEC_AUTO_BIT;
2681 	case RTE_BIT32(RTE_ETH_FEC_NOFEC):
2682 		return NFP_FEC_DISABLED_BIT;
2683 	case RTE_BIT32(RTE_ETH_FEC_RS):
2684 		return NFP_FEC_REED_SOLOMON_BIT;
2685 	case RTE_BIT32(RTE_ETH_FEC_BASER):
2686 		return NFP_FEC_BASER_BIT;
2687 	default:
2688 		return NFP_FEC_INVALID_BIT;
2689 	}
2690 }
2691 
2692 int
2693 nfp_net_fec_set(struct rte_eth_dev *dev,
2694 		uint32_t fec_capa)
2695 {
2696 	int ret;
2697 	uint8_t idx;
2698 	enum nfp_eth_fec fec;
2699 	uint32_t supported_fec;
2700 	struct nfp_net_hw_priv *hw_priv;
2701 	struct nfp_eth_table *nfp_eth_table;
2702 	struct nfp_eth_table_port *eth_port;
2703 
2704 	idx = nfp_net_get_idx(dev);
2705 	hw_priv = dev->process_private;
2706 	if (hw_priv == NULL || hw_priv->pf_dev == NULL)
2707 		return -EINVAL;
2708 
2709 	nfp_eth_table = hw_priv->pf_dev->nfp_eth_table;
2710 	eth_port = &nfp_eth_table->ports[idx];
2711 
2712 	supported_fec = nfp_eth_supported_fec_modes(eth_port);
2713 	if (supported_fec == 0) {
2714 		PMD_DRV_LOG(ERR, "NFP can not support FEC.");
2715 		return -ENOTSUP;
2716 	}
2717 
2718 	fec = nfp_net_fec_rte_to_nfp(fec_capa);
2719 	if (fec == NFP_FEC_INVALID_BIT) {
2720 		PMD_DRV_LOG(ERR, "The FEC mode is invalid.");
2721 		return -EINVAL;
2722 	}
2723 
2724 	if ((RTE_BIT32(fec) & supported_fec) == 0) {
2725 		PMD_DRV_LOG(ERR, "Unsupported FEC mode is set.");
2726 		return -EIO;
2727 	}
2728 
2729 	ret = nfp_eth_set_fec(hw_priv->pf_dev->cpp, eth_port->index, fec);
2730 	if (ret < 0) {
2731 		PMD_DRV_LOG(ERR, "NFP set FEC mode failed.");
2732 		return ret;
2733 	}
2734 
2735 	return 0;
2736 }
2737 
2738 uint32_t
2739 nfp_net_get_phyports_from_nsp(struct nfp_pf_dev *pf_dev)
2740 {
2741 	if (pf_dev->multi_pf.enabled)
2742 		return 1;
2743 	else
2744 		return pf_dev->nfp_eth_table->count;
2745 }
2746 
2747 uint32_t
2748 nfp_net_get_phyports_from_fw(struct nfp_pf_dev *pf_dev)
2749 {
2750 	int ret = 0;
2751 	uint8_t total_phyports;
2752 	char pf_name[RTE_ETH_NAME_MAX_LEN];
2753 
2754 	/* Read the number of vNIC's created for the PF */
2755 	/* Read the number of vNICs created for the PF */
2756 			pf_dev->multi_pf.function_id);
2757 	total_phyports = nfp_rtsym_read_le(pf_dev->sym_tbl, pf_name, &ret);
2758 	if (ret != 0 || total_phyports == 0 || total_phyports > 8) {
2759 		PMD_INIT_LOG(ERR, "The %s symbol has a wrong value.", pf_name);
2760 		return 0;
2761 	}
2762 
2763 	return total_phyports;
2764 }
2765 
2766 uint8_t
2767 nfp_function_id_get(const struct nfp_pf_dev *pf_dev,
2768 		uint8_t port_id)
2769 {
2770 	if (pf_dev->multi_pf.enabled)
2771 		return pf_dev->multi_pf.function_id;
2772 
2773 	return port_id;
2774 }
2775 
2776 static int
2777 nfp_net_sriov_check(struct nfp_pf_dev *pf_dev,
2778 		uint16_t cap)
2779 {
2780 	uint16_t cap_vf;
2781 
2782 	cap_vf = nn_readw(pf_dev->vf_cfg_tbl_bar + NFP_NET_VF_CFG_MB_CAP);
2783 	if ((cap_vf & cap) != cap)
2784 		return -ENOTSUP;
2785 
2786 	return 0;
2787 }
2788 
2789 static int
2790 nfp_net_sriov_update(struct nfp_net_hw *net_hw,
2791 		struct nfp_pf_dev *pf_dev,
2792 		uint16_t update)
2793 {
2794 	int ret;
2795 
2796 	/* Reuse NFP_NET_VF_CFG_MB_VF_NUM to pass vf_base_id to FW. */
2797 	ret = nfp_net_vf_reconfig(net_hw, pf_dev, update, pf_dev->vf_base_id,
2798 			NFP_NET_VF_CFG_MB_VF_NUM);
2799 	if (ret != 0) {
2800 		PMD_INIT_LOG(ERR, "The NFP VF reconfig failed.");
2801 		return ret;
2802 	}
2803 
2804 	return 0;
2805 }
2806 
2807 static int
2808 nfp_net_vf_queues_config(struct nfp_net_hw *net_hw,
2809 		struct nfp_pf_dev *pf_dev)
2810 {
2811 	int ret;
2812 	uint32_t i;
2813 	uint32_t offset;
2814 
2815 	ret = nfp_net_sriov_check(pf_dev, NFP_NET_VF_CFG_MB_CAP_QUEUE_CONFIG);
2816 	if (ret != 0) {
2817 		if (ret == -ENOTSUP) {
2818 			PMD_INIT_LOG(DEBUG, "Set VF max queue not supported.");
2819 			return 0;
2820 		}
2821 
2822 		PMD_INIT_LOG(ERR, "Set VF max queue failed.");
2823 		return ret;
2824 	}
2825 
2826 	offset = NFP_NET_VF_CFG_MB_SZ + pf_dev->max_vfs * NFP_NET_VF_CFG_SZ;
2827 	for (i = 0; i < pf_dev->sriov_vf; i++) {
2828 		ret = nfp_net_vf_reconfig(net_hw, pf_dev, NFP_NET_VF_CFG_MB_UPD_QUEUE_CONFIG,
2829 				pf_dev->queue_per_vf, pf_dev->vf_base_id + offset + i);
2830 		if (ret != 0) {
2831 			PMD_INIT_LOG(ERR, "Set VF max_queue failed.");
2832 			return ret;
2833 		}
2834 	}
2835 
2836 	return 0;
2837 }
2838 
2839 static int
2840 nfp_net_sriov_init(struct nfp_net_hw *net_hw,
2841 		struct nfp_pf_dev *pf_dev)
2842 {
2843 	int ret;
2844 
2845 	ret = nfp_net_sriov_check(pf_dev, NFP_NET_VF_CFG_MB_CAP_SPLIT);
2846 	if (ret != 0) {
2847 		if (ret == -ENOTSUP) {
2848 			PMD_INIT_LOG(DEBUG, "Set VF split not supported.");
2849 			return 0;
2850 		}
2851 
2852 		PMD_INIT_LOG(ERR, "Set VF split failed.");
2853 		return ret;
2854 	}
2855 
2856 	nn_writeb(pf_dev->sriov_vf, pf_dev->vf_cfg_tbl_bar + NFP_NET_VF_CFG_MB_VF_CNT);
2857 
2858 	ret = nfp_net_sriov_update(net_hw, pf_dev, NFP_NET_VF_CFG_MB_UPD_SPLIT);
2859 	if (ret != 0) {
2860 		PMD_INIT_LOG(ERR, "The NFP SR-IOV update split failed.");
2861 		return ret;
2862 	}
2863 
2864 	return 0;
2865 }
2866 
2867 int
2868 nfp_net_vf_config_app_init(struct nfp_net_hw *net_hw,
2869 		struct nfp_pf_dev *pf_dev)
2870 {
2871 	int ret;
2872 
2873 	if (pf_dev->sriov_vf == 0)
2874 		return 0;
2875 
2876 	ret = nfp_net_sriov_init(net_hw, pf_dev);
2877 	if (ret != 0) {
2878 		PMD_INIT_LOG(ERR, "Failed to init sriov module.");
2879 		return ret;
2880 	}
2881 
2882 	ret = nfp_net_vf_queues_config(net_hw, pf_dev);
2883 	if (ret != 0) {
2884 		PMD_INIT_LOG(ERR, "Failed to config vf queue.");
2885 		return ret;
2886 	}
2887 
2888 	return 0;
2889 }
2890 
2891 static inline bool
2892 nfp_net_meta_has_no_port_type(__rte_unused struct nfp_net_meta_parsed *meta)
2893 {
2894 	return true;
2895 }
2896 
2897 static inline bool
2898 nfp_net_meta_is_not_pf_port(__rte_unused struct nfp_net_meta_parsed *meta)
2899 {
2900 	return false;
2901 }
2902 
2903 static inline bool
2904 nfp_net_meta_is_pf_port(struct nfp_net_meta_parsed *meta)
2905 {
2906 	return nfp_flower_port_is_phy_port(meta->port_id);
2907 }
2908 
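/*
 * Register the callback used to check the port metadata of received packets:
 * VF and CoreNIC firmware never attach a port type, flower firmware with
 * multi-PF enabled only accepts packets whose metadata refers to a physical
 * port, and flower firmware without multi-PF accepts none of them here.
 */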
2909 bool
2910 nfp_net_recv_pkt_meta_check_register(struct nfp_net_hw_priv *hw_priv)
2911 {
2912 	struct nfp_pf_dev *pf_dev;
2913 
2914 	pf_dev = hw_priv->pf_dev;
2915 	if (!hw_priv->is_pf) {
2916 		pf_dev->recv_pkt_meta_check_t = nfp_net_meta_has_no_port_type;
2917 		return true;
2918 	}
2919 
2920 	switch (pf_dev->app_fw_id) {
2921 	case NFP_APP_FW_CORE_NIC:
2922 		pf_dev->recv_pkt_meta_check_t = nfp_net_meta_has_no_port_type;
2923 		break;
2924 	case NFP_APP_FW_FLOWER_NIC:
2925 		if (pf_dev->multi_pf.enabled)
2926 			pf_dev->recv_pkt_meta_check_t = nfp_net_meta_is_pf_port;
2927 		else
2928 			pf_dev->recv_pkt_meta_check_t = nfp_net_meta_is_not_pf_port;
2929 		break;
2930 	default:
2931 		PMD_INIT_LOG(ERR, "Unsupported Firmware loaded.");
2932 		return false;
2933 	}
2934 
2935 	return true;
2936 }
2937 
2938 static int
2939 nfp_net_get_nfp_index(struct rte_eth_dev *dev)
2940 {
2941 	int nfp_idx;
2942 
2943 	if (rte_eth_dev_is_repr(dev)) {
2944 		struct nfp_flower_representor *repr;
2945 		repr = dev->data->dev_private;
2946 		nfp_idx = repr->nfp_idx;
2947 	} else {
2948 		struct nfp_net_hw *net_hw;
2949 		net_hw = dev->data->dev_private;
2950 		nfp_idx = net_hw->nfp_idx;
2951 	}
2952 
2953 	return nfp_idx;
2954 }
2955 
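/*
 * The "EEPROM" exposed through these ops is the port's persistent MAC
 * address kept in the NSP hwinfo database, hence the fixed length of
 * RTE_ETHER_ADDR_LEN bytes.
 */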
2956 int
2957 nfp_net_get_eeprom_len(__rte_unused struct rte_eth_dev *dev)
2958 {
2959 	return RTE_ETHER_ADDR_LEN;
2960 }
2961 
2962 static int
2963 nfp_net_get_port_mac_hwinfo(struct nfp_net_hw_priv *hw_priv,
2964 		uint32_t index,
2965 		struct rte_ether_addr *mac_addr)
2966 {
2967 	int ret;
2968 	char hwinfo[32];
2969 	struct nfp_nsp *nsp;
2970 
2971 	snprintf(hwinfo, sizeof(hwinfo), "eth%u.mac", index);
2972 
2973 	nsp = nfp_nsp_open(hw_priv->pf_dev->cpp);
2974 	if (nsp == NULL)
2975 		return -EOPNOTSUPP;
2976 
2977 	ret = nfp_nsp_hwinfo_lookup(nsp, hwinfo, sizeof(hwinfo));
2978 	nfp_nsp_close(nsp);
2979 
2980 	if (ret != 0) {
2981 		PMD_DRV_LOG(ERR, "Read persistent MAC address failed for eth_index %u.", index);
2982 		return ret;
2983 	}
2984 
2985 	ret = rte_ether_unformat_addr(hwinfo, mac_addr);
2986 	if (ret != 0) {
2987 		PMD_DRV_LOG(ERR, "Can not parse persistent MAC address.");
2988 		return -EOPNOTSUPP;
2989 	}
2990 
2991 	return 0;
2992 }
2993 
2994 static int
2995 nfp_net_set_port_mac_hwinfo(struct nfp_net_hw_priv *hw_priv,
2996 		uint32_t index,
2997 		struct rte_ether_addr *mac_addr)
2998 {
2999 	int ret;
3000 	char hwinfo_mac[32];
3001 	struct nfp_nsp *nsp;
3002 	char buf[RTE_ETHER_ADDR_FMT_SIZE];
3003 
3004 	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, mac_addr);
3005 	snprintf(hwinfo_mac, sizeof(hwinfo_mac), "eth%u.mac=%s", index, buf);
3006 
3007 	nsp = nfp_nsp_open(hw_priv->pf_dev->cpp);
3008 	if (nsp == NULL)
3009 		return -EOPNOTSUPP;
3010 
3011 	ret = nfp_nsp_hwinfo_set(nsp, hwinfo_mac, sizeof(hwinfo_mac));
3012 	nfp_nsp_close(nsp);
3013 
3014 	if (ret != 0) {
3015 		PMD_DRV_LOG(ERR, "HWinfo set failed: %d.", ret);
3016 		return ret;
3017 	}
3018 
3019 	return 0;
3020 }
3021 
3022 int
3023 nfp_net_get_eeprom(struct rte_eth_dev *dev,
3024 		struct rte_dev_eeprom_info *eeprom)
3025 {
3026 	int ret;
3027 	uint32_t nfp_idx;
3028 	struct nfp_net_hw *net_hw;
3029 	struct rte_ether_addr mac_addr;
3030 	struct nfp_net_hw_priv *hw_priv;
3031 
3032 	if (eeprom->length == 0)
3033 		return -EINVAL;
3034 
3035 	hw_priv = dev->process_private;
3036 	nfp_idx = nfp_net_get_nfp_index(dev);
3037 
3038 	ret = nfp_net_get_port_mac_hwinfo(hw_priv, nfp_idx, &mac_addr);
3039 	if (ret != 0)
3040 		return -EOPNOTSUPP;
3041 
3042 	net_hw = nfp_net_get_hw(dev);
3043 	eeprom->magic = net_hw->vendor_id | (net_hw->device_id << 16);
3044 	memcpy(eeprom->data, mac_addr.addr_bytes + eeprom->offset, eeprom->length);
3045 
3046 	return 0;
3047 }
3048 
3049 int
3050 nfp_net_set_eeprom(struct rte_eth_dev *dev,
3051 		struct rte_dev_eeprom_info *eeprom)
3052 {
3053 	int ret;
3054 	uint32_t nfp_idx;
3055 	struct nfp_net_hw *net_hw;
3056 	struct rte_ether_addr mac_addr;
3057 	struct nfp_net_hw_priv *hw_priv;
3058 
3059 	if (eeprom->length == 0)
3060 		return -EINVAL;
3061 
3062 	net_hw = nfp_net_get_hw(dev);
3063 	if (eeprom->magic != (uint32_t)(net_hw->vendor_id | (net_hw->device_id << 16)))
3064 		return -EINVAL;
3065 
3066 	hw_priv = dev->process_private;
3067 	nfp_idx = nfp_net_get_nfp_index(dev);
3068 	ret = nfp_net_get_port_mac_hwinfo(hw_priv, nfp_idx, &mac_addr);
3069 	if (ret != 0)
3070 		return -EOPNOTSUPP;
3071 
3072 	memcpy(mac_addr.addr_bytes + eeprom->offset, eeprom->data, eeprom->length);
3073 	ret = nfp_net_set_port_mac_hwinfo(hw_priv, nfp_idx, &mac_addr);
3074 	if (ret != 0)
3075 		return -EOPNOTSUPP;
3076 
3077 	return 0;
3078 }
3079 
3080 int
3081 nfp_net_get_module_info(struct rte_eth_dev *dev,
3082 		struct rte_eth_dev_module_info *info)
3083 {
3084 	int ret = 0;
3085 	uint8_t data;
3086 	uint32_t idx;
3087 	uint32_t read_len;
3088 	struct nfp_nsp *nsp;
3089 	struct nfp_net_hw_priv *hw_priv;
3090 	struct nfp_eth_table_port *eth_port;
3091 
3092 	hw_priv = dev->process_private;
3093 	nsp = nfp_nsp_open(hw_priv->pf_dev->cpp);
3094 	if (nsp == NULL) {
3095 		PMD_DRV_LOG(ERR, "Unable to open NSP.");
3096 		return -EIO;
3097 	}
3098 
3099 	if (!nfp_nsp_has_read_module_eeprom(nsp)) {
3100 		PMD_DRV_LOG(ERR, "Read module eeprom not supported. Please update flash.");
3101 		ret = -EOPNOTSUPP;
3102 		goto exit_close_nsp;
3103 	}
3104 
3105 	idx = nfp_net_get_idx(dev);
3106 	eth_port = &hw_priv->pf_dev->nfp_eth_table->ports[idx];
3107 	switch (eth_port->interface) {
3108 	case NFP_INTERFACE_SFP:
3109 		/* FALLTHROUGH */
3110 	case NFP_INTERFACE_SFP28:
3111 		/* Read which revision the transceiver complies with */
3112 		ret = nfp_nsp_read_module_eeprom(nsp, eth_port->eth_index,
3113 				SFP_SFF8472_COMPLIANCE, &data, 1, &read_len);
3114 		if (ret != 0)
3115 			goto exit_close_nsp;
3116 
3117 		if (data == 0) {
3118 			info->type = RTE_ETH_MODULE_SFF_8079;
3119 			info->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
3120 		} else {
3121 			info->type = RTE_ETH_MODULE_SFF_8472;
3122 			info->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
3123 		}
3124 		break;
3125 	case NFP_INTERFACE_QSFP:
3126 		/* Read which revision the transceiver complies with */
3127 		ret = nfp_nsp_read_module_eeprom(nsp, eth_port->eth_index,
3128 				SFP_SFF_REV_COMPLIANCE, &data, 1, &read_len);
3129 		if (ret != 0)
3130 			goto exit_close_nsp;
3131 
3132 		if (data == 0) {
3133 			info->type = RTE_ETH_MODULE_SFF_8436;
3134 			info->eeprom_len = RTE_ETH_MODULE_SFF_8436_MAX_LEN;
3135 		} else {
3136 			info->type = RTE_ETH_MODULE_SFF_8636;
3137 			info->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN;
3138 		}
3139 		break;
3140 	case NFP_INTERFACE_QSFP28:
3141 		info->type = RTE_ETH_MODULE_SFF_8636;
3142 		info->eeprom_len = RTE_ETH_MODULE_SFF_8636_MAX_LEN;
3143 		break;
3144 	default:
3145 		PMD_DRV_LOG(ERR, "Unsupported module %#x detected.",
3146 				eth_port->interface);
3147 		ret = -EINVAL;
3148 	}
3149 
3150 exit_close_nsp:
3151 	nfp_nsp_close(nsp);
3152 	return ret;
3153 }
3154 
3155 int
3156 nfp_net_get_module_eeprom(struct rte_eth_dev *dev,
3157 		struct rte_dev_eeprom_info *info)
3158 {
3159 	int ret = 0;
3160 	uint32_t idx;
3161 	struct nfp_nsp *nsp;
3162 	struct nfp_net_hw_priv *hw_priv;
3163 	struct nfp_eth_table_port *eth_port;
3164 
3165 	hw_priv = dev->process_private;
3166 	nsp = nfp_nsp_open(hw_priv->pf_dev->cpp);
3167 	if (nsp == NULL) {
3168 		PMD_DRV_LOG(ERR, "Unable to open NSP.");
3169 		return -EIO;
3170 	}
3171 
3172 	if (!nfp_nsp_has_read_module_eeprom(nsp)) {
3173 		PMD_DRV_LOG(ERR, "Read module eeprom not supported. Please update flash.");
3174 		ret = -EOPNOTSUPP;
3175 		goto exit_close_nsp;
3176 	}
3177 
3178 	idx = nfp_net_get_idx(dev);
3179 	eth_port = &hw_priv->pf_dev->nfp_eth_table->ports[idx];
3180 	ret = nfp_nsp_read_module_eeprom(nsp, eth_port->eth_index, info->offset,
3181 			info->data, info->length, &info->length);
3182 	if (ret != 0) {
3183 		if (info->length != 0)
3184 			PMD_DRV_LOG(ERR, "Incomplete read from module EEPROM: %d.", ret);
3185 		else
3186 			PMD_DRV_LOG(ERR, "Read from module EEPROM failed: %d.", ret);
3187 	}
3188 
3189 exit_close_nsp:
3190 	nfp_nsp_close(nsp);
3191 	return ret;
3192 }
3193 
3194 static int
3195 nfp_net_led_control(struct rte_eth_dev *dev,
3196 		bool is_on)
3197 {
3198 	int ret;
3199 	uint32_t nfp_idx;
3200 	struct nfp_net_hw_priv *hw_priv;
3201 
3202 	hw_priv = dev->process_private;
3203 	nfp_idx = nfp_net_get_nfp_index(dev);
3204 
3205 	ret = nfp_eth_set_idmode(hw_priv->pf_dev->cpp, nfp_idx, is_on);
3206 	if (ret < 0) {
3207 		PMD_DRV_LOG(ERR, "Set nfp idmode failed.");
3208 		return ret;
3209 	}
3210 
3211 	return 0;
3212 }
3213 
3214 int
3215 nfp_net_led_on(struct rte_eth_dev *dev)
3216 {
3217 	return nfp_net_led_control(dev, true);
3218 }
3219 
3220 int
3221 nfp_net_led_off(struct rte_eth_dev *dev)
3222 {
3223 	return nfp_net_led_control(dev, false);
3224 }
3225