xref: /dpdk/drivers/net/nfp/nfp_net_common.c (revision 21a66096bb44a4468353782c36fc85913520dc6c)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2014-2018 Netronome Systems, Inc.
3  * All rights reserved.
4  *
5  * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
6  */
7 
8 #include "nfp_net_common.h"
9 
10 #include <rte_alarm.h>
11 
12 #include "flower/nfp_flower_cmsg.h"
13 #include "flower/nfp_flower_representor.h"
14 #include "nfd3/nfp_nfd3.h"
15 #include "nfdk/nfp_nfdk.h"
16 #include "nfpcore/nfp_mip.h"
17 #include "nfpcore/nfp_nsp.h"
18 #include "nfpcore/nfp_rtsym.h"
19 #include "nfp_logs.h"
20 #include "nfp_net_meta.h"
21 
22 #define NFP_TX_MAX_SEG       UINT8_MAX
23 #define NFP_TX_MAX_MTU_SEG   8
24 
25 #define NFP_NET_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
26 #define NFP_NET_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
27 
28 #define DEFAULT_FLBUF_SIZE        9216
29 #define NFP_ETH_OVERHEAD \
30 	(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + RTE_VLAN_HLEN * 2)
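/*
 * Worked example, assuming the standard DPDK constants (14-byte Ethernet
 * header, 4-byte CRC, 4-byte VLAN header): 14 + 4 + 2 * 4 = 26 bytes of
 * overhead, so a 9000-byte L3 MTU corresponds to a 9026-byte frame.
 */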
31 
32 /* Only show FEC capability supported by the current speed. */
33 #define NFP_FEC_CAPA_ENTRY_NUM  1
34 
35 enum nfp_xstat_group {
36 	NFP_XSTAT_GROUP_NET,
37 	NFP_XSTAT_GROUP_MAC
38 };
39 
40 struct nfp_xstat {
41 	char name[RTE_ETH_XSTATS_NAME_SIZE];
42 	int offset;
43 	enum nfp_xstat_group group;
44 };
45 
46 #define NFP_XSTAT_NET(_name, _offset) {                 \
47 	.name = _name,                                  \
48 	.offset = NFP_NET_CFG_STATS_##_offset,          \
49 	.group = NFP_XSTAT_GROUP_NET,                   \
50 }
51 
52 #define NFP_XSTAT_MAC(_name, _offset) {                 \
53 	.name = _name,                                  \
54 	.offset = NFP_MAC_STATS_##_offset,              \
55 	.group = NFP_XSTAT_GROUP_MAC,                   \
56 }
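/*
 * For example, NFP_XSTAT_NET("rx_good_packets_mc", RX_MC_FRAMES) expands to
 * { .name = "rx_good_packets_mc", .offset = NFP_NET_CFG_STATS_RX_MC_FRAMES,
 *   .group = NFP_XSTAT_GROUP_NET }.
 */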
57 
58 static const struct nfp_xstat nfp_net_xstats[] = {
59 	/*
60 	 * Basic xstats available on both VF and PF.
61 	 * Note that in case new statistics of group NFP_XSTAT_GROUP_NET
62 	 * are added to this array, they must appear before any statistics
63 	 * of group NFP_XSTAT_GROUP_MAC.
64 	 */
65 	NFP_XSTAT_NET("rx_good_packets_mc", RX_MC_FRAMES),
66 	NFP_XSTAT_NET("tx_good_packets_mc", TX_MC_FRAMES),
67 	NFP_XSTAT_NET("rx_good_packets_bc", RX_BC_FRAMES),
68 	NFP_XSTAT_NET("tx_good_packets_bc", TX_BC_FRAMES),
69 	NFP_XSTAT_NET("rx_good_bytes_uc", RX_UC_OCTETS),
70 	NFP_XSTAT_NET("tx_good_bytes_uc", TX_UC_OCTETS),
71 	NFP_XSTAT_NET("rx_good_bytes_mc", RX_MC_OCTETS),
72 	NFP_XSTAT_NET("tx_good_bytes_mc", TX_MC_OCTETS),
73 	NFP_XSTAT_NET("rx_good_bytes_bc", RX_BC_OCTETS),
74 	NFP_XSTAT_NET("tx_good_bytes_bc", TX_BC_OCTETS),
75 	NFP_XSTAT_NET("tx_missed_errors", TX_DISCARDS),
76 	NFP_XSTAT_NET("bpf_pass_pkts", APP0_FRAMES),
77 	NFP_XSTAT_NET("bpf_pass_bytes", APP0_BYTES),
78 	NFP_XSTAT_NET("bpf_app1_pkts", APP1_FRAMES),
79 	NFP_XSTAT_NET("bpf_app1_bytes", APP1_BYTES),
80 	NFP_XSTAT_NET("bpf_app2_pkts", APP2_FRAMES),
81 	NFP_XSTAT_NET("bpf_app2_bytes", APP2_BYTES),
82 	NFP_XSTAT_NET("bpf_app3_pkts", APP3_FRAMES),
83 	NFP_XSTAT_NET("bpf_app3_bytes", APP3_BYTES),
84 	/*
85 	 * MAC xstats available only on the PF. These statistics are not available for VFs
86 	 * because the PF is not initialized when the VF is initialized, as it is still bound
87 	 * to the kernel driver. As such, the PMD cannot obtain a CPP handle to access the
88 	 * rtsym_table and get the pointer to the start of the MAC statistics counters.
89 	 */
90 	NFP_XSTAT_MAC("mac.rx_octets", RX_IN_OCTS),
91 	NFP_XSTAT_MAC("mac.rx_frame_too_long_errors", RX_FRAME_TOO_LONG_ERRORS),
92 	NFP_XSTAT_MAC("mac.rx_range_length_errors", RX_RANGE_LENGTH_ERRORS),
93 	NFP_XSTAT_MAC("mac.rx_vlan_received_ok", RX_VLAN_RECEIVED_OK),
94 	NFP_XSTAT_MAC("mac.rx_errors", RX_IN_ERRORS),
95 	NFP_XSTAT_MAC("mac.rx_broadcast_pkts", RX_IN_BROADCAST_PKTS),
96 	NFP_XSTAT_MAC("mac.rx_drop_events", RX_DROP_EVENTS),
97 	NFP_XSTAT_MAC("mac.rx_alignment_errors", RX_ALIGNMENT_ERRORS),
98 	NFP_XSTAT_MAC("mac.rx_pause_mac_ctrl_frames", RX_PAUSE_MAC_CTRL_FRAMES),
99 	NFP_XSTAT_MAC("mac.rx_frames_received_ok", RX_FRAMES_RECEIVED_OK),
100 	NFP_XSTAT_MAC("mac.rx_frame_check_sequence_errors", RX_FRAME_CHECK_SEQ_ERRORS),
101 	NFP_XSTAT_MAC("mac.rx_unicast_pkts", RX_UNICAST_PKTS),
102 	NFP_XSTAT_MAC("mac.rx_multicast_pkts", RX_MULTICAST_PKTS),
103 	NFP_XSTAT_MAC("mac.rx_pkts", RX_PKTS),
104 	NFP_XSTAT_MAC("mac.rx_undersize_pkts", RX_UNDERSIZE_PKTS),
105 	NFP_XSTAT_MAC("mac.rx_pkts_64_octets", RX_PKTS_64_OCTS),
106 	NFP_XSTAT_MAC("mac.rx_pkts_65_to_127_octets", RX_PKTS_65_TO_127_OCTS),
107 	NFP_XSTAT_MAC("mac.rx_pkts_128_to_255_octets", RX_PKTS_128_TO_255_OCTS),
108 	NFP_XSTAT_MAC("mac.rx_pkts_256_to_511_octets", RX_PKTS_256_TO_511_OCTS),
109 	NFP_XSTAT_MAC("mac.rx_pkts_512_to_1023_octets", RX_PKTS_512_TO_1023_OCTS),
110 	NFP_XSTAT_MAC("mac.rx_pkts_1024_to_1518_octets", RX_PKTS_1024_TO_1518_OCTS),
111 	NFP_XSTAT_MAC("mac.rx_pkts_1519_to_max_octets", RX_PKTS_1519_TO_MAX_OCTS),
112 	NFP_XSTAT_MAC("mac.rx_jabbers", RX_JABBERS),
113 	NFP_XSTAT_MAC("mac.rx_fragments", RX_FRAGMENTS),
114 	NFP_XSTAT_MAC("mac.rx_oversize_pkts", RX_OVERSIZE_PKTS),
115 	NFP_XSTAT_MAC("mac.rx_pause_frames_class0", RX_PAUSE_FRAMES_CLASS0),
116 	NFP_XSTAT_MAC("mac.rx_pause_frames_class1", RX_PAUSE_FRAMES_CLASS1),
117 	NFP_XSTAT_MAC("mac.rx_pause_frames_class2", RX_PAUSE_FRAMES_CLASS2),
118 	NFP_XSTAT_MAC("mac.rx_pause_frames_class3", RX_PAUSE_FRAMES_CLASS3),
119 	NFP_XSTAT_MAC("mac.rx_pause_frames_class4", RX_PAUSE_FRAMES_CLASS4),
120 	NFP_XSTAT_MAC("mac.rx_pause_frames_class5", RX_PAUSE_FRAMES_CLASS5),
121 	NFP_XSTAT_MAC("mac.rx_pause_frames_class6", RX_PAUSE_FRAMES_CLASS6),
122 	NFP_XSTAT_MAC("mac.rx_pause_frames_class7", RX_PAUSE_FRAMES_CLASS7),
123 	NFP_XSTAT_MAC("mac.rx_mac_ctrl_frames_received", RX_MAC_CTRL_FRAMES_REC),
124 	NFP_XSTAT_MAC("mac.rx_mac_head_drop", RX_MAC_HEAD_DROP),
125 	NFP_XSTAT_MAC("mac.tx_queue_drop", TX_QUEUE_DROP),
126 	NFP_XSTAT_MAC("mac.tx_octets", TX_OUT_OCTS),
127 	NFP_XSTAT_MAC("mac.tx_vlan_transmitted_ok", TX_VLAN_TRANSMITTED_OK),
128 	NFP_XSTAT_MAC("mac.tx_errors", TX_OUT_ERRORS),
129 	NFP_XSTAT_MAC("mac.tx_broadcast_pkts", TX_BROADCAST_PKTS),
130 	NFP_XSTAT_MAC("mac.tx_pause_mac_ctrl_frames", TX_PAUSE_MAC_CTRL_FRAMES),
131 	NFP_XSTAT_MAC("mac.tx_frames_transmitted_ok", TX_FRAMES_TRANSMITTED_OK),
132 	NFP_XSTAT_MAC("mac.tx_unicast_pkts", TX_UNICAST_PKTS),
133 	NFP_XSTAT_MAC("mac.tx_multicast_pkts", TX_MULTICAST_PKTS),
134 	NFP_XSTAT_MAC("mac.tx_pkts_64_octets", TX_PKTS_64_OCTS),
135 	NFP_XSTAT_MAC("mac.tx_pkts_65_to_127_octets", TX_PKTS_65_TO_127_OCTS),
136 	NFP_XSTAT_MAC("mac.tx_pkts_128_to_255_octets", TX_PKTS_128_TO_255_OCTS),
137 	NFP_XSTAT_MAC("mac.tx_pkts_256_to_511_octets", TX_PKTS_256_TO_511_OCTS),
138 	NFP_XSTAT_MAC("mac.tx_pkts_512_to_1023_octets", TX_PKTS_512_TO_1023_OCTS),
139 	NFP_XSTAT_MAC("mac.tx_pkts_1024_to_1518_octets", TX_PKTS_1024_TO_1518_OCTS),
140 	NFP_XSTAT_MAC("mac.tx_pkts_1519_to_max_octets", TX_PKTS_1519_TO_MAX_OCTS),
141 	NFP_XSTAT_MAC("mac.tx_pause_frames_class0", TX_PAUSE_FRAMES_CLASS0),
142 	NFP_XSTAT_MAC("mac.tx_pause_frames_class1", TX_PAUSE_FRAMES_CLASS1),
143 	NFP_XSTAT_MAC("mac.tx_pause_frames_class2", TX_PAUSE_FRAMES_CLASS2),
144 	NFP_XSTAT_MAC("mac.tx_pause_frames_class3", TX_PAUSE_FRAMES_CLASS3),
145 	NFP_XSTAT_MAC("mac.tx_pause_frames_class4", TX_PAUSE_FRAMES_CLASS4),
146 	NFP_XSTAT_MAC("mac.tx_pause_frames_class5", TX_PAUSE_FRAMES_CLASS5),
147 	NFP_XSTAT_MAC("mac.tx_pause_frames_class6", TX_PAUSE_FRAMES_CLASS6),
148 	NFP_XSTAT_MAC("mac.tx_pause_frames_class7", TX_PAUSE_FRAMES_CLASS7),
149 };
150 
151 static const uint32_t nfp_net_link_speed_nfp2rte[] = {
152 	[NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = RTE_ETH_SPEED_NUM_NONE,
153 	[NFP_NET_CFG_STS_LINK_RATE_UNKNOWN]     = RTE_ETH_SPEED_NUM_NONE,
154 	[NFP_NET_CFG_STS_LINK_RATE_1G]          = RTE_ETH_SPEED_NUM_1G,
155 	[NFP_NET_CFG_STS_LINK_RATE_10G]         = RTE_ETH_SPEED_NUM_10G,
156 	[NFP_NET_CFG_STS_LINK_RATE_25G]         = RTE_ETH_SPEED_NUM_25G,
157 	[NFP_NET_CFG_STS_LINK_RATE_40G]         = RTE_ETH_SPEED_NUM_40G,
158 	[NFP_NET_CFG_STS_LINK_RATE_50G]         = RTE_ETH_SPEED_NUM_50G,
159 	[NFP_NET_CFG_STS_LINK_RATE_100G]        = RTE_ETH_SPEED_NUM_100G,
160 };
161 
162 static size_t
163 nfp_net_link_speed_rte2nfp(uint32_t speed)
164 {
165 	size_t i;
166 
167 	for (i = 0; i < RTE_DIM(nfp_net_link_speed_nfp2rte); i++) {
168 		if (speed == nfp_net_link_speed_nfp2rte[i])
169 			return i;
170 	}
171 
172 	return NFP_NET_CFG_STS_LINK_RATE_UNKNOWN;
173 }
174 
175 static uint32_t
176 nfp_net_link_speed_nfp2rte_check(uint32_t speed)
177 {
178 	size_t i;
179 
180 	for (i = 0; i < RTE_DIM(nfp_net_link_speed_nfp2rte); i++) {
181 		if (speed == nfp_net_link_speed_nfp2rte[i])
182 			return nfp_net_link_speed_nfp2rte[i];
183 	}
184 
185 	return RTE_ETH_SPEED_NUM_NONE;
186 }
187 
188 void
189 nfp_net_notify_port_speed(struct nfp_net_hw *hw,
190 		struct rte_eth_link *link)
191 {
192 	/*
193 	 * Read the link status from NFP_NET_CFG_STS. If the link is down
194 	 * then write the link speed NFP_NET_CFG_STS_LINK_RATE_UNKNOWN to
195 	 * NFP_NET_CFG_STS_NSP_LINK_RATE.
196 	 */
197 	if (link->link_status == RTE_ETH_LINK_DOWN) {
198 		nn_cfg_writew(&hw->super, NFP_NET_CFG_STS_NSP_LINK_RATE,
199 				NFP_NET_CFG_STS_LINK_RATE_UNKNOWN);
200 		return;
201 	}
202 
203 	/*
204 	 * Link is up so write the link speed from the eth_table to
205 	 * NFP_NET_CFG_STS_NSP_LINK_RATE.
206 	 */
207 	nn_cfg_writew(&hw->super, NFP_NET_CFG_STS_NSP_LINK_RATE,
208 			nfp_net_link_speed_rte2nfp(link->link_speed));
209 }
210 
211 /**
212  * Reconfigure the firmware for a VF via the VF config mailbox
213  *
214  * @param net_hw
215  *   Device to reconfigure
216  * @param pf_dev
217  *   PF device that owns the VF config BAR
218  * @param update
219  *   The value for the mailbox VF command
220  * @param value
221  *   The value to write at the given offset
222  * @param offset
223  *   The offset in the VF config table
224  *
225  * @return
226  *   - (0) if the VF configuration was updated successfully.
227  *   - (-EIO) if an I/O error occurred while updating the VF configuration
228  */
229 static int
230 nfp_net_vf_reconfig(struct nfp_net_hw *net_hw,
231 		struct nfp_pf_dev *pf_dev,
232 		uint16_t update,
233 		uint8_t value,
234 		uint32_t offset)
235 {
236 	int ret;
237 	struct nfp_hw *hw;
238 
239 	hw = &net_hw->super;
240 	rte_spinlock_lock(&hw->reconfig_lock);
241 
242 	/* Write update info to mailbox in VF config symbol */
243 	nn_writeb(value, pf_dev->vf_cfg_tbl_bar + offset);
244 	nn_writew(update, pf_dev->vf_cfg_tbl_bar + NFP_NET_VF_CFG_MB_UPD);
245 	nn_cfg_writel(hw, NFP_NET_CFG_UPDATE, NFP_NET_CFG_UPDATE_VF);
246 
247 	rte_wmb();
248 
249 	ret = nfp_reconfig_real(hw, NFP_NET_CFG_UPDATE_VF);
250 
251 	rte_spinlock_unlock(&hw->reconfig_lock);
252 
253 	if (ret != 0)
254 		return -EIO;
255 
256 	return nn_readw(pf_dev->vf_cfg_tbl_bar + NFP_NET_VF_CFG_MB_RET);
257 }
258 
259 /**
260  * Reconfigure the firmware via the mailbox
261  *
262  * @param net_hw
263  *   Device to reconfigure
264  * @param mbox_cmd
265  *   The value for the mailbox command
266  *
267  * @return
268  *   - (0) if the reconfiguration via the mailbox succeeded.
269  *   - (-EIO) if an I/O error occurred while reconfiguring via the mailbox
270  */
271 int
272 nfp_net_mbox_reconfig(struct nfp_net_hw *net_hw,
273 		uint32_t mbox_cmd)
274 {
275 	int ret;
276 	uint32_t mbox;
277 
278 	mbox = net_hw->tlv_caps.mbox_off;
279 
280 	rte_spinlock_lock(&net_hw->super.reconfig_lock);
281 
282 	nn_cfg_writeq(&net_hw->super, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd);
283 	nn_cfg_writel(&net_hw->super, NFP_NET_CFG_UPDATE, NFP_NET_CFG_UPDATE_MBOX);
284 
285 	rte_wmb();
286 
287 	ret = nfp_reconfig_real(&net_hw->super, NFP_NET_CFG_UPDATE_MBOX);
288 
289 	rte_spinlock_unlock(&net_hw->super.reconfig_lock);
290 
291 	if (ret != 0) {
292 		PMD_DRV_LOG(ERR, "Error nfp net mailbox reconfig: mbox=%#08x update=%#08x.",
293 				mbox_cmd, NFP_NET_CFG_UPDATE_MBOX);
294 		return -EIO;
295 	}
296 
297 	return nn_cfg_readl(&net_hw->super, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET);
298 }
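/*
 * Minimal usage sketch (the command value below is a placeholder; real callers
 * pass the mailbox command defined for their feature in the config headers):
 *
 *   uint32_t mbox_cmd = ...;
 *   if (nfp_net_mbox_reconfig(net_hw, mbox_cmd) != 0)
 *       return -EIO;
 */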
299 
300 struct nfp_net_hw *
301 nfp_net_get_hw(const struct rte_eth_dev *dev)
302 {
303 	struct nfp_net_hw *hw;
304 
305 	if (rte_eth_dev_is_repr(dev)) {
306 		struct nfp_flower_representor *repr;
307 		repr = dev->data->dev_private;
308 		hw = repr->app_fw_flower->pf_hw;
309 	} else {
310 		hw = dev->data->dev_private;
311 	}
312 
313 	return hw;
314 }
315 
316 uint8_t
317 nfp_net_get_idx(const struct rte_eth_dev *dev)
318 {
319 	uint8_t idx;
320 
321 	if (rte_eth_dev_is_repr(dev)) {
322 		struct nfp_flower_representor *repr;
323 		repr = dev->data->dev_private;
324 		idx = repr->idx;
325 	} else {
326 		struct nfp_net_hw *hw;
327 		hw = dev->data->dev_private;
328 		idx = hw->idx;
329 	}
330 
331 	return idx;
332 }
333 
334 /*
335  * Configure an Ethernet device.
336  *
337  * This function must be invoked first before any other function in the Ethernet API.
338  * This function can also be re-invoked when a device is in the stopped state.
339  *
340  * A DPDK app sends info about how many queues to use and how those queues
341  * need to be configured. The DPDK core uses this info and makes sure no
342  * more queues than those advertised by the driver are requested.
343  * This function is called after that internal process.
344  */
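/*
 * For example, an application calling
 *   rte_eth_dev_configure(port_id, 4, 4, &dev_conf);
 * reaches this callback only after the ethdev layer has validated the queue
 * counts against the limits reported by nfp_net_infos_get().
 */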
345 int
346 nfp_net_configure(struct rte_eth_dev *dev)
347 {
348 	struct nfp_net_hw *hw;
349 	struct rte_eth_conf *dev_conf;
350 	struct rte_eth_rxmode *rxmode;
351 	struct rte_eth_txmode *txmode;
352 
353 	hw = nfp_net_get_hw(dev);
354 	dev_conf = &dev->data->dev_conf;
355 	rxmode = &dev_conf->rxmode;
356 	txmode = &dev_conf->txmode;
357 
358 	if ((rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) != 0)
359 		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
360 
361 	/* Checking TX mode */
362 	if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
363 		PMD_DRV_LOG(ERR, "TX mq_mode DCB and VMDq not supported.");
364 		return -EINVAL;
365 	}
366 
367 	/* Checking RX mode */
368 	if ((rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) != 0 &&
369 			(hw->super.cap & NFP_NET_CFG_CTRL_RSS_ANY) == 0) {
370 		PMD_DRV_LOG(ERR, "RSS not supported.");
371 		return -EINVAL;
372 	}
373 
374 	/* Checking MTU set */
375 	if (rxmode->mtu > hw->max_mtu + NFP_ETH_OVERHEAD) {
376 		PMD_DRV_LOG(ERR, "MTU (%u) larger than the maximum possible frame size (%u).",
377 				rxmode->mtu, hw->max_mtu + NFP_ETH_OVERHEAD);
378 		return -ERANGE;
379 	}
380 
381 	return 0;
382 }
383 
384 void
385 nfp_net_log_device_information(const struct nfp_net_hw *hw,
386 		struct nfp_pf_dev *pf_dev)
387 {
388 	uint32_t cap = hw->super.cap;
389 	uint32_t cap_ext = hw->super.cap_ext;
390 
391 	PMD_INIT_LOG(INFO, "VER: %u.%u, Maximum supported MTU: %d.",
392 			pf_dev->ver.major, pf_dev->ver.minor, hw->max_mtu);
393 
394 	PMD_INIT_LOG(INFO, "CAP: %#x.", cap);
395 	PMD_INIT_LOG(INFO, "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
396 			cap & NFP_NET_CFG_CTRL_ENABLE        ? "ENABLE "      : "",
397 			cap & NFP_NET_CFG_CTRL_PROMISC       ? "PROMISC "     : "",
398 			cap & NFP_NET_CFG_CTRL_L2BC          ? "L2BCFILT "    : "",
399 			cap & NFP_NET_CFG_CTRL_L2MC          ? "L2MCFILT "    : "",
400 			cap & NFP_NET_CFG_CTRL_RXCSUM        ? "RXCSUM "      : "",
401 			cap & NFP_NET_CFG_CTRL_TXCSUM        ? "TXCSUM "      : "",
402 			cap & NFP_NET_CFG_CTRL_RXVLAN        ? "RXVLAN "      : "",
403 			cap & NFP_NET_CFG_CTRL_TXVLAN        ? "TXVLAN "      : "",
404 			cap & NFP_NET_CFG_CTRL_SCATTER       ? "SCATTER "     : "",
405 			cap & NFP_NET_CFG_CTRL_GATHER        ? "GATHER "      : "",
406 			cap & NFP_NET_CFG_CTRL_LSO           ? "TSO "         : "",
407 			cap & NFP_NET_CFG_CTRL_RXQINQ        ? "RXQINQ "      : "",
408 			cap & NFP_NET_CFG_CTRL_RXVLAN_V2     ? "RXVLANv2 "    : "",
409 			cap & NFP_NET_CFG_CTRL_RINGCFG       ? "RINGCFG "     : "",
410 			cap & NFP_NET_CFG_CTRL_RSS           ? "RSS "         : "",
411 			cap & NFP_NET_CFG_CTRL_IRQMOD        ? "IRQMOD "      : "",
412 			cap & NFP_NET_CFG_CTRL_RINGPRIO      ? "RINGPRIO "    : "",
413 			cap & NFP_NET_CFG_CTRL_MSIXAUTO      ? "MSIXAUTO "    : "",
414 			cap & NFP_NET_CFG_CTRL_TXRWB         ? "TXRWB "       : "",
415 			cap & NFP_NET_CFG_CTRL_L2SWITCH      ? "L2SWITCH "    : "",
416 			cap & NFP_NET_CFG_CTRL_TXVLAN_V2     ? "TXVLANv2 "    : "",
417 			cap & NFP_NET_CFG_CTRL_VXLAN         ? "VXLAN "       : "",
418 			cap & NFP_NET_CFG_CTRL_NVGRE         ? "NVGRE "       : "",
419 			cap & NFP_NET_CFG_CTRL_MSIX_TX_OFF   ? "MSIX_TX_OFF " : "",
420 			cap & NFP_NET_CFG_CTRL_LSO2          ? "TSOv2 "       : "",
421 			cap & NFP_NET_CFG_CTRL_RSS2          ? "RSSv2 "       : "",
422 			cap & NFP_NET_CFG_CTRL_CSUM_COMPLETE ? "CSUM "        : "",
423 			cap & NFP_NET_CFG_CTRL_LIVE_ADDR     ? "LIVE_ADDR "   : "",
424 			cap & NFP_NET_CFG_CTRL_USO           ? "USO"          : "");
425 
426 	PMD_INIT_LOG(INFO, "CAP_WORD1: %#x.", cap_ext);
427 	PMD_INIT_LOG(INFO, "%s%s%s%s%s%s%s",
428 			cap_ext & NFP_NET_CFG_CTRL_PKT_TYPE        ? "PKT_TYPE "        : "",
429 			cap_ext & NFP_NET_CFG_CTRL_IPSEC           ? "IPSEC "           : "",
430 			cap_ext & NFP_NET_CFG_CTRL_IPSEC_SM_LOOKUP ? "IPSEC_SM "        : "",
431 			cap_ext & NFP_NET_CFG_CTRL_IPSEC_LM_LOOKUP ? "IPSEC_LM "        : "",
432 			cap_ext & NFP_NET_CFG_CTRL_MULTI_PF        ? "MULTI_PF "        : "",
433 			cap_ext & NFP_NET_CFG_CTRL_FLOW_STEER      ? "FLOW_STEER "      : "",
434 			cap_ext & NFP_NET_CFG_CTRL_IN_ORDER        ? "VIRTIO_IN_ORDER " : "");
435 
436 	PMD_INIT_LOG(INFO, "The max_rx_queues: %u, max_tx_queues: %u.",
437 			hw->max_rx_queues, hw->max_tx_queues);
438 }
439 
440 static inline void
441 nfp_net_enable_rxvlan_cap(struct nfp_net_hw *hw,
442 		uint32_t *ctrl)
443 {
444 	if ((hw->super.cap & NFP_NET_CFG_CTRL_RXVLAN_V2) != 0)
445 		*ctrl |= NFP_NET_CFG_CTRL_RXVLAN_V2;
446 	else if ((hw->super.cap & NFP_NET_CFG_CTRL_RXVLAN) != 0)
447 		*ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
448 }
449 
450 void
451 nfp_net_enable_queues(struct rte_eth_dev *dev)
452 {
453 	struct nfp_net_hw *hw;
454 
455 	hw = nfp_net_get_hw(dev);
456 
457 	nfp_enable_queues(&hw->super, dev->data->nb_rx_queues,
458 			dev->data->nb_tx_queues);
459 }
460 
461 void
462 nfp_net_disable_queues(struct rte_eth_dev *dev)
463 {
464 	struct nfp_net_hw *net_hw;
465 
466 	net_hw = nfp_net_get_hw(dev);
467 
468 	nfp_disable_queues(&net_hw->super);
469 }
470 
471 void
472 nfp_net_params_setup(struct nfp_net_hw *hw)
473 {
474 	nn_cfg_writel(&hw->super, NFP_NET_CFG_MTU, hw->mtu);
475 	nn_cfg_writel(&hw->super, NFP_NET_CFG_FLBUFSZ, hw->flbufsz);
476 }
477 
478 void
479 nfp_net_cfg_queue_setup(struct nfp_net_hw *hw)
480 {
481 	hw->super.qcp_cfg = hw->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
482 }
483 
484 int
485 nfp_net_set_mac_addr(struct rte_eth_dev *dev,
486 		struct rte_ether_addr *mac_addr)
487 {
488 	uint32_t update;
489 	uint32_t new_ctrl;
490 	struct nfp_hw *hw;
491 	struct nfp_net_hw *net_hw;
492 
493 	net_hw = nfp_net_get_hw(dev);
494 	hw = &net_hw->super;
495 	if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) != 0 &&
496 			(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR) == 0) {
497 		PMD_DRV_LOG(ERR, "Cannot change the MAC address while the port is enabled.");
498 		return -EBUSY;
499 	}
500 
501 	if (rte_is_valid_assigned_ether_addr(mac_addr) == 0) {
502 		PMD_DRV_LOG(ERR, "Invalid MAC address.");
503 		return -EINVAL;
504 	}
505 
506 	/* Writing new MAC to the specific port BAR address */
507 	nfp_write_mac(hw, (uint8_t *)mac_addr);
508 
509 	update = NFP_NET_CFG_UPDATE_MACADDR;
510 	new_ctrl = hw->ctrl;
511 	if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) != 0 &&
512 			(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR) != 0)
513 		new_ctrl |= NFP_NET_CFG_CTRL_LIVE_ADDR;
514 
515 	/* Signal the NIC about the change */
516 	if (nfp_reconfig(hw, new_ctrl, update) != 0) {
517 		PMD_DRV_LOG(ERR, "MAC address update failed.");
518 		return -EIO;
519 	}
520 
521 	hw->ctrl = new_ctrl;
522 
523 	return 0;
524 }
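/*
 * Typically reached through the ethdev MAC address API, e.g. (a minimal sketch;
 * the address below is just an illustration):
 *
 *   struct rte_ether_addr addr = { .addr_bytes = { 0x02, 0, 0, 0, 0, 0x01 } };
 *   rte_eth_dev_default_mac_addr_set(port_id, &addr);
 *
 * With the port started, this only succeeds if the firmware advertises
 * NFP_NET_CFG_CTRL_LIVE_ADDR.
 */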
525 
526 int
527 nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
528 		struct rte_intr_handle *intr_handle)
529 {
530 	uint16_t i;
531 	struct nfp_net_hw *hw;
532 
533 	if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
534 				dev->data->nb_rx_queues) != 0) {
535 		PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues intr_vec.",
536 				dev->data->nb_rx_queues);
537 		return -ENOMEM;
538 	}
539 
540 	hw = nfp_net_get_hw(dev);
541 
542 	if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_UIO) {
543 		PMD_DRV_LOG(INFO, "VF: enabling RX interrupt with UIO.");
544 		/* UIO just supports one queue and no LSC */
545 		nn_cfg_writeb(&hw->super, NFP_NET_CFG_RXR_VEC(0), 0);
546 		if (rte_intr_vec_list_index_set(intr_handle, 0, 0) != 0)
547 			return -1;
548 	} else {
549 		PMD_DRV_LOG(INFO, "VF: enabling RX interrupt with VFIO.");
550 		for (i = 0; i < dev->data->nb_rx_queues; i++) {
551 			/*
552 			 * The first MSI-X vector is reserved for
553 			 * non-EFD interrupts.
554 			 */
555 			nn_cfg_writeb(&hw->super, NFP_NET_CFG_RXR_VEC(i), i + 1);
556 			if (rte_intr_vec_list_index_set(intr_handle, i, i + 1) != 0)
557 				return -1;
558 		}
559 	}
560 
561 	/* Avoiding TX interrupts */
562 	hw->super.ctrl |= NFP_NET_CFG_CTRL_MSIX_TX_OFF;
563 	return 0;
564 }
565 
566 uint32_t
567 nfp_check_offloads(struct rte_eth_dev *dev)
568 {
569 	uint32_t cap;
570 	uint32_t ctrl = 0;
571 	uint64_t rx_offload;
572 	uint64_t tx_offload;
573 	struct nfp_net_hw *hw;
574 	struct rte_eth_conf *dev_conf;
575 
576 	hw = nfp_net_get_hw(dev);
577 	cap = hw->super.cap;
578 
579 	dev_conf = &dev->data->dev_conf;
580 	rx_offload = dev_conf->rxmode.offloads;
581 	tx_offload = dev_conf->txmode.offloads;
582 
583 	if ((rx_offload & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM) != 0) {
584 		if ((cap & NFP_NET_CFG_CTRL_RXCSUM) != 0)
585 			ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
586 	}
587 
588 	if ((rx_offload & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) != 0)
589 		nfp_net_enable_rxvlan_cap(hw, &ctrl);
590 
591 	if ((rx_offload & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) != 0) {
592 		if ((cap & NFP_NET_CFG_CTRL_RXQINQ) != 0)
593 			ctrl |= NFP_NET_CFG_CTRL_RXQINQ;
594 	}
595 
596 	hw->mtu = dev->data->mtu;
597 
598 	if ((tx_offload & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) != 0) {
599 		if ((cap & NFP_NET_CFG_CTRL_TXVLAN_V2) != 0)
600 			ctrl |= NFP_NET_CFG_CTRL_TXVLAN_V2;
601 		else if ((cap & NFP_NET_CFG_CTRL_TXVLAN) != 0)
602 			ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
603 	}
604 
605 	/* L2 broadcast */
606 	if ((cap & NFP_NET_CFG_CTRL_L2BC) != 0)
607 		ctrl |= NFP_NET_CFG_CTRL_L2BC;
608 
609 	/* L2 multicast */
610 	if ((cap & NFP_NET_CFG_CTRL_L2MC) != 0)
611 		ctrl |= NFP_NET_CFG_CTRL_L2MC;
612 
613 	/* TX checksum offload */
614 	if ((tx_offload & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) != 0 ||
615 			(tx_offload & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) != 0 ||
616 			(tx_offload & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) != 0)
617 		ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
618 
619 	/* LSO offload */
620 	if ((tx_offload & RTE_ETH_TX_OFFLOAD_TCP_TSO) != 0 ||
621 			(tx_offload & RTE_ETH_TX_OFFLOAD_UDP_TSO) != 0 ||
622 			(tx_offload & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO) != 0) {
623 		if ((cap & NFP_NET_CFG_CTRL_LSO) != 0)
624 			ctrl |= NFP_NET_CFG_CTRL_LSO;
625 		else if ((cap & NFP_NET_CFG_CTRL_LSO2) != 0)
626 			ctrl |= NFP_NET_CFG_CTRL_LSO2;
627 	}
628 
629 	/* TX gather */
630 	if ((tx_offload & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) != 0)
631 		ctrl |= NFP_NET_CFG_CTRL_GATHER;
632 
633 	return ctrl;
634 }
635 
636 int
637 nfp_net_promisc_enable(struct rte_eth_dev *dev)
638 {
639 	int ret;
640 	uint32_t update;
641 	uint32_t new_ctrl;
642 	struct nfp_hw *hw;
643 	struct nfp_net_hw *net_hw;
644 
645 	net_hw = nfp_net_get_hw(dev);
646 
647 	hw = &net_hw->super;
648 	if ((hw->cap & NFP_NET_CFG_CTRL_PROMISC) == 0) {
649 		PMD_DRV_LOG(ERR, "Promiscuous mode not supported.");
650 		return -ENOTSUP;
651 	}
652 
653 	if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) != 0) {
654 		PMD_DRV_LOG(INFO, "Promiscuous mode already enabled.");
655 		return 0;
656 	}
657 
658 	new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_PROMISC;
659 	update = NFP_NET_CFG_UPDATE_GEN;
660 
661 	ret = nfp_reconfig(hw, new_ctrl, update);
662 	if (ret != 0)
663 		return ret;
664 
665 	hw->ctrl = new_ctrl;
666 
667 	return 0;
668 }
669 
670 int
671 nfp_net_promisc_disable(struct rte_eth_dev *dev)
672 {
673 	int ret;
674 	uint32_t update;
675 	uint32_t new_ctrl;
676 	struct nfp_hw *hw;
677 	struct nfp_net_hw *net_hw;
678 
679 	net_hw = nfp_net_get_hw(dev);
680 	hw = &net_hw->super;
681 
682 	if ((hw->cap & NFP_NET_CFG_CTRL_PROMISC) == 0) {
683 		PMD_DRV_LOG(ERR, "Promiscuous mode not supported.");
684 		return -ENOTSUP;
685 	}
686 
687 	if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) == 0) {
688 		PMD_DRV_LOG(INFO, "Promiscuous mode already disabled.");
689 		return 0;
690 	}
691 
692 	new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_PROMISC;
693 	update = NFP_NET_CFG_UPDATE_GEN;
694 
695 	ret = nfp_reconfig(hw, new_ctrl, update);
696 	if (ret != 0)
697 		return ret;
698 
699 	hw->ctrl = new_ctrl;
700 
701 	return 0;
702 }
703 
704 static int
705 nfp_net_set_allmulticast_mode(struct rte_eth_dev *dev,
706 		bool enable)
707 {
708 	int ret;
709 	uint32_t update;
710 	struct nfp_hw *hw;
711 	uint32_t cap_extend;
712 	uint32_t ctrl_extend;
713 	uint32_t new_ctrl_extend;
714 	struct nfp_net_hw *net_hw;
715 
716 	net_hw = nfp_net_get_hw(dev);
717 	hw = &net_hw->super;
718 
719 	cap_extend = hw->cap_ext;
720 	if ((cap_extend & NFP_NET_CFG_CTRL_MCAST_FILTER) == 0) {
721 		PMD_DRV_LOG(DEBUG, "Allmulticast mode not supported.");
722 		return -ENOTSUP;
723 	}
724 
725 	/*
726 	 * Allmulticast mode enabled when NFP_NET_CFG_CTRL_MCAST_FILTER bit is 0.
727 	 * Allmulticast mode disabled when NFP_NET_CFG_CTRL_MCAST_FILTER bit is 1.
728 	 */
729 	ctrl_extend = hw->ctrl_ext;
730 	if (enable) {
731 		if ((ctrl_extend & NFP_NET_CFG_CTRL_MCAST_FILTER) == 0)
732 			return 0;
733 
734 		new_ctrl_extend = ctrl_extend & ~NFP_NET_CFG_CTRL_MCAST_FILTER;
735 	} else {
736 		if ((ctrl_extend & NFP_NET_CFG_CTRL_MCAST_FILTER) != 0)
737 			return 0;
738 
739 		new_ctrl_extend = ctrl_extend | NFP_NET_CFG_CTRL_MCAST_FILTER;
740 	}
741 
742 	update = NFP_NET_CFG_UPDATE_GEN;
743 
744 	ret = nfp_ext_reconfig(hw, new_ctrl_extend, update);
745 	if (ret != 0)
746 		return ret;
747 
748 	hw->ctrl_ext = new_ctrl_extend;
749 	return 0;
750 }
751 
752 int
753 nfp_net_allmulticast_enable(struct rte_eth_dev *dev)
754 {
755 	return nfp_net_set_allmulticast_mode(dev, true);
756 }
757 
758 int
759 nfp_net_allmulticast_disable(struct rte_eth_dev *dev)
760 {
761 	return nfp_net_set_allmulticast_mode(dev, false);
762 }
763 
764 static void
765 nfp_net_pf_speed_update(struct rte_eth_dev *dev,
766 		struct nfp_net_hw_priv *hw_priv,
767 		struct rte_eth_link *link)
768 {
769 	uint8_t idx;
770 	enum nfp_eth_aneg aneg;
771 	struct nfp_pf_dev *pf_dev;
772 	struct nfp_eth_table *nfp_eth_table;
773 	struct nfp_eth_table_port *eth_port;
774 
775 	pf_dev = hw_priv->pf_dev;
776 	idx = nfp_net_get_idx(dev);
777 	aneg = pf_dev->nfp_eth_table->ports[idx].aneg;
778 
779 	/* Re-read the port table if the speed was updated or autonegotiation is enabled. */
780 	if (pf_dev->speed_updated || aneg == NFP_ANEG_AUTO) {
781 		nfp_eth_table = nfp_eth_read_ports(pf_dev->cpp);
782 		if (nfp_eth_table == NULL) {
783 			PMD_DRV_LOG(DEBUG, "Failed to get nfp_eth_table.");
784 		} else {
785 			pf_dev->nfp_eth_table->ports[idx] = nfp_eth_table->ports[idx];
786 			free(nfp_eth_table);
787 			pf_dev->speed_updated = false;
788 		}
789 	}
790 
791 	nfp_eth_table = pf_dev->nfp_eth_table;
792 	eth_port = &nfp_eth_table->ports[idx];
793 
794 	link->link_speed = nfp_net_link_speed_nfp2rte_check(eth_port->speed);
795 
796 	if (dev->data->dev_conf.link_speeds == RTE_ETH_LINK_SPEED_AUTONEG &&
797 			eth_port->supp_aneg)
798 		link->link_autoneg = RTE_ETH_LINK_AUTONEG;
799 }
800 
801 static void
802 nfp_net_vf_speed_update(struct rte_eth_link *link,
803 		uint32_t link_status)
804 {
805 	size_t link_rate_index;
806 
807 	/*
808 	 * Shift and mask link_status so that it is effectively the value
809 	 * at offset NFP_NET_CFG_STS_NSP_LINK_RATE.
810 	 */
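	/*
	 * Illustrative decode (the exact shift/mask values are defined in
	 * nfp_net_ctrl.h): if the extracted rate field equals
	 * NFP_NET_CFG_STS_LINK_RATE_10G, the lookup table above yields
	 * RTE_ETH_SPEED_NUM_10G.
	 */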
811 	link_rate_index = (link_status >> NFP_NET_CFG_STS_LINK_RATE_SHIFT) &
812 			NFP_NET_CFG_STS_LINK_RATE_MASK;
813 	if (link_rate_index < RTE_DIM(nfp_net_link_speed_nfp2rte))
814 		link->link_speed = nfp_net_link_speed_nfp2rte[link_rate_index];
815 	else
816 		link->link_speed = RTE_ETH_SPEED_NUM_NONE;
817 }
818 
819 int
820 nfp_net_link_update_common(struct rte_eth_dev *dev,
821 		struct rte_eth_link *link,
822 		uint32_t link_status)
823 {
824 	int ret;
825 	struct nfp_net_hw_priv *hw_priv;
826 
827 	hw_priv = dev->process_private;
828 	if (link->link_status == RTE_ETH_LINK_UP) {
829 		if (hw_priv->is_pf)
830 			nfp_net_pf_speed_update(dev, hw_priv, link);
831 		else
832 			nfp_net_vf_speed_update(link, link_status);
833 	}
834 
835 	ret = rte_eth_linkstatus_set(dev, link);
836 	if (ret == 0) {
837 		if (link->link_status == RTE_ETH_LINK_UP)
838 			PMD_DRV_LOG(INFO, "NIC Link is Up.");
839 		else
840 			PMD_DRV_LOG(INFO, "NIC Link is Down.");
841 	}
842 
843 	return ret;
844 }
845 
846 /*
847  * Returns 0 if the link status changed, -1 if it did not change.
848  *
849  * Waiting to complete is needed as it can take up to 9 seconds to get the
850  * link status.
851  */
852 int
853 nfp_net_link_update(struct rte_eth_dev *dev,
854 		__rte_unused int wait_to_complete)
855 {
856 	int ret;
857 	struct nfp_net_hw *hw;
858 	uint32_t nn_link_status;
859 	struct rte_eth_link link;
860 	struct nfp_net_hw_priv *hw_priv;
861 
862 	hw = nfp_net_get_hw(dev);
863 	hw_priv = dev->process_private;
864 
865 	memset(&link, 0, sizeof(struct rte_eth_link));
866 
867 	/* Read link status */
868 	nn_link_status = nn_cfg_readw(&hw->super, NFP_NET_CFG_STS);
869 	if ((nn_link_status & NFP_NET_CFG_STS_LINK) != 0)
870 		link.link_status = RTE_ETH_LINK_UP;
871 
872 	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
873 
874 	ret = nfp_net_link_update_common(dev, &link, nn_link_status);
875 	if (ret == -EIO)
876 		return ret;
877 
878 	/*
879 	 * Notify the port to update the speed value in the CTRL BAR from NSP.
880 	 * Not applicable for VFs as the associated PF is still attached to the
881 	 * kernel driver.
882 	 */
883 	if (hw_priv != NULL && hw_priv->is_pf)
884 		nfp_net_notify_port_speed(hw, &link);
885 
886 	return ret;
887 }
888 
889 int
890 nfp_net_stats_get(struct rte_eth_dev *dev,
891 		struct rte_eth_stats *stats)
892 {
893 	uint16_t i;
894 	struct nfp_net_hw *hw;
895 	struct rte_eth_stats nfp_dev_stats;
896 
897 	if (stats == NULL)
898 		return -EINVAL;
899 
900 	hw = nfp_net_get_hw(dev);
901 
902 	memset(&nfp_dev_stats, 0, sizeof(nfp_dev_stats));
903 
904 	/* Reading per RX ring stats */
905 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
906 		if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
907 			break;
908 
909 		nfp_dev_stats.q_ipackets[i] =
910 				nn_cfg_readq(&hw->super, NFP_NET_CFG_RXR_STATS(i));
911 		nfp_dev_stats.q_ipackets[i] -=
912 				hw->eth_stats_base.q_ipackets[i];
913 
914 		nfp_dev_stats.q_ibytes[i] =
915 				nn_cfg_readq(&hw->super, NFP_NET_CFG_RXR_STATS(i) + 0x8);
916 		nfp_dev_stats.q_ibytes[i] -=
917 				hw->eth_stats_base.q_ibytes[i];
918 	}
919 
920 	/* Reading per TX ring stats */
921 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
922 		if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
923 			break;
924 
925 		nfp_dev_stats.q_opackets[i] =
926 				nn_cfg_readq(&hw->super, NFP_NET_CFG_TXR_STATS(i));
927 		nfp_dev_stats.q_opackets[i] -= hw->eth_stats_base.q_opackets[i];
928 
929 		nfp_dev_stats.q_obytes[i] =
930 				nn_cfg_readq(&hw->super, NFP_NET_CFG_TXR_STATS(i) + 0x8);
931 		nfp_dev_stats.q_obytes[i] -= hw->eth_stats_base.q_obytes[i];
932 	}
933 
934 	nfp_dev_stats.ipackets = nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_FRAMES);
935 	nfp_dev_stats.ipackets -= hw->eth_stats_base.ipackets;
936 
937 	nfp_dev_stats.ibytes = nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_OCTETS);
938 	nfp_dev_stats.ibytes -= hw->eth_stats_base.ibytes;
939 
940 	nfp_dev_stats.opackets =
941 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_TX_FRAMES);
942 	nfp_dev_stats.opackets -= hw->eth_stats_base.opackets;
943 
944 	nfp_dev_stats.obytes =
945 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_TX_OCTETS);
946 	nfp_dev_stats.obytes -= hw->eth_stats_base.obytes;
947 
948 	/* Reading general device stats */
949 	nfp_dev_stats.ierrors =
950 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_ERRORS);
951 	nfp_dev_stats.ierrors -= hw->eth_stats_base.ierrors;
952 
953 	nfp_dev_stats.oerrors =
954 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_TX_ERRORS);
955 	nfp_dev_stats.oerrors -= hw->eth_stats_base.oerrors;
956 
957 	/* RX ring mbuf allocation failures */
958 	nfp_dev_stats.rx_nombuf = dev->data->rx_mbuf_alloc_failed;
959 
960 	nfp_dev_stats.imissed =
961 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_DISCARDS);
962 	nfp_dev_stats.imissed -= hw->eth_stats_base.imissed;
963 
964 	memcpy(stats, &nfp_dev_stats, sizeof(*stats));
965 	return 0;
966 }
967 
968 /*
969  * hw->eth_stats_base records the per-counter starting point.
970  * Let's update it now.
971  */
972 int
973 nfp_net_stats_reset(struct rte_eth_dev *dev)
974 {
975 	uint16_t i;
976 	struct nfp_net_hw *hw;
977 
978 	hw = nfp_net_get_hw(dev);
979 
980 	/* Reading per RX ring stats */
981 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
982 		if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
983 			break;
984 
985 		hw->eth_stats_base.q_ipackets[i] =
986 				nn_cfg_readq(&hw->super, NFP_NET_CFG_RXR_STATS(i));
987 
988 		hw->eth_stats_base.q_ibytes[i] =
989 				nn_cfg_readq(&hw->super, NFP_NET_CFG_RXR_STATS(i) + 0x8);
990 	}
991 
992 	/* Reading per TX ring stats */
993 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
994 		if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
995 			break;
996 
997 		hw->eth_stats_base.q_opackets[i] =
998 				nn_cfg_readq(&hw->super, NFP_NET_CFG_TXR_STATS(i));
999 
1000 		hw->eth_stats_base.q_obytes[i] =
1001 				nn_cfg_readq(&hw->super, NFP_NET_CFG_TXR_STATS(i) + 0x8);
1002 	}
1003 
1004 	hw->eth_stats_base.ipackets =
1005 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_FRAMES);
1006 
1007 	hw->eth_stats_base.ibytes =
1008 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_OCTETS);
1009 
1010 	hw->eth_stats_base.opackets =
1011 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_TX_FRAMES);
1012 
1013 	hw->eth_stats_base.obytes =
1014 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_TX_OCTETS);
1015 
1016 	/* Reading general device stats */
1017 	hw->eth_stats_base.ierrors =
1018 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_ERRORS);
1019 
1020 	hw->eth_stats_base.oerrors =
1021 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_TX_ERRORS);
1022 
1023 	/* RX ring mbuf allocation failures */
1024 	dev->data->rx_mbuf_alloc_failed = 0;
1025 
1026 	hw->eth_stats_base.imissed =
1027 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_DISCARDS);
1028 
1029 	return 0;
1030 }
1031 
1032 uint32_t
1033 nfp_net_xstats_size(const struct rte_eth_dev *dev)
1034 {
1035 	uint32_t count;
1036 	bool vf_flag = false;
1037 	struct nfp_net_hw *hw;
1038 	struct nfp_flower_representor *repr;
1039 	const uint32_t size = RTE_DIM(nfp_net_xstats);
1040 
1041 	if (rte_eth_dev_is_repr(dev)) {
1042 		repr = dev->data->dev_private;
1043 		if (nfp_flower_repr_is_vf(repr))
1044 			vf_flag = true;
1045 	} else {
1046 		hw = dev->data->dev_private;
1047 		if (hw->mac_stats == NULL)
1048 			vf_flag = true;
1049 	}
1050 
1051 	/* If the device is a VF or VF-repr, then there will be no MAC stats */
1052 	if (vf_flag) {
1053 		for (count = 0; count < size; count++) {
1054 			if (nfp_net_xstats[count].group == NFP_XSTAT_GROUP_MAC)
1055 				break;
1056 		}
1057 
1058 		return count;
1059 	}
1060 
1061 	return size;
1062 }
1063 
1064 static const struct nfp_xstat *
1065 nfp_net_xstats_info(const struct rte_eth_dev *dev,
1066 		uint32_t index)
1067 {
1068 	if (index >= nfp_net_xstats_size(dev)) {
1069 		PMD_DRV_LOG(ERR, "The xstat index is out of bounds.");
1070 		return NULL;
1071 	}
1072 
1073 	return &nfp_net_xstats[index];
1074 }
1075 
1076 static uint64_t
1077 nfp_net_xstats_value(const struct rte_eth_dev *dev,
1078 		uint32_t index,
1079 		bool raw)
1080 {
1081 	uint64_t value;
1082 	uint8_t *mac_stats;
1083 	struct nfp_net_hw *hw;
1084 	struct nfp_xstat xstat;
1085 	struct rte_eth_xstat *xstats_base;
1086 	struct nfp_flower_representor *repr;
1087 
1088 	if (rte_eth_dev_is_repr(dev)) {
1089 		repr = dev->data->dev_private;
1090 		hw = repr->app_fw_flower->pf_hw;
1091 
1092 		mac_stats = repr->mac_stats;
1093 		xstats_base = repr->repr_xstats_base;
1094 	} else {
1095 		hw = dev->data->dev_private;
1096 
1097 		mac_stats = hw->mac_stats;
1098 		xstats_base = hw->eth_xstats_base;
1099 	}
1100 
1101 	xstat = nfp_net_xstats[index];
1102 
1103 	if (xstat.group == NFP_XSTAT_GROUP_MAC)
1104 		value = nn_readq(mac_stats + xstat.offset);
1105 	else
1106 		value = nn_cfg_readq(&hw->super, xstat.offset);
1107 
1108 	if (raw)
1109 		return value;
1110 
1111 	/*
1112 	 * A baseline value of each statistic counter is recorded when stats are "reset".
1113 	 * Thus, the value returned by this function needs to be decremented by this
1114 	 * baseline value. The result is the count of this statistic since the last time
1115 	 * it was "reset".
1116 	 */
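	/*
	 * Illustrative numbers: if the raw counter currently reads 1500 and the
	 * baseline recorded at the last reset was 1000, the value reported is 500.
	 */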
1117 	return value - xstats_base[index].value;
1118 }
1119 
1120 /* NOTE: All callers ensure dev is always set. */
1121 int
1122 nfp_net_xstats_get_names(struct rte_eth_dev *dev,
1123 		struct rte_eth_xstat_name *xstats_names,
1124 		unsigned int size)
1125 {
1126 	uint32_t id;
1127 	uint32_t nfp_size;
1128 	uint32_t read_size;
1129 
1130 	nfp_size = nfp_net_xstats_size(dev);
1131 
1132 	if (xstats_names == NULL)
1133 		return nfp_size;
1134 
1135 	/* Read at most NFP xstats number of names. */
1136 	read_size = RTE_MIN(size, nfp_size);
1137 
1138 	for (id = 0; id < read_size; id++)
1139 		rte_strlcpy(xstats_names[id].name, nfp_net_xstats[id].name,
1140 				RTE_ETH_XSTATS_NAME_SIZE);
1141 
1142 	return read_size;
1143 }
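/*
 * Typical application-side call sequence (a minimal sketch):
 *
 *   int nb = rte_eth_xstats_get_names(port_id, NULL, 0);    // query the count
 *   struct rte_eth_xstat_name *names = malloc(nb * sizeof(*names));
 *   rte_eth_xstats_get_names(port_id, names, nb);           // fill the names
 */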
1144 
1145 /* NOTE: All callers ensure dev is always set. */
1146 int
1147 nfp_net_xstats_get(struct rte_eth_dev *dev,
1148 		struct rte_eth_xstat *xstats,
1149 		unsigned int n)
1150 {
1151 	uint32_t id;
1152 	uint32_t nfp_size;
1153 	uint32_t read_size;
1154 
1155 	nfp_size = nfp_net_xstats_size(dev);
1156 
1157 	if (xstats == NULL)
1158 		return nfp_size;
1159 
1160 	/* Read at most NFP xstats number of values. */
1161 	read_size = RTE_MIN(n, nfp_size);
1162 
1163 	for (id = 0; id < read_size; id++) {
1164 		xstats[id].id = id;
1165 		xstats[id].value = nfp_net_xstats_value(dev, id, false);
1166 	}
1167 
1168 	return read_size;
1169 }
1170 
1171 /*
1172  * NOTE: The only caller rte_eth_xstats_get_names_by_id() ensures dev,
1173  * ids, xstats_names and size are valid, and non-NULL.
1174  */
1175 int
1176 nfp_net_xstats_get_names_by_id(struct rte_eth_dev *dev,
1177 		const uint64_t *ids,
1178 		struct rte_eth_xstat_name *xstats_names,
1179 		unsigned int size)
1180 {
1181 	uint32_t i;
1182 	uint32_t read_size;
1183 
1184 	/* Read at most NFP xstats number of names. */
1185 	read_size = RTE_MIN(size, nfp_net_xstats_size(dev));
1186 
1187 	for (i = 0; i < read_size; i++) {
1188 		const struct nfp_xstat *xstat;
1189 
1190 		/* Make sure ID is valid for device. */
1191 		xstat = nfp_net_xstats_info(dev, ids[i]);
1192 		if (xstat == NULL)
1193 			return -EINVAL;
1194 
1195 		rte_strlcpy(xstats_names[i].name, xstat->name,
1196 				RTE_ETH_XSTATS_NAME_SIZE);
1197 	}
1198 
1199 	return read_size;
1200 }
1201 
1202 /*
1203  * NOTE: The only caller rte_eth_xstats_get_by_id() ensures dev,
1204  * ids, values and n are valid, and non-NULL.
1205  */
1206 int
1207 nfp_net_xstats_get_by_id(struct rte_eth_dev *dev,
1208 		const uint64_t *ids,
1209 		uint64_t *values,
1210 		unsigned int n)
1211 {
1212 	uint32_t i;
1213 	uint32_t read_size;
1214 
1215 	/* Read at most NFP xstats number of values. */
1216 	read_size = RTE_MIN(n, nfp_net_xstats_size(dev));
1217 
1218 	for (i = 0; i < read_size; i++) {
1219 		const struct nfp_xstat *xstat;
1220 
1221 		/* Make sure index is valid for device. */
1222 		xstat = nfp_net_xstats_info(dev, ids[i]);
1223 		if (xstat == NULL)
1224 			return -EINVAL;
1225 
1226 		values[i] = nfp_net_xstats_value(dev, ids[i], false);
1227 	}
1228 
1229 	return read_size;
1230 }
1231 
1232 int
1233 nfp_net_xstats_reset(struct rte_eth_dev *dev)
1234 {
1235 	uint32_t id;
1236 	uint32_t read_size;
1237 	struct nfp_net_hw *hw;
1238 	struct rte_eth_xstat *xstats_base;
1239 	struct nfp_flower_representor *repr;
1240 
1241 	read_size = nfp_net_xstats_size(dev);
1242 
1243 	if (rte_eth_dev_is_repr(dev)) {
1244 		repr = dev->data->dev_private;
1245 		xstats_base = repr->repr_xstats_base;
1246 	} else {
1247 		hw = dev->data->dev_private;
1248 		xstats_base = hw->eth_xstats_base;
1249 	}
1250 
1251 	for (id = 0; id < read_size; id++) {
1252 		xstats_base[id].id = id;
1253 		xstats_base[id].value = nfp_net_xstats_value(dev, id, true);
1254 	}
1255 
1256 	/* Successfully reset xstats, now call function to reset basic stats. */
1257 	if (rte_eth_dev_is_repr(dev))
1258 		return nfp_flower_repr_stats_reset(dev);
1259 	else
1260 		return nfp_net_stats_reset(dev);
1261 }
1262 
1263 void
1264 nfp_net_rx_desc_limits(struct nfp_net_hw_priv *hw_priv,
1265 		uint16_t *min_rx_desc,
1266 		uint16_t *max_rx_desc)
1267 {
1268 	*max_rx_desc = hw_priv->dev_info->max_qc_size;
1269 	*min_rx_desc = hw_priv->dev_info->min_qc_size;
1270 }
1271 
1272 void
1273 nfp_net_tx_desc_limits(struct nfp_net_hw_priv *hw_priv,
1274 		uint16_t *min_tx_desc,
1275 		uint16_t *max_tx_desc)
1276 {
1277 	uint16_t tx_dpp;
1278 
1279 	if (hw_priv->pf_dev->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3)
1280 		tx_dpp = NFD3_TX_DESC_PER_PKT;
1281 	else
1282 		tx_dpp = NFDK_TX_DESC_PER_SIMPLE_PKT;
1283 
1284 	*max_tx_desc = hw_priv->dev_info->max_qc_size / tx_dpp;
1285 	*min_tx_desc = hw_priv->dev_info->min_qc_size / tx_dpp;
1286 }
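/*
 * Worked example with hypothetical sizes: if the device info reports a QC
 * size range of 256..32768 entries and the NFDK datapath needs 2 descriptors
 * per simple packet, the advertised TX ring limits become 128..16384
 * descriptors.
 */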
1287 
1288 int
1289 nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1290 {
1291 	uint32_t cap;
1292 	uint32_t cap_extend;
1293 	uint16_t min_rx_desc;
1294 	uint16_t max_rx_desc;
1295 	uint16_t min_tx_desc;
1296 	uint16_t max_tx_desc;
1297 	struct nfp_net_hw *hw;
1298 	struct nfp_net_hw_priv *hw_priv;
1299 
1300 	hw = nfp_net_get_hw(dev);
1301 	hw_priv = dev->process_private;
1302 	if (hw_priv == NULL)
1303 		return -EINVAL;
1304 
1305 	nfp_net_rx_desc_limits(hw_priv, &min_rx_desc, &max_rx_desc);
1306 	nfp_net_tx_desc_limits(hw_priv, &min_tx_desc, &max_tx_desc);
1307 
1308 	dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
1309 	dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
1310 	dev_info->min_rx_bufsize = RTE_ETHER_MIN_MTU;
1311 	/*
1312 	 * The maximum rx packet length is set to the maximum layer 3 MTU,
1313 	 * plus layer 2, CRC and VLAN headers.
1314 	 * The maximum layer 3 MTU (max_mtu) is read from hardware,
1315 	 * which was set by the firmware loaded onto the card.
1316 	 */
1317 	dev_info->max_rx_pktlen = hw->max_mtu + NFP_ETH_OVERHEAD;
1318 	dev_info->max_mtu = hw->max_mtu;
1319 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
1320 	/* Next should change when PF support is implemented */
1321 	dev_info->max_mac_addrs = 1;
1322 
1323 	cap = hw->super.cap;
1324 
1325 	if ((cap & (NFP_NET_CFG_CTRL_RXVLAN | NFP_NET_CFG_CTRL_RXVLAN_V2)) != 0)
1326 		dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
1327 
1328 	if ((cap & NFP_NET_CFG_CTRL_RXQINQ) != 0)
1329 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
1330 
1331 	if ((cap & NFP_NET_CFG_CTRL_RXCSUM) != 0)
1332 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
1333 				RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
1334 				RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
1335 
1336 	if ((cap & (NFP_NET_CFG_CTRL_TXVLAN | NFP_NET_CFG_CTRL_TXVLAN_V2)) != 0)
1337 		dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
1338 
1339 	if ((cap & NFP_NET_CFG_CTRL_TXCSUM) != 0)
1340 		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
1341 				RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
1342 				RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
1343 
1344 	if ((cap & NFP_NET_CFG_CTRL_LSO_ANY) != 0) {
1345 		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
1346 		if ((cap & NFP_NET_CFG_CTRL_USO) != 0)
1347 			dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_UDP_TSO;
1348 		if ((cap & NFP_NET_CFG_CTRL_VXLAN) != 0)
1349 			dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO;
1350 	}
1351 
1352 	if ((cap & NFP_NET_CFG_CTRL_GATHER) != 0)
1353 		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
1354 
1355 	cap_extend = hw->super.cap_ext;
1356 	if ((cap_extend & NFP_NET_CFG_CTRL_IPSEC) != 0) {
1357 		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_SECURITY;
1358 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_SECURITY;
1359 	}
1360 
1361 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
1362 		.rx_thresh = {
1363 			.pthresh = DEFAULT_RX_PTHRESH,
1364 			.hthresh = DEFAULT_RX_HTHRESH,
1365 			.wthresh = DEFAULT_RX_WTHRESH,
1366 		},
1367 		.rx_free_thresh = DEFAULT_RX_FREE_THRESH,
1368 		.rx_drop_en = 0,
1369 	};
1370 
1371 	dev_info->default_txconf = (struct rte_eth_txconf) {
1372 		.tx_thresh = {
1373 			.pthresh = DEFAULT_TX_PTHRESH,
1374 			.hthresh = DEFAULT_TX_HTHRESH,
1375 			.wthresh = DEFAULT_TX_WTHRESH,
1376 		},
1377 		.tx_free_thresh = DEFAULT_TX_FREE_THRESH,
1378 		.tx_rs_thresh = DEFAULT_TX_RSBIT_THRESH,
1379 	};
1380 
1381 	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
1382 		.nb_max = max_rx_desc,
1383 		.nb_min = min_rx_desc,
1384 		.nb_align = NFP_ALIGN_RING_DESC,
1385 	};
1386 
1387 	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
1388 		.nb_max = max_tx_desc,
1389 		.nb_min = min_tx_desc,
1390 		.nb_align = NFP_ALIGN_RING_DESC,
1391 		.nb_seg_max = NFP_TX_MAX_SEG,
1392 		.nb_mtu_seg_max = NFP_TX_MAX_MTU_SEG,
1393 	};
1394 
1395 	if ((cap & NFP_NET_CFG_CTRL_RSS_ANY) != 0) {
1396 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
1397 		dev_info->flow_type_rss_offloads = NFP_NET_RSS_CAP;
1398 		dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ;
1399 		dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ;
1400 	}
1401 
1402 	/* Only PF supports getting speed capability. */
1403 	if (hw_priv->is_pf)
1404 		dev_info->speed_capa = hw_priv->pf_dev->speed_capa;
1405 
1406 	return 0;
1407 }
1408 
1409 int
1410 nfp_net_common_init(struct nfp_pf_dev *pf_dev,
1411 		struct nfp_net_hw *hw)
1412 {
1413 	const int stride = 4;
1414 	struct rte_pci_device *pci_dev;
1415 
1416 	pci_dev = pf_dev->pci_dev;
1417 	hw->device_id = pci_dev->id.device_id;
1418 	hw->vendor_id = pci_dev->id.vendor_id;
1419 	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
1420 	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
1421 
1422 	hw->max_rx_queues = nn_cfg_readl(&hw->super, NFP_NET_CFG_MAX_RXRINGS);
1423 	hw->max_tx_queues = nn_cfg_readl(&hw->super, NFP_NET_CFG_MAX_TXRINGS);
1424 	if (hw->max_rx_queues == 0 || hw->max_tx_queues == 0) {
1425 		PMD_INIT_LOG(ERR, "Device %s cannot be used, there are no valid queue "
1426 				"pairs for use.", pci_dev->name);
1427 		return -ENODEV;
1428 	}
1429 
1430 	if (nfp_net_check_dma_mask(pf_dev, pci_dev->name) != 0)
1431 		return -ENODEV;
1432 
1433 	/* Get some of the read-only fields from the config BAR */
1434 	hw->super.cap = nn_cfg_readl(&hw->super, NFP_NET_CFG_CAP);
1435 	hw->super.cap_ext = nn_cfg_readl(&hw->super, NFP_NET_CFG_CAP_WORD1);
1436 	hw->max_mtu = nn_cfg_readl(&hw->super, NFP_NET_CFG_MAX_MTU);
1437 	hw->flbufsz = DEFAULT_FLBUF_SIZE;
1438 
1439 	nfp_net_meta_init_format(hw, pf_dev);
1440 
1441 	/* Read the Rx offset configured from firmware */
1442 	if (pf_dev->ver.major < 2)
1443 		hw->rx_offset = NFP_NET_RX_OFFSET;
1444 	else
1445 		hw->rx_offset = nn_cfg_readl(&hw->super, NFP_NET_CFG_RX_OFFSET);
1446 
1447 	hw->super.ctrl = 0;
1448 	hw->stride_rx = stride;
1449 	hw->stride_tx = stride;
1450 
1451 	return 0;
1452 }
1453 
1454 const uint32_t *
1455 nfp_net_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements)
1456 {
1457 	struct nfp_net_hw *net_hw;
1458 	static const uint32_t ptypes[] = {
1459 		RTE_PTYPE_L2_ETHER,
1460 		RTE_PTYPE_L3_IPV4,
1461 		RTE_PTYPE_L3_IPV4_EXT,
1462 		RTE_PTYPE_L3_IPV6,
1463 		RTE_PTYPE_L3_IPV6_EXT,
1464 		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1465 		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
1466 		RTE_PTYPE_L4_TCP,
1467 		RTE_PTYPE_L4_UDP,
1468 		RTE_PTYPE_L4_FRAG,
1469 		RTE_PTYPE_L4_NONFRAG,
1470 		RTE_PTYPE_L4_ICMP,
1471 		RTE_PTYPE_L4_SCTP,
1472 		RTE_PTYPE_TUNNEL_VXLAN,
1473 		RTE_PTYPE_TUNNEL_NVGRE,
1474 		RTE_PTYPE_TUNNEL_GENEVE,
1475 		RTE_PTYPE_INNER_L2_ETHER,
1476 		RTE_PTYPE_INNER_L3_IPV4,
1477 		RTE_PTYPE_INNER_L3_IPV4_EXT,
1478 		RTE_PTYPE_INNER_L3_IPV6,
1479 		RTE_PTYPE_INNER_L3_IPV6_EXT,
1480 		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
1481 		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
1482 		RTE_PTYPE_INNER_L4_TCP,
1483 		RTE_PTYPE_INNER_L4_UDP,
1484 		RTE_PTYPE_INNER_L4_FRAG,
1485 		RTE_PTYPE_INNER_L4_NONFRAG,
1486 		RTE_PTYPE_INNER_L4_ICMP,
1487 		RTE_PTYPE_INNER_L4_SCTP,
1488 	};
1489 
1490 	if (dev->rx_pkt_burst == NULL)
1491 		return NULL;
1492 
1493 	net_hw = dev->data->dev_private;
1494 	if ((net_hw->super.cap_ext & NFP_NET_CFG_CTRL_PKT_TYPE) == 0)
1495 		return NULL;
1496 
1497 	*no_of_elements = RTE_DIM(ptypes);
1498 	return ptypes;
1499 }
1500 
1501 int
1502 nfp_net_ptypes_set(struct rte_eth_dev *dev,
1503 		uint32_t ptype_mask)
1504 {
1505 	int ret;
1506 	uint32_t update;
1507 	uint32_t ctrl_ext;
1508 	struct nfp_hw *hw;
1509 	struct nfp_net_hw *net_hw;
1510 
1511 	net_hw = dev->data->dev_private;
1512 	hw = &net_hw->super;
1513 
1514 	if ((hw->cap_ext & NFP_NET_CFG_CTRL_PKT_TYPE) == 0)
1515 		return -ENOTSUP;
1516 
1517 	ctrl_ext = hw->ctrl_ext;
1518 	if (ptype_mask == 0) {
1519 		if ((ctrl_ext & NFP_NET_CFG_CTRL_PKT_TYPE) == 0)
1520 			return 0;
1521 
1522 		ctrl_ext &= ~NFP_NET_CFG_CTRL_PKT_TYPE;
1523 	} else {
1524 		if ((ctrl_ext & NFP_NET_CFG_CTRL_PKT_TYPE) != 0)
1525 			return 0;
1526 
1527 		ctrl_ext |= NFP_NET_CFG_CTRL_PKT_TYPE;
1528 	}
1529 
1530 	update = NFP_NET_CFG_UPDATE_GEN;
1531 
1532 	ret = nfp_ext_reconfig(hw, ctrl_ext, update);
1533 	if (ret != 0)
1534 		return ret;
1535 
1536 	hw->ctrl_ext = ctrl_ext;
1537 
1538 	return 0;
1539 }
1540 
1541 int
1542 nfp_rx_queue_intr_enable(struct rte_eth_dev *dev,
1543 		uint16_t queue_id)
1544 {
1545 	uint16_t base = 0;
1546 	struct nfp_net_hw *hw;
1547 	struct rte_pci_device *pci_dev;
1548 
1549 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1550 	if (rte_intr_type_get(pci_dev->intr_handle) != RTE_INTR_HANDLE_UIO)
1551 		base = 1;
1552 
1553 	/* Make sure all updates are written before un-masking */
1554 	rte_wmb();
1555 
1556 	hw = nfp_net_get_hw(dev);
1557 	nn_cfg_writeb(&hw->super, NFP_NET_CFG_ICR(base + queue_id),
1558 			NFP_NET_CFG_ICR_UNMASKED);
1559 	return 0;
1560 }
1561 
1562 int
1563 nfp_rx_queue_intr_disable(struct rte_eth_dev *dev,
1564 		uint16_t queue_id)
1565 {
1566 	uint16_t base = 0;
1567 	struct nfp_net_hw *hw;
1568 	struct rte_pci_device *pci_dev;
1569 
1570 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1571 	if (rte_intr_type_get(pci_dev->intr_handle) != RTE_INTR_HANDLE_UIO)
1572 		base = 1;
1573 
1574 	/* Make sure all updates are written before un-masking */
1575 	rte_wmb();
1576 
1577 	hw = nfp_net_get_hw(dev);
1578 	nn_cfg_writeb(&hw->super, NFP_NET_CFG_ICR(base + queue_id), NFP_NET_CFG_ICR_RXTX);
1579 
1580 	return 0;
1581 }
1582 
1583 static void
1584 nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
1585 {
1586 	struct rte_eth_link link;
1587 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1588 
1589 	rte_eth_linkstatus_get(dev, &link);
1590 	if (link.link_status != 0)
1591 		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s.",
1592 				dev->data->port_id, link.link_speed,
1593 				link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
1594 				"full-duplex" : "half-duplex");
1595 	else
1596 		PMD_DRV_LOG(INFO, " Port %d: Link Down.", dev->data->port_id);
1597 
1598 	PMD_DRV_LOG(INFO, "PCI Address: " PCI_PRI_FMT,
1599 			pci_dev->addr.domain, pci_dev->addr.bus,
1600 			pci_dev->addr.devid, pci_dev->addr.function);
1601 }
1602 
1603 /*
1604  * Unmask an interrupt
1605  *
1606  * If MSI-X auto-masking is enabled clear the mask bit, otherwise
1607  * clear the ICR for the entry.
1608  */
1609 void
1610 nfp_net_irq_unmask(struct rte_eth_dev *dev)
1611 {
1612 	struct nfp_net_hw *hw;
1613 	struct rte_pci_device *pci_dev;
1614 
1615 	hw = nfp_net_get_hw(dev);
1616 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1617 
1618 	/* Make sure all updates are written before un-masking */
1619 	rte_wmb();
1620 
1621 	if ((hw->super.ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) != 0) {
1622 		/* If MSI-X auto-masking is used, clear the entry */
1623 		rte_intr_ack(pci_dev->intr_handle);
1624 	} else {
1625 		nn_cfg_writeb(&hw->super, NFP_NET_CFG_ICR(NFP_NET_IRQ_LSC_IDX),
1626 				NFP_NET_CFG_ICR_UNMASKED);
1627 	}
1628 }
1629 
1630 /**
1631  * Interrupt handler registered as an alarm callback for delayed handling of a
1632  * specific interrupt, waiting for the NIC state to become stable. As the NFP
1633  * interrupt state is not stable right after the link goes down, it needs to
1634  * wait 4 seconds to get a stable status.
1635  *
1636  * @param param
1637  *   The address of parameter (struct rte_eth_dev *)
1638  */
1639 void
1640 nfp_net_dev_interrupt_delayed_handler(void *param)
1641 {
1642 	struct rte_eth_dev *dev = param;
1643 
1644 	nfp_net_link_update(dev, 0);
1645 	rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
1646 
1647 	nfp_net_dev_link_status_print(dev);
1648 
1649 	/* Unmasking */
1650 	nfp_net_irq_unmask(dev);
1651 }
1652 
1653 void
1654 nfp_net_dev_interrupt_handler(void *param)
1655 {
1656 	int64_t timeout;
1657 	struct rte_eth_link link;
1658 	struct rte_eth_dev *dev = param;
1659 
1660 	PMD_DRV_LOG(DEBUG, "We got an LSC interrupt.");
1661 
1662 	rte_eth_linkstatus_get(dev, &link);
1663 
1664 	nfp_net_link_update(dev, 0);
1665 
1666 	/* Link was down, so it is likely to come up */
1667 	if (link.link_status == 0) {
1668 		/* Handle it 1 sec later, waiting for it to become stable */
1669 		timeout = NFP_NET_LINK_UP_CHECK_TIMEOUT;
1670 	} else {  /* Link was up, so it is likely to go down */
1671 		/* Handle it 4 sec later, waiting for it to become stable */
1672 		timeout = NFP_NET_LINK_DOWN_CHECK_TIMEOUT;
1673 	}
1673 	}
1674 
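	/* The alarm period is in microseconds; the timeouts above are in milliseconds. */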
1675 	if (rte_eal_alarm_set(timeout * 1000,
1676 			nfp_net_dev_interrupt_delayed_handler,
1677 			(void *)dev) != 0) {
1678 		PMD_INIT_LOG(ERR, "Error setting alarm.");
1679 		/* Unmasking */
1680 		nfp_net_irq_unmask(dev);
1681 	}
1682 }
1683 
1684 int
1685 nfp_net_dev_mtu_set(struct rte_eth_dev *dev,
1686 		uint16_t mtu)
1687 {
1688 	struct nfp_net_hw *hw;
1689 
1690 	hw = nfp_net_get_hw(dev);
1691 
1692 	/* MTU setting is forbidden if port is started */
1693 	if (dev->data->dev_started) {
1694 		PMD_DRV_LOG(ERR, "Port %d must be stopped before configuration.",
1695 				dev->data->port_id);
1696 		return -EBUSY;
1697 	}
1698 
1699 	/* MTU larger than current mbufsize not supported */
1700 	if (mtu > hw->flbufsz) {
1701 		PMD_DRV_LOG(ERR, "MTU (%u) larger than the current mbuf size (%u) is not supported.",
1702 				mtu, hw->flbufsz);
1703 		return -ERANGE;
1704 	}
1705 
1706 	/* Writing to configuration space */
1707 	nn_cfg_writel(&hw->super, NFP_NET_CFG_MTU, mtu);
1708 
1709 	hw->mtu = mtu;
1710 
1711 	return 0;
1712 }
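/*
 * Reached through rte_eth_dev_set_mtu(); a minimal sketch of the expected flow:
 *
 *   rte_eth_dev_stop(port_id);            // the port must be stopped first
 *   rte_eth_dev_set_mtu(port_id, 9000);   // fails with -ERANGE if > mbuf size
 *   rte_eth_dev_start(port_id);
 */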
1713 
1714 int
1715 nfp_net_vlan_offload_set(struct rte_eth_dev *dev,
1716 		int mask)
1717 {
1718 	int ret;
1719 	uint32_t update;
1720 	uint32_t new_ctrl;
1721 	struct nfp_hw *hw;
1722 	uint64_t rx_offload;
1723 	struct nfp_net_hw *net_hw;
1724 	uint32_t rxvlan_ctrl = 0;
1725 
1726 	net_hw = nfp_net_get_hw(dev);
1727 	hw = &net_hw->super;
1728 	rx_offload = dev->data->dev_conf.rxmode.offloads;
1729 	new_ctrl = hw->ctrl;
1730 
1731 	/* VLAN stripping setting */
1732 	if ((mask & RTE_ETH_VLAN_STRIP_MASK) != 0) {
1733 		nfp_net_enable_rxvlan_cap(net_hw, &rxvlan_ctrl);
1734 		if ((rx_offload & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) != 0)
1735 			new_ctrl |= rxvlan_ctrl;
1736 		else
1737 			new_ctrl &= ~rxvlan_ctrl;
1738 	}
1739 
1740 	/* QinQ stripping setting */
1741 	if ((mask & RTE_ETH_QINQ_STRIP_MASK) != 0) {
1742 		if ((rx_offload & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) != 0)
1743 			new_ctrl |= NFP_NET_CFG_CTRL_RXQINQ;
1744 		else
1745 			new_ctrl &= ~NFP_NET_CFG_CTRL_RXQINQ;
1746 	}
1747 
1748 	if (new_ctrl == hw->ctrl)
1749 		return 0;
1750 
1751 	update = NFP_NET_CFG_UPDATE_GEN;
1752 
1753 	ret = nfp_reconfig(hw, new_ctrl, update);
1754 	if (ret != 0)
1755 		return ret;
1756 
1757 	hw->ctrl = new_ctrl;
1758 
1759 	return 0;
1760 }
1761 
1762 static int
1763 nfp_net_rss_reta_write(struct rte_eth_dev *dev,
1764 		struct rte_eth_rss_reta_entry64 *reta_conf,
1765 		uint16_t reta_size)
1766 {
1767 	uint16_t i;
1768 	uint16_t j;
1769 	uint16_t idx;
1770 	uint8_t mask;
1771 	uint32_t reta;
1772 	uint16_t shift;
1773 	struct nfp_hw *hw;
1774 	struct nfp_net_hw *net_hw;
1775 
1776 	net_hw = nfp_net_get_hw(dev);
1777 	hw = &net_hw->super;
1778 
1779 	if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
1780 		PMD_DRV_LOG(ERR, "The size of the configured hash lookup table (%hu)"
1781 				" does not match the size supported by hardware (%d).",
1782 				reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
1783 		return -EINVAL;
1784 	}
1785 
1786 	/*
1787 	 * Update the redirection table. There are 128 8-bit entries, which
1788 	 * can be managed as 32 32-bit entries.
1789 	 */
1790 	for (i = 0; i < reta_size; i += 4) {
1791 		/* Handling 4 RSS entries per loop */
1792 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
1793 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
1794 		mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
1795 		if (mask == 0)
1796 			continue;
1797 
1798 		reta = 0;
1799 
1800 		/* If all 4 entries were set, there is no need to read the RETA register */
1801 		if (mask != 0xF)
1802 			reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + i);
1803 
1804 		for (j = 0; j < 4; j++) {
1805 			if ((mask & (0x1 << j)) == 0)
1806 				continue;
1807 
1808 			/* Clearing the entry bits */
1809 			if (mask != 0xF)
1810 				reta &= ~(0xFF << (8 * j));
1811 
1812 			reta |= reta_conf[idx].reta[shift + j] << (8 * j);
1813 		}
1814 
1815 		nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift, reta);
1816 	}
1817 
1818 	return 0;
1819 }
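
/*
 * Worked example of the packing done above (illustrative only): with the
 * four 8-bit redirection entries {5, 6, 7, 8} for a given offset, the
 * 32-bit word written to the RETA register is
 *
 *     5 | (6 << 8) | (7 << 16) | (8 << 24) == 0x08070605
 */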
1820 
1821 /* Update the Redirection Table (RETA) of the Receive Side Scaling of an Ethernet device. */
1822 int
1823 nfp_net_reta_update(struct rte_eth_dev *dev,
1824 		struct rte_eth_rss_reta_entry64 *reta_conf,
1825 		uint16_t reta_size)
1826 {
1827 	int ret;
1828 	uint32_t update;
1829 	struct nfp_hw *hw;
1830 	struct nfp_net_hw *net_hw;
1831 
1832 	net_hw = nfp_net_get_hw(dev);
1833 	hw = &net_hw->super;
1834 
1835 	if ((hw->ctrl & NFP_NET_CFG_CTRL_RSS_ANY) == 0)
1836 		return -EINVAL;
1837 
1838 	ret = nfp_net_rss_reta_write(dev, reta_conf, reta_size);
1839 	if (ret != 0)
1840 		return ret;
1841 
1842 	update = NFP_NET_CFG_UPDATE_RSS;
1843 
1844 	if (nfp_reconfig(hw, hw->ctrl, update) != 0)
1845 		return -EIO;
1846 
1847 	return 0;
1848 }
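
/*
 * Illustrative usage from an application (not part of this driver): the RETA
 * update callback above is reached through rte_eth_dev_rss_reta_update() and
 * expects exactly NFP_NET_CFG_RSS_ITBL_SZ (128) entries. A minimal sketch
 * that spreads the table round-robin over "nb_rx_queues" queues, assuming
 * "port_id" refers to an NFP port:
 *
 *     struct rte_eth_rss_reta_entry64 reta[2];
 *     uint16_t i;
 *
 *     memset(reta, 0, sizeof(reta));
 *     reta[0].mask = UINT64_MAX;
 *     reta[1].mask = UINT64_MAX;
 *     for (i = 0; i < 128; i++)
 *         reta[i / RTE_ETH_RETA_GROUP_SIZE].reta[i % RTE_ETH_RETA_GROUP_SIZE] =
 *                 i % nb_rx_queues;
 *
 *     ret = rte_eth_dev_rss_reta_update(port_id, reta, 128);
 */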
1849 
1850 /* Query the Redirection Table (RETA) of the Receive Side Scaling of an Ethernet device. */
1851 int
1852 nfp_net_reta_query(struct rte_eth_dev *dev,
1853 		struct rte_eth_rss_reta_entry64 *reta_conf,
1854 		uint16_t reta_size)
1855 {
1856 	uint16_t i;
1857 	uint16_t j;
1858 	uint16_t idx;
1859 	uint8_t mask;
1860 	uint32_t reta;
1861 	uint16_t shift;
1862 	struct nfp_hw *hw;
1863 	struct nfp_net_hw *net_hw;
1864 
1865 	net_hw = nfp_net_get_hw(dev);
1866 	hw = &net_hw->super;
1867 
1868 	if ((hw->ctrl & NFP_NET_CFG_CTRL_RSS_ANY) == 0)
1869 		return -EINVAL;
1870 
1871 	if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
1872 		PMD_DRV_LOG(ERR, "The size of the configured hash lookup table (%d)"
1873 				" does not match the size supported by hardware (%d).",
1874 				reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
1875 		return -EINVAL;
1876 	}
1877 
1878 	/*
1879 	 * Read the redirection table. There are 128 8-bit entries, which
1880 	 * can be managed as 32 32-bit entries.
1881 	 */
1882 	for (i = 0; i < reta_size; i += 4) {
1883 		/* Handling 4 RSS entries per loop */
1884 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
1885 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
1886 		mask = (reta_conf[idx].mask >> shift) & 0xF;
1887 
1888 		if (mask == 0)
1889 			continue;
1890 
1891 		reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift);
1892 		for (j = 0; j < 4; j++) {
1893 			if ((mask & (0x1 << j)) == 0)
1894 				continue;
1895 
1896 			reta_conf[idx].reta[shift + j] =
1897 					(uint8_t)((reta >> (8 * j)) & 0xFF);
1898 		}
1899 	}
1900 
1901 	return 0;
1902 }
1903 
1904 static int
1905 nfp_net_rss_hash_write(struct rte_eth_dev *dev,
1906 		struct rte_eth_rss_conf *rss_conf)
1907 {
1908 	uint8_t i;
1909 	uint8_t key;
1910 	uint64_t rss_hf;
1911 	struct nfp_hw *hw;
1912 	struct nfp_net_hw *net_hw;
1913 	uint32_t cfg_rss_ctrl = 0;
1914 
1915 	net_hw = nfp_net_get_hw(dev);
1916 	hw = &net_hw->super;
1917 
1918 	/* Writing the key byte by byte */
1919 	for (i = 0; i < rss_conf->rss_key_len; i++) {
1920 		memcpy(&key, &rss_conf->rss_key[i], 1);
1921 		nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY + i, key);
1922 	}
1923 
1924 	rss_hf = rss_conf->rss_hf;
1925 
1926 	if ((rss_hf & RTE_ETH_RSS_IPV4) != 0)
1927 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4;
1928 
1929 	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) != 0)
1930 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_TCP;
1931 
1932 	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) != 0)
1933 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_UDP;
1934 
1935 	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_SCTP) != 0)
1936 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_SCTP;
1937 
1938 	if ((rss_hf & RTE_ETH_RSS_IPV6) != 0)
1939 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6;
1940 
1941 	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) != 0)
1942 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_TCP;
1943 
1944 	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) != 0)
1945 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_UDP;
1946 
1947 	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_SCTP) != 0)
1948 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_SCTP;
1949 
1950 	cfg_rss_ctrl |= NFP_NET_CFG_RSS_MASK;
1951 
1952 	if (rte_eth_dev_is_repr(dev))
1953 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_CRC32;
1954 	else
1955 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_TOEPLITZ;
1956 
1957 	/* Configuring where to apply the RSS hash */
1958 	nn_cfg_writel(hw, NFP_NET_CFG_RSS_CTRL, cfg_rss_ctrl);
1959 
1960 	/* Writing the key size */
1961 	nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY_SZ, rss_conf->rss_key_len);
1962 
1963 	return 0;
1964 }
1965 
1966 int
1967 nfp_net_rss_hash_update(struct rte_eth_dev *dev,
1968 		struct rte_eth_rss_conf *rss_conf)
1969 {
1970 	uint32_t update;
1971 	uint64_t rss_hf;
1972 	struct nfp_hw *hw;
1973 	struct nfp_net_hw *net_hw;
1974 
1975 	net_hw = nfp_net_get_hw(dev);
1976 	hw = &net_hw->super;
1977 
1978 	rss_hf = rss_conf->rss_hf;
1979 
1980 	/* Checking if RSS is enabled */
1981 	if ((hw->ctrl & NFP_NET_CFG_CTRL_RSS_ANY) == 0) {
1982 		if (rss_hf != 0) {
1983 			PMD_DRV_LOG(ERR, "RSS unsupported.");
1984 			return -EINVAL;
1985 		}
1986 
1987 		return 0; /* Nothing to do */
1988 	}
1989 
1990 	if (rss_conf->rss_key_len > NFP_NET_CFG_RSS_KEY_SZ) {
1991 		PMD_DRV_LOG(ERR, "RSS hash key too long.");
1992 		return -EINVAL;
1993 	}
1994 
1995 	nfp_net_rss_hash_write(dev, rss_conf);
1996 
1997 	update = NFP_NET_CFG_UPDATE_RSS;
1998 
1999 	if (nfp_reconfig(hw, hw->ctrl, update) != 0)
2000 		return -EIO;
2001 
2002 	return 0;
2003 }
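
/*
 * Illustrative usage from an application (not part of this driver): the hash
 * update callback above is reached through rte_eth_dev_rss_hash_update(). A
 * minimal sketch, assuming "port_id" refers to an NFP port and "rss_key" is
 * a Toeplitz key no longer than NFP_NET_CFG_RSS_KEY_SZ (40 bytes assumed here):
 *
 *     struct rte_eth_rss_conf conf = {
 *         .rss_key = rss_key,
 *         .rss_key_len = 40,
 *         .rss_hf = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_TCP |
 *                 RTE_ETH_RSS_NONFRAG_IPV4_UDP,
 *     };
 *
 *     ret = rte_eth_dev_rss_hash_update(port_id, &conf);
 */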
2004 
2005 int
2006 nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev,
2007 		struct rte_eth_rss_conf *rss_conf)
2008 {
2009 	uint8_t i;
2010 	uint8_t key;
2011 	uint64_t rss_hf;
2012 	struct nfp_hw *hw;
2013 	uint32_t cfg_rss_ctrl;
2014 	struct nfp_net_hw *net_hw;
2015 
2016 	net_hw = nfp_net_get_hw(dev);
2017 	hw = &net_hw->super;
2018 
2019 	if ((hw->ctrl & NFP_NET_CFG_CTRL_RSS_ANY) == 0)
2020 		return -EINVAL;
2021 
2022 	rss_hf = rss_conf->rss_hf;
2023 	cfg_rss_ctrl = nn_cfg_readl(hw, NFP_NET_CFG_RSS_CTRL);
2024 
2025 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4) != 0)
2026 		rss_hf |= RTE_ETH_RSS_IPV4;
2027 
2028 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_TCP) != 0)
2029 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
2030 
2031 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_TCP) != 0)
2032 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
2033 
2034 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_UDP) != 0)
2035 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
2036 
2037 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_UDP) != 0)
2038 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
2039 
2040 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6) != 0)
2041 		rss_hf |= RTE_ETH_RSS_IPV6;
2042 
2043 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_SCTP) != 0)
2044 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_SCTP;
2045 
2046 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_SCTP) != 0)
2047 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_SCTP;
2048 
2049 	/* Propagate current RSS hash functions to caller */
2050 	rss_conf->rss_hf = rss_hf;
2051 
2052 	/* Reading the key size */
2053 	rss_conf->rss_key_len = nn_cfg_readl(hw, NFP_NET_CFG_RSS_KEY_SZ);
2054 
2055 	/* Reading the key byte by byte */
2056 	for (i = 0; i < rss_conf->rss_key_len; i++) {
2057 		key = nn_cfg_readb(hw, NFP_NET_CFG_RSS_KEY + i);
2058 		memcpy(&rss_conf->rss_key[i], &key, 1);
2059 	}
2060 
2061 	return 0;
2062 }
2063 
2064 int
2065 nfp_net_rss_config_default(struct rte_eth_dev *dev)
2066 {
2067 	int ret;
2068 	uint8_t i;
2069 	uint8_t j;
2070 	uint16_t queue = 0;
2071 	struct rte_eth_conf *dev_conf;
2072 	struct rte_eth_rss_conf rss_conf;
2073 	uint16_t rx_queues = dev->data->nb_rx_queues;
2074 	struct rte_eth_rss_reta_entry64 nfp_reta_conf[2];
2075 
2076 	nfp_reta_conf[0].mask = ~0x0;
2077 	nfp_reta_conf[1].mask = ~0x0;
2078 
2079 	for (i = 0; i < 0x40; i += 8) {
2080 		for (j = i; j < (i + 8); j++) {
2081 			nfp_reta_conf[0].reta[j] = queue;
2082 			nfp_reta_conf[1].reta[j] = queue++;
2083 			queue %= rx_queues;
2084 		}
2085 	}
2086 
2087 	ret = nfp_net_rss_reta_write(dev, nfp_reta_conf, 0x80);
2088 	if (ret != 0)
2089 		return ret;
2090 
2091 	dev_conf = &dev->data->dev_conf;
2092 	if (dev_conf == NULL) {
2093 		PMD_DRV_LOG(ERR, "Wrong rss conf.");
2094 		return -EINVAL;
2095 	}
2096 
2097 	rss_conf = dev_conf->rx_adv_conf.rss_conf;
2098 	ret = nfp_net_rss_hash_write(dev, &rss_conf);
2099 
2100 	return ret;
2101 }
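
/*
 * Worked example of the default table built above (illustrative only): with
 * 4 RX queues the 128 redirection entries become 0, 1, 2, 3, 0, 1, 2, 3, ...
 * so received flows are spread round-robin across every configured queue.
 */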
2102 
2103 void
2104 nfp_net_stop_rx_queue(struct rte_eth_dev *dev)
2105 {
2106 	uint16_t i;
2107 	struct nfp_net_rxq *this_rx_q;
2108 
2109 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
2110 		this_rx_q = dev->data->rx_queues[i];
2111 		nfp_net_reset_rx_queue(this_rx_q);
2112 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
2113 	}
2114 }
2115 
2116 void
2117 nfp_net_close_rx_queue(struct rte_eth_dev *dev)
2118 {
2119 	uint16_t i;
2120 	struct nfp_net_rxq *this_rx_q;
2121 
2122 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
2123 		this_rx_q = dev->data->rx_queues[i];
2124 		nfp_net_reset_rx_queue(this_rx_q);
2125 		nfp_net_rx_queue_release(dev, i);
2126 	}
2127 }
2128 
2129 void
2130 nfp_net_stop_tx_queue(struct rte_eth_dev *dev)
2131 {
2132 	uint16_t i;
2133 	struct nfp_net_txq *this_tx_q;
2134 
2135 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
2136 		this_tx_q = dev->data->tx_queues[i];
2137 		nfp_net_reset_tx_queue(this_tx_q);
2138 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
2139 	}
2140 }
2141 
2142 void
2143 nfp_net_close_tx_queue(struct rte_eth_dev *dev)
2144 {
2145 	uint16_t i;
2146 	struct nfp_net_txq *this_tx_q;
2147 
2148 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
2149 		this_tx_q = dev->data->tx_queues[i];
2150 		nfp_net_reset_tx_queue(this_tx_q);
2151 		nfp_net_tx_queue_release(dev, i);
2152 	}
2153 }
2154 
2155 int
2156 nfp_net_set_vxlan_port(struct nfp_net_hw *net_hw,
2157 		size_t idx,
2158 		uint16_t port,
2159 		uint32_t ctrl)
2160 {
2161 	uint32_t i;
2162 	struct nfp_hw *hw = &net_hw->super;
2163 
2164 	if (idx >= NFP_NET_N_VXLAN_PORTS) {
2165 		PMD_DRV_LOG(ERR, "The idx value is out of range.");
2166 		return -ERANGE;
2167 	}
2168 
2169 	net_hw->vxlan_ports[idx] = port;
2170 
2171 	for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i += 2) {
2172 		nn_cfg_writel(hw, NFP_NET_CFG_VXLAN_PORT + i * sizeof(port),
2173 				(net_hw->vxlan_ports[i + 1] << 16) | net_hw->vxlan_ports[i]);
2174 	}
2175 
2176 	return nfp_reconfig(hw, ctrl, NFP_NET_CFG_UPDATE_VXLAN);
2177 }
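
/*
 * Illustrative usage from an application (not part of this driver), assuming
 * the UDP tunnel callback registered elsewhere in this PMD ends up calling
 * nfp_net_set_vxlan_port(): add the IANA-assigned VXLAN port.
 *
 *     struct rte_eth_udp_tunnel tunnel = {
 *         .udp_port = 4789,
 *         .prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
 *     };
 *
 *     ret = rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
 */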
2178 
2179 /*
2180  * Firmware using the NFD3 datapath can not handle DMA addresses
2181  * requiring more than 40 bits.
2182  */
2183 int
2184 nfp_net_check_dma_mask(struct nfp_pf_dev *pf_dev,
2185 		char *name)
2186 {
2187 	if (pf_dev->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3 &&
2188 			rte_mem_check_dma_mask(40) != 0) {
2189 		PMD_DRV_LOG(ERR, "Device %s can not be used: DMA mask is restricted to 40 bits.",
2190 				name);
2191 		return -ENODEV;
2192 	}
2193 
2194 	return 0;
2195 }
2196 
2197 int
2198 nfp_net_txrwb_alloc(struct rte_eth_dev *eth_dev)
2199 {
2200 	struct nfp_net_hw *net_hw;
2201 	char mz_name[RTE_MEMZONE_NAMESIZE];
2202 
2203 	net_hw = nfp_net_get_hw(eth_dev);
2204 	snprintf(mz_name, sizeof(mz_name), "%s_TXRWB", eth_dev->data->name);
2205 	net_hw->txrwb_mz = rte_memzone_reserve_aligned(mz_name,
2206 			net_hw->max_tx_queues * sizeof(uint64_t),
2207 			rte_socket_id(),
2208 			RTE_MEMZONE_IOVA_CONTIG, RTE_CACHE_LINE_SIZE);
2209 	if (net_hw->txrwb_mz == NULL) {
2210 		PMD_INIT_LOG(ERR, "Failed to alloc %s for TX ring write back.",
2211 				mz_name);
2212 		return -ENOMEM;
2213 	}
2214 
2215 	return 0;
2216 }
2217 
2218 void
2219 nfp_net_txrwb_free(struct rte_eth_dev *eth_dev)
2220 {
2221 	struct nfp_net_hw *net_hw;
2222 
2223 	net_hw = nfp_net_get_hw(eth_dev);
2224 	if (net_hw->txrwb_mz == NULL)
2225 		return;
2226 
2227 	rte_memzone_free(net_hw->txrwb_mz);
2228 	net_hw->txrwb_mz = NULL;
2229 }
2230 
2231 static void
2232 nfp_net_cfg_read_version(struct nfp_hw *hw,
2233 		struct nfp_pf_dev *pf_dev)
2234 {
2235 	union {
2236 		uint32_t whole;
2237 		struct nfp_net_fw_ver split;
2238 	} version;
2239 
2240 	version.whole = nn_cfg_readl(hw, NFP_NET_CFG_VERSION);
2241 	pf_dev->ver = version.split;
2242 }
2243 
2244 bool
2245 nfp_net_version_check(struct nfp_hw *hw,
2246 		struct nfp_pf_dev *pf_dev)
2247 {
2248 	nfp_net_cfg_read_version(hw, pf_dev);
2249 	if (!nfp_net_is_valid_nfd_version(pf_dev->ver))
2250 		return false;
2251 
2252 	if (!nfp_net_is_valid_version_class(pf_dev->ver))
2253 		return false;
2254 
2255 	return true;
2256 }
2257 
2258 static void
2259 nfp_net_get_nsp_info(struct nfp_net_hw_priv *hw_priv,
2260 		char *nsp_version)
2261 {
2262 	struct nfp_nsp *nsp;
2263 
2264 	nsp = nfp_nsp_open(hw_priv->pf_dev->cpp);
2265 	if (nsp == NULL)
2266 		return;
2267 
2268 	snprintf(nsp_version, FW_VER_LEN, "%hu.%hu",
2269 			nfp_nsp_get_abi_ver_major(nsp),
2270 			nfp_nsp_get_abi_ver_minor(nsp));
2271 
2272 	nfp_nsp_close(nsp);
2273 }
2274 
2275 void
2276 nfp_net_get_fw_version(struct nfp_cpp *cpp,
2277 		uint32_t *mip_version)
2278 {
2279 	struct nfp_mip *mip;
2280 
2281 	mip = nfp_mip_open(cpp);
2282 	if (mip == NULL) {
2283 		*mip_version = 0;
2284 		return;
2285 	}
2286 
2287 	*mip_version = nfp_mip_fw_version(mip);
2288 
2289 	nfp_mip_close(mip);
2290 }
2291 
2292 static void
2293 nfp_net_get_mip_name(struct nfp_net_hw_priv *hw_priv,
2294 		char *mip_name)
2295 {
2296 	struct nfp_mip *mip;
2297 
2298 	mip = nfp_mip_open(hw_priv->pf_dev->cpp);
2299 	if (mip == NULL)
2300 		return;
2301 
2302 	strlcpy(mip_name, nfp_mip_name(mip), FW_VER_LEN);
2303 
2304 	nfp_mip_close(mip);
2305 }
2306 
2307 static void
2308 nfp_net_get_app_name(struct nfp_net_hw_priv *hw_priv,
2309 		char *app_name)
2310 {
2311 	switch (hw_priv->pf_dev->app_fw_id) {
2312 	case NFP_APP_FW_CORE_NIC:
2313 		strlcpy(app_name, "nic", FW_VER_LEN);
2314 		break;
2315 	case NFP_APP_FW_FLOWER_NIC:
2316 		strlcpy(app_name, "flower", FW_VER_LEN);
2317 		break;
2318 	default:
2319 		strlcpy(app_name, "unknown", FW_VER_LEN);
2320 		break;
2321 	}
2322 }
2323 
2324 int
2325 nfp_net_firmware_version_get(struct rte_eth_dev *dev,
2326 		char *fw_version,
2327 		size_t fw_size)
2328 {
2329 	struct nfp_net_hw *hw;
2330 	struct nfp_pf_dev *pf_dev;
2331 	struct nfp_net_hw_priv *hw_priv;
2332 	char app_name[FW_VER_LEN] = {0};
2333 	char mip_name[FW_VER_LEN] = {0};
2334 	char nsp_version[FW_VER_LEN] = {0};
2335 	char vnic_version[FW_VER_LEN] = {0};
2336 
2337 	if (fw_size < FW_VER_LEN)
2338 		return FW_VER_LEN;
2339 
2340 	hw = nfp_net_get_hw(dev);
2341 	hw_priv = dev->process_private;
2342 	pf_dev = hw_priv->pf_dev;
2343 
2344 	if (hw->fw_version[0] != 0) {
2345 		snprintf(fw_version, FW_VER_LEN, "%s", hw->fw_version);
2346 		return 0;
2347 	}
2348 
2349 	if (!rte_eth_dev_is_repr(dev)) {
2350 		snprintf(vnic_version, FW_VER_LEN, "%d.%d.%d.%d",
2351 			pf_dev->ver.extend, pf_dev->ver.class,
2352 			pf_dev->ver.major, pf_dev->ver.minor);
2353 	} else {
2354 		snprintf(vnic_version, FW_VER_LEN, "*");
2355 	}
2356 
2357 	nfp_net_get_nsp_info(hw_priv, nsp_version);
2358 	nfp_net_get_mip_name(hw_priv, mip_name);
2359 	nfp_net_get_app_name(hw_priv, app_name);
2360 
2361 	if (nsp_version[0] == 0 || mip_name[0] == 0) {
2362 		snprintf(fw_version, FW_VER_LEN, "%s %s %s %s",
2363 			vnic_version, nsp_version, mip_name, app_name);
2364 		return 0;
2365 	}
2366 
2367 	snprintf(hw->fw_version, FW_VER_LEN, "%s %s %s %s",
2368 			vnic_version, nsp_version, mip_name, app_name);
2369 
2370 	snprintf(fw_version, FW_VER_LEN, "%s", hw->fw_version);
2371 
2372 	return 0;
2373 }
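
/*
 * Illustrative usage from an application (not part of this driver): the
 * version string assembled above is retrieved through the generic ethdev
 * API. A positive return value means the buffer was too small and indicates
 * the required size.
 *
 *     char fw_ver[64];
 *     int rc = rte_eth_dev_fw_version_get(port_id, fw_ver, sizeof(fw_ver));
 *     if (rc == 0)
 *         printf("NFP firmware: %s\n", fw_ver);
 */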
2374 
2375 bool
2376 nfp_net_is_valid_nfd_version(struct nfp_net_fw_ver version)
2377 {
2378 	uint8_t nfd_version = version.extend;
2379 
2380 	if (nfd_version == NFP_NET_CFG_VERSION_DP_NFD3)
2381 		return true;
2382 
2383 	if (nfd_version == NFP_NET_CFG_VERSION_DP_NFDK) {
2384 		if (version.major < 5) {
2385 			PMD_INIT_LOG(ERR, "NFDK must use ABI 5 or newer, found: %d.",
2386 					version.major);
2387 			return false;
2388 		}
2389 
2390 		return true;
2391 	}
2392 
2393 	return false;
2394 }
2395 
2396 bool
2397 nfp_net_is_valid_version_class(struct nfp_net_fw_ver version)
2398 {
2399 	switch (version.class) {
2400 	case NFP_NET_CFG_VERSION_CLASS_GENERIC:
2401 		return true;
2402 	case NFP_NET_CFG_VERSION_CLASS_NO_EMEM:
2403 		return true;
2404 	default:
2405 		return false;
2406 	}
2407 }
2408 
2409 void
2410 nfp_net_ctrl_bar_size_set(struct nfp_pf_dev *pf_dev)
2411 {
2412 	if (pf_dev->ver.class == NFP_NET_CFG_VERSION_CLASS_GENERIC)
2413 		pf_dev->ctrl_bar_size = NFP_NET_CFG_BAR_SZ_32K;
2414 	else
2415 		pf_dev->ctrl_bar_size = NFP_NET_CFG_BAR_SZ_8K;
2416 }
2417 
2418 /* Disable rx and tx functions to allow for reconfiguring. */
2419 int
2420 nfp_net_stop(struct rte_eth_dev *dev)
2421 {
2422 	int ret;
2423 	struct nfp_net_hw *hw;
2424 	struct nfp_net_hw_priv *hw_priv;
2425 
2426 	hw = nfp_net_get_hw(dev);
2427 	hw_priv = dev->process_private;
2428 
2429 	nfp_net_disable_queues(dev);
2430 
2431 	/* Clear queues */
2432 	nfp_net_stop_tx_queue(dev);
2433 	nfp_net_stop_rx_queue(dev);
2434 
2435 	ret = nfp_eth_set_configured(hw_priv->pf_dev->cpp, hw->nfp_idx, 0);
2436 	if (ret < 0)
2437 		return ret;
2438 
2439 	return 0;
2440 }
2441 
2442 static enum rte_eth_fc_mode
2443 nfp_net_get_pause_mode(struct nfp_eth_table_port *eth_port)
2444 {
2445 	enum rte_eth_fc_mode mode;
2446 
2447 	if (eth_port->rx_pause_enabled) {
2448 		if (eth_port->tx_pause_enabled)
2449 			mode = RTE_ETH_FC_FULL;
2450 		else
2451 			mode = RTE_ETH_FC_RX_PAUSE;
2452 	} else {
2453 		if (eth_port->tx_pause_enabled)
2454 			mode = RTE_ETH_FC_TX_PAUSE;
2455 		else
2456 			mode = RTE_ETH_FC_NONE;
2457 	}
2458 
2459 	return mode;
2460 }
2461 
2462 int
2463 nfp_net_flow_ctrl_get(struct rte_eth_dev *dev,
2464 		struct rte_eth_fc_conf *fc_conf)
2465 {
2466 	struct nfp_net_hw_priv *hw_priv;
2467 	struct nfp_eth_table *nfp_eth_table;
2468 	struct nfp_eth_table_port *eth_port;
2469 
2470 	hw_priv = dev->process_private;
2471 	if (hw_priv == NULL || hw_priv->pf_dev == NULL)
2472 		return -EINVAL;
2473 
2474 	nfp_eth_table = hw_priv->pf_dev->nfp_eth_table;
2475 	eth_port = &nfp_eth_table->ports[dev->data->port_id];
2476 
2477 	/* Currently only the RX/TX pause mode switch is supported */
2478 	fc_conf->mode = nfp_net_get_pause_mode(eth_port);
2479 
2480 	return 0;
2481 }
2482 
2483 static int
2484 nfp_net_pause_frame_set(struct nfp_net_hw_priv *hw_priv,
2485 		struct nfp_eth_table_port *eth_port,
2486 		enum rte_eth_fc_mode mode)
2487 {
2488 	int err;
2489 	bool flag;
2490 	struct nfp_nsp *nsp;
2491 
2492 	nsp = nfp_eth_config_start(hw_priv->pf_dev->cpp, eth_port->index);
2493 	if (nsp == NULL) {
2494 		PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle.");
2495 		return -EIO;
2496 	}
2497 
2498 	flag = (mode & RTE_ETH_FC_TX_PAUSE) == 0 ? false : true;
2499 	err = nfp_eth_set_tx_pause(nsp, flag);
2500 	if (err != 0) {
2501 		PMD_DRV_LOG(ERR, "Failed to configure TX pause frame.");
2502 		nfp_eth_config_cleanup_end(nsp);
2503 		return err;
2504 	}
2505 
2506 	flag = (mode & RTE_ETH_FC_RX_PAUSE) == 0 ? false : true;
2507 	err = nfp_eth_set_rx_pause(nsp, flag);
2508 	if (err != 0) {
2509 		PMD_DRV_LOG(ERR, "Failed to configure RX pause frame.");
2510 		nfp_eth_config_cleanup_end(nsp);
2511 		return err;
2512 	}
2513 
2514 	err = nfp_eth_config_commit_end(nsp);
2515 	if (err < 0) {
2516 		PMD_DRV_LOG(ERR, "Failed to configure pause frame.");
2517 		return err;
2518 	}
2519 
2520 	return 0;
2521 }
2522 
2523 int
2524 nfp_net_flow_ctrl_set(struct rte_eth_dev *dev,
2525 		struct rte_eth_fc_conf *fc_conf)
2526 {
2527 	int ret;
2528 	uint8_t idx;
2529 	enum rte_eth_fc_mode set_mode;
2530 	struct nfp_net_hw_priv *hw_priv;
2531 	enum rte_eth_fc_mode original_mode;
2532 	struct nfp_eth_table *nfp_eth_table;
2533 	struct nfp_eth_table_port *eth_port;
2534 
2535 	idx = nfp_net_get_idx(dev);
2536 	hw_priv = dev->process_private;
2537 	if (hw_priv == NULL || hw_priv->pf_dev == NULL)
2538 		return -EINVAL;
2539 
2540 	nfp_eth_table = hw_priv->pf_dev->nfp_eth_table;
2541 	eth_port = &nfp_eth_table->ports[idx];
2542 
2543 	original_mode = nfp_net_get_pause_mode(eth_port);
2544 	set_mode = fc_conf->mode;
2545 
2546 	if (set_mode == original_mode)
2547 		return 0;
2548 
2549 	ret = nfp_net_pause_frame_set(hw_priv, eth_port, set_mode);
2550 	if (ret != 0)
2551 		return ret;
2552 
2553 	/* Update eth_table after modifying RX/TX pause frame mode. */
2554 	eth_port->tx_pause_enabled = (set_mode & RTE_ETH_FC_TX_PAUSE) == 0 ? false : true;
2555 	eth_port->rx_pause_enabled = (set_mode & RTE_ETH_FC_RX_PAUSE) == 0 ? false : true;
2556 
2557 	return 0;
2558 }
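
/*
 * Illustrative usage from an application (not part of this driver): only the
 * "mode" field of the flow control configuration is honoured here. A minimal
 * sketch that enables pause frames in both directions, assuming "port_id"
 * refers to an NFP physical port:
 *
 *     struct rte_eth_fc_conf fc_conf;
 *
 *     memset(&fc_conf, 0, sizeof(fc_conf));
 *     fc_conf.mode = RTE_ETH_FC_FULL;
 *     ret = rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
 */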
2559 
2560 int
2561 nfp_net_fec_get_capability(struct rte_eth_dev *dev,
2562 		struct rte_eth_fec_capa *speed_fec_capa,
2563 		__rte_unused unsigned int num)
2564 {
2565 	uint8_t idx;
2566 	uint16_t speed;
2567 	uint32_t supported_fec;
2568 	struct nfp_net_hw_priv *hw_priv;
2569 	struct nfp_eth_table *nfp_eth_table;
2570 	struct nfp_eth_table_port *eth_port;
2571 
2572 	idx = nfp_net_get_idx(dev);
2573 	hw_priv = dev->process_private;
2574 	if (hw_priv == NULL || hw_priv->pf_dev == NULL)
2575 		return -EINVAL;
2576 
2577 	nfp_eth_table = hw_priv->pf_dev->nfp_eth_table;
2578 	eth_port = &nfp_eth_table->ports[idx];
2579 
2580 	speed = eth_port->speed;
2581 	supported_fec = nfp_eth_supported_fec_modes(eth_port);
2582 	if (speed == 0 || supported_fec == 0) {
2583 		PMD_DRV_LOG(ERR, "FEC modes supported or Speed is invalid.");
2584 		return -EINVAL;
2585 	}
2586 
2587 	if (speed_fec_capa == NULL)
2588 		return NFP_FEC_CAPA_ENTRY_NUM;
2589 
2590 	speed_fec_capa->speed = speed;
2591 
2592 	if ((supported_fec & NFP_FEC_AUTO) != 0)
2593 		speed_fec_capa->capa |= RTE_ETH_FEC_MODE_CAPA_MASK(AUTO);
2594 	if ((supported_fec & NFP_FEC_BASER) != 0)
2595 		speed_fec_capa->capa |= RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
2596 	if ((supported_fec & NFP_FEC_REED_SOLOMON) != 0)
2597 		speed_fec_capa->capa |= RTE_ETH_FEC_MODE_CAPA_MASK(RS);
2598 	if ((supported_fec & NFP_FEC_DISABLED) != 0)
2599 		speed_fec_capa->capa |= RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
2600 
2601 	return NFP_FEC_CAPA_ENTRY_NUM;
2602 }
2603 
2604 static uint32_t
2605 nfp_net_fec_nfp_to_rte(enum nfp_eth_fec fec)
2606 {
2607 	switch (fec) {
2608 	case NFP_FEC_AUTO_BIT:
2609 		return RTE_ETH_FEC_MODE_CAPA_MASK(AUTO);
2610 	case NFP_FEC_BASER_BIT:
2611 		return RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
2612 	case NFP_FEC_REED_SOLOMON_BIT:
2613 		return RTE_ETH_FEC_MODE_CAPA_MASK(RS);
2614 	case NFP_FEC_DISABLED_BIT:
2615 		return RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
2616 	default:
2617 		PMD_DRV_LOG(ERR, "FEC mode is invalid.");
2618 		return 0;
2619 	}
2620 }
2621 
2622 int
2623 nfp_net_fec_get(struct rte_eth_dev *dev,
2624 		uint32_t *fec_capa)
2625 {
2626 	uint8_t idx;
2627 	struct nfp_net_hw_priv *hw_priv;
2628 	struct nfp_eth_table *nfp_eth_table;
2629 	struct nfp_eth_table_port *eth_port;
2630 
2631 	idx = nfp_net_get_idx(dev);
2632 	hw_priv = dev->process_private;
2633 	if (hw_priv == NULL || hw_priv->pf_dev == NULL)
2634 		return -EINVAL;
2635 
2636 	if (dev->data->dev_link.link_status == RTE_ETH_LINK_DOWN) {
2637 		nfp_eth_table = nfp_eth_read_ports(hw_priv->pf_dev->cpp);
		if (nfp_eth_table == NULL) {
			PMD_DRV_LOG(ERR, "Failed to read the port table from NSP.");
			return -EIO;
		}
2638 		hw_priv->pf_dev->nfp_eth_table->ports[idx] = nfp_eth_table->ports[idx];
2639 		free(nfp_eth_table);
2640 	}
2641 
2642 	nfp_eth_table = hw_priv->pf_dev->nfp_eth_table;
2643 	eth_port = &nfp_eth_table->ports[idx];
2644 
2645 	if (!nfp_eth_can_support_fec(eth_port)) {
2646 		PMD_DRV_LOG(ERR, "NFP can not support FEC.");
2647 		return -ENOTSUP;
2648 	}
2649 
2650 	/*
2651 	 * If link is down and AUTO is enabled, AUTO is returned, otherwise,
2652 	 * configured FEC mode is returned.
2653 	 * If link is up, current FEC mode is returned.
2654 	 */
2655 	if (dev->data->dev_link.link_status == RTE_ETH_LINK_DOWN)
2656 		*fec_capa = nfp_net_fec_nfp_to_rte(eth_port->fec);
2657 	else
2658 		*fec_capa = nfp_net_fec_nfp_to_rte(eth_port->act_fec);
2659 
2660 	if (*fec_capa == 0)
2661 		return -EINVAL;
2662 
2663 	return 0;
2664 }
2665 
2666 static enum nfp_eth_fec
2667 nfp_net_fec_rte_to_nfp(uint32_t fec)
2668 {
2669 	switch (fec) {
2670 	case RTE_BIT32(RTE_ETH_FEC_AUTO):
2671 		return NFP_FEC_AUTO_BIT;
2672 	case RTE_BIT32(RTE_ETH_FEC_NOFEC):
2673 		return NFP_FEC_DISABLED_BIT;
2674 	case RTE_BIT32(RTE_ETH_FEC_RS):
2675 		return NFP_FEC_REED_SOLOMON_BIT;
2676 	case RTE_BIT32(RTE_ETH_FEC_BASER):
2677 		return NFP_FEC_BASER_BIT;
2678 	default:
2679 		return NFP_FEC_INVALID_BIT;
2680 	}
2681 }
2682 
2683 int
2684 nfp_net_fec_set(struct rte_eth_dev *dev,
2685 		uint32_t fec_capa)
2686 {
2687 	int ret;
2688 	uint8_t idx;
2689 	enum nfp_eth_fec fec;
2690 	uint32_t supported_fec;
2691 	struct nfp_net_hw_priv *hw_priv;
2692 	struct nfp_eth_table *nfp_eth_table;
2693 	struct nfp_eth_table_port *eth_port;
2694 
2695 	idx = nfp_net_get_idx(dev);
2696 	hw_priv = dev->process_private;
2697 	if (hw_priv == NULL || hw_priv->pf_dev == NULL)
2698 		return -EINVAL;
2699 
2700 	nfp_eth_table = hw_priv->pf_dev->nfp_eth_table;
2701 	eth_port = &nfp_eth_table->ports[idx];
2702 
2703 	supported_fec = nfp_eth_supported_fec_modes(eth_port);
2704 	if (supported_fec == 0) {
2705 		PMD_DRV_LOG(ERR, "NFP can not support FEC.");
2706 		return -ENOTSUP;
2707 	}
2708 
2709 	fec = nfp_net_fec_rte_to_nfp(fec_capa);
2710 	if (fec == NFP_FEC_INVALID_BIT) {
2711 		PMD_DRV_LOG(ERR, "FEC modes is invalid.");
2712 		return -EINVAL;
2713 	}
2714 
2715 	if ((RTE_BIT32(fec) & supported_fec) == 0) {
2716 		PMD_DRV_LOG(ERR, "Unsupported FEC mode is set.");
2717 		return -EIO;
2718 	}
2719 
2720 	ret = nfp_eth_set_fec(hw_priv->pf_dev->cpp, eth_port->index, fec);
2721 	if (ret < 0) {
2722 		PMD_DRV_LOG(ERR, "NFP set FEC mode failed.");
2723 		return ret;
2724 	}
2725 
2726 	return 0;
2727 }
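
/*
 * Illustrative usage from an application (not part of this driver): exactly
 * one FEC capability bit must be requested, since nfp_net_fec_rte_to_nfp()
 * only accepts a single mode. A minimal sketch that requests Reed-Solomon
 * FEC and reads back the active mode, assuming "port_id" refers to an NFP
 * physical port:
 *
 *     uint32_t fec_capa;
 *
 *     ret = rte_eth_fec_set(port_id, RTE_ETH_FEC_MODE_CAPA_MASK(RS));
 *     if (ret == 0)
 *         ret = rte_eth_fec_get(port_id, &fec_capa);
 */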
2728 
2729 uint32_t
2730 nfp_net_get_phyports_from_nsp(struct nfp_pf_dev *pf_dev)
2731 {
2732 	if (pf_dev->multi_pf.enabled)
2733 		return 1;
2734 	else
2735 		return pf_dev->nfp_eth_table->count;
2736 }
2737 
2738 uint32_t
2739 nfp_net_get_phyports_from_fw(struct nfp_pf_dev *pf_dev)
2740 {
2741 	int ret = 0;
2742 	uint8_t total_phyports;
2743 	char pf_name[RTE_ETH_NAME_MAX_LEN];
2744 
2745 	/* Read the number of vNICs created for the PF */
2746 	snprintf(pf_name, sizeof(pf_name), "nfd_cfg_pf%u_num_ports",
2747 			pf_dev->multi_pf.function_id);
2748 	total_phyports = nfp_rtsym_read_le(pf_dev->sym_tbl, pf_name, &ret);
2749 	if (ret != 0 || total_phyports == 0 || total_phyports > 8) {
2750 		PMD_INIT_LOG(ERR, "%s symbol with wrong value", pf_name);
2751 		return 0;
2752 	}
2753 
2754 	return total_phyports;
2755 }
2756 
2757 uint8_t
2758 nfp_function_id_get(const struct nfp_pf_dev *pf_dev,
2759 		uint8_t port_id)
2760 {
2761 	if (pf_dev->multi_pf.enabled)
2762 		return pf_dev->multi_pf.function_id;
2763 
2764 	return port_id;
2765 }
2766 
2767 static int
2768 nfp_net_sriov_check(struct nfp_pf_dev *pf_dev,
2769 		uint16_t cap)
2770 {
2771 	uint16_t cap_vf;
2772 
2773 	cap_vf = nn_readw(pf_dev->vf_cfg_tbl_bar + NFP_NET_VF_CFG_MB_CAP);
2774 	if ((cap_vf & cap) != cap)
2775 		return -ENOTSUP;
2776 
2777 	return 0;
2778 }
2779 
2780 static int
2781 nfp_net_sriov_update(struct nfp_net_hw *net_hw,
2782 		struct nfp_pf_dev *pf_dev,
2783 		uint16_t update)
2784 {
2785 	int ret;
2786 
2787 	/* Reuse NFP_NET_VF_CFG_MB_VF_NUM to pass vf_base_id to FW. */
2788 	ret = nfp_net_vf_reconfig(net_hw, pf_dev, update, pf_dev->vf_base_id,
2789 			NFP_NET_VF_CFG_MB_VF_NUM);
2790 	if (ret != 0) {
2791 		PMD_INIT_LOG(ERR, "Error nfp VF reconfig.");
2792 		return ret;
2793 	}
2794 
2795 	return 0;
2796 }
2797 
2798 static int
2799 nfp_net_vf_queues_config(struct nfp_net_hw *net_hw,
2800 		struct nfp_pf_dev *pf_dev)
2801 {
2802 	int ret;
2803 	uint32_t i;
2804 	uint32_t offset;
2805 
2806 	ret = nfp_net_sriov_check(pf_dev, NFP_NET_VF_CFG_MB_CAP_QUEUE_CONFIG);
2807 	if (ret != 0) {
2808 		if (ret == -ENOTSUP) {
2809 			PMD_INIT_LOG(DEBUG, "Set VF max queue not supported.");
2810 			return 0;
2811 		}
2812 
2813 		PMD_INIT_LOG(ERR, "Set VF max queue failed.");
2814 		return ret;
2815 	}
2816 
2817 	offset = NFP_NET_VF_CFG_MB_SZ + pf_dev->max_vfs * NFP_NET_VF_CFG_SZ;
2818 	for (i = 0; i < pf_dev->sriov_vf; i++) {
2819 		ret = nfp_net_vf_reconfig(net_hw, pf_dev, NFP_NET_VF_CFG_MB_UPD_QUEUE_CONFIG,
2820 				pf_dev->queue_per_vf, pf_dev->vf_base_id + offset + i);
2821 		if (ret != 0) {
2822 			PMD_INIT_LOG(ERR, "Set VF max_queue failed.");
2823 			return ret;
2824 		}
2825 	}
2826 
2827 	return 0;
2828 }
2829 
2830 static int
2831 nfp_net_sriov_init(struct nfp_net_hw *net_hw,
2832 		struct nfp_pf_dev *pf_dev)
2833 {
2834 	int ret;
2835 
2836 	ret = nfp_net_sriov_check(pf_dev, NFP_NET_VF_CFG_MB_CAP_SPLIT);
2837 	if (ret != 0) {
2838 		if (ret == -ENOTSUP) {
2839 			PMD_INIT_LOG(DEBUG, "Set VF split not supported.");
2840 			return 0;
2841 		}
2842 
2843 		PMD_INIT_LOG(ERR, "Set VF split failed.");
2844 		return ret;
2845 	}
2846 
2847 	nn_writeb(pf_dev->sriov_vf, pf_dev->vf_cfg_tbl_bar + NFP_NET_VF_CFG_MB_VF_CNT);
2848 
2849 	ret = nfp_net_sriov_update(net_hw, pf_dev, NFP_NET_VF_CFG_MB_UPD_SPLIT);
2850 	if (ret != 0) {
2851 		PMD_INIT_LOG(ERR, "The nfp sriov update spilt failed.");
2852 		return ret;
2853 	}
2854 
2855 	return 0;
2856 }
2857 
2858 int
2859 nfp_net_vf_config_app_init(struct nfp_net_hw *net_hw,
2860 		struct nfp_pf_dev *pf_dev)
2861 {
2862 	int ret;
2863 
2864 	if (pf_dev->sriov_vf == 0)
2865 		return 0;
2866 
2867 	ret = nfp_net_sriov_init(net_hw, pf_dev);
2868 	if (ret != 0) {
2869 		PMD_INIT_LOG(ERR, "Failed to init sriov module.");
2870 		return ret;
2871 	}
2872 
2873 	ret = nfp_net_vf_queues_config(net_hw, pf_dev);
2874 	if (ret != 0) {
2875 		PMD_INIT_LOG(ERR, "Failed to config vf queue.");
2876 		return ret;
2877 	}
2878 
2879 	return 0;
2880 }
2881 
2882 static inline bool
2883 nfp_net_meta_has_no_port_type(__rte_unused struct nfp_net_meta_parsed *meta)
2884 {
2885 	return true;
2886 }
2887 
2888 static inline bool
2889 nfp_net_meta_is_not_pf_port(__rte_unused struct nfp_net_meta_parsed *meta)
2890 {
2891 	return false;
2892 }
2893 
2894 static inline bool
2895 nfp_net_meta_is_pf_port(struct nfp_net_meta_parsed *meta)
2896 {
2897 	return nfp_flower_port_is_phy_port(meta->port_id);
2898 }
2899 
2900 bool
2901 nfp_net_recv_pkt_meta_check_register(struct nfp_net_hw_priv *hw_priv)
2902 {
2903 	struct nfp_pf_dev *pf_dev;
2904 
2905 	pf_dev = hw_priv->pf_dev;
2906 	if (!hw_priv->is_pf) {
2907 		pf_dev->recv_pkt_meta_check_t = nfp_net_meta_has_no_port_type;
2908 		return true;
2909 	}
2910 
2911 	switch (pf_dev->app_fw_id) {
2912 	case NFP_APP_FW_CORE_NIC:
2913 		pf_dev->recv_pkt_meta_check_t = nfp_net_meta_has_no_port_type;
2914 		break;
2915 	case NFP_APP_FW_FLOWER_NIC:
2916 		if (pf_dev->multi_pf.enabled)
2917 			pf_dev->recv_pkt_meta_check_t = nfp_net_meta_is_pf_port;
2918 		else
2919 			pf_dev->recv_pkt_meta_check_t = nfp_net_meta_is_not_pf_port;
2920 		break;
2921 	default:
2922 		PMD_INIT_LOG(ERR, "Unsupported Firmware loaded.");
2923 		return false;
2924 	}
2925 
2926 	return true;
2927 }
2928