1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2014-2018 Netronome Systems, Inc.
3  * All rights reserved.
4  *
5  * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
6  */
7 
8 #include "nfp_net_common.h"
9 
10 #include <rte_alarm.h>
11 
12 #include "flower/nfp_flower_representor.h"
13 #include "nfd3/nfp_nfd3.h"
14 #include "nfdk/nfp_nfdk.h"
15 #include "nfpcore/nfp_mip.h"
16 #include "nfpcore/nfp_nsp.h"
17 #include "nfp_logs.h"
18 
19 #define NFP_TX_MAX_SEG       UINT8_MAX
20 #define NFP_TX_MAX_MTU_SEG   8
21 
22 #define NFP_NET_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
23 #define NFP_NET_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
24 
25 #define DEFAULT_FLBUF_SIZE        9216
26 #define NFP_ETH_OVERHEAD \
27 	(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + RTE_VLAN_HLEN * 2)
28 
29 enum nfp_xstat_group {
30 	NFP_XSTAT_GROUP_NET,
31 	NFP_XSTAT_GROUP_MAC
32 };
33 
34 struct nfp_xstat {
35 	char name[RTE_ETH_XSTATS_NAME_SIZE];
36 	int offset;
37 	enum nfp_xstat_group group;
38 };
39 
40 #define NFP_XSTAT_NET(_name, _offset) {                 \
41 	.name = _name,                                  \
42 	.offset = NFP_NET_CFG_STATS_##_offset,          \
43 	.group = NFP_XSTAT_GROUP_NET,                   \
44 }
45 
46 #define NFP_XSTAT_MAC(_name, _offset) {                 \
47 	.name = _name,                                  \
48 	.offset = NFP_MAC_STATS_##_offset,              \
49 	.group = NFP_XSTAT_GROUP_MAC,                   \
50 }
51 
52 static const struct nfp_xstat nfp_net_xstats[] = {
53 	/*
54 	 * Basic xstats available on both VF and PF.
55 	 * Note that if new statistics of group NFP_XSTAT_GROUP_NET
56 	 * are added to this array, they must appear before any statistics
57 	 * of group NFP_XSTAT_GROUP_MAC.
58 	 */
59 	NFP_XSTAT_NET("rx_good_packets_mc", RX_MC_FRAMES),
60 	NFP_XSTAT_NET("tx_good_packets_mc", TX_MC_FRAMES),
61 	NFP_XSTAT_NET("rx_good_packets_bc", RX_BC_FRAMES),
62 	NFP_XSTAT_NET("tx_good_packets_bc", TX_BC_FRAMES),
63 	NFP_XSTAT_NET("rx_good_bytes_uc", RX_UC_OCTETS),
64 	NFP_XSTAT_NET("tx_good_bytes_uc", TX_UC_OCTETS),
65 	NFP_XSTAT_NET("rx_good_bytes_mc", RX_MC_OCTETS),
66 	NFP_XSTAT_NET("tx_good_bytes_mc", TX_MC_OCTETS),
67 	NFP_XSTAT_NET("rx_good_bytes_bc", RX_BC_OCTETS),
68 	NFP_XSTAT_NET("tx_good_bytes_bc", TX_BC_OCTETS),
69 	NFP_XSTAT_NET("tx_missed_errors", TX_DISCARDS),
70 	NFP_XSTAT_NET("bpf_pass_pkts", APP0_FRAMES),
71 	NFP_XSTAT_NET("bpf_pass_bytes", APP0_BYTES),
72 	NFP_XSTAT_NET("bpf_app1_pkts", APP1_FRAMES),
73 	NFP_XSTAT_NET("bpf_app1_bytes", APP1_BYTES),
74 	NFP_XSTAT_NET("bpf_app2_pkts", APP2_FRAMES),
75 	NFP_XSTAT_NET("bpf_app2_bytes", APP2_BYTES),
76 	NFP_XSTAT_NET("bpf_app3_pkts", APP3_FRAMES),
77 	NFP_XSTAT_NET("bpf_app3_bytes", APP3_BYTES),
78 	/*
79 	 * MAC xstats available only on PF. These statistics are not available for VFs: when the
80 	 * VF is initialized, the PF is still bound to the kernel driver and has not been set up
81 	 * by the PMD. As such, the PMD cannot obtain a CPP handle and access the rtsym_table in
82 	 * order to get the pointer to the start of the MAC statistics counters.
83 	 */
84 	NFP_XSTAT_MAC("mac.rx_octets", RX_IN_OCTS),
85 	NFP_XSTAT_MAC("mac.rx_frame_too_long_errors", RX_FRAME_TOO_LONG_ERRORS),
86 	NFP_XSTAT_MAC("mac.rx_range_length_errors", RX_RANGE_LENGTH_ERRORS),
87 	NFP_XSTAT_MAC("mac.rx_vlan_received_ok", RX_VLAN_RECEIVED_OK),
88 	NFP_XSTAT_MAC("mac.rx_errors", RX_IN_ERRORS),
89 	NFP_XSTAT_MAC("mac.rx_broadcast_pkts", RX_IN_BROADCAST_PKTS),
90 	NFP_XSTAT_MAC("mac.rx_drop_events", RX_DROP_EVENTS),
91 	NFP_XSTAT_MAC("mac.rx_alignment_errors", RX_ALIGNMENT_ERRORS),
92 	NFP_XSTAT_MAC("mac.rx_pause_mac_ctrl_frames", RX_PAUSE_MAC_CTRL_FRAMES),
93 	NFP_XSTAT_MAC("mac.rx_frames_received_ok", RX_FRAMES_RECEIVED_OK),
94 	NFP_XSTAT_MAC("mac.rx_frame_check_sequence_errors", RX_FRAME_CHECK_SEQ_ERRORS),
95 	NFP_XSTAT_MAC("mac.rx_unicast_pkts", RX_UNICAST_PKTS),
96 	NFP_XSTAT_MAC("mac.rx_multicast_pkts", RX_MULTICAST_PKTS),
97 	NFP_XSTAT_MAC("mac.rx_pkts", RX_PKTS),
98 	NFP_XSTAT_MAC("mac.rx_undersize_pkts", RX_UNDERSIZE_PKTS),
99 	NFP_XSTAT_MAC("mac.rx_pkts_64_octets", RX_PKTS_64_OCTS),
100 	NFP_XSTAT_MAC("mac.rx_pkts_65_to_127_octets", RX_PKTS_65_TO_127_OCTS),
101 	NFP_XSTAT_MAC("mac.rx_pkts_128_to_255_octets", RX_PKTS_128_TO_255_OCTS),
102 	NFP_XSTAT_MAC("mac.rx_pkts_256_to_511_octets", RX_PKTS_256_TO_511_OCTS),
103 	NFP_XSTAT_MAC("mac.rx_pkts_512_to_1023_octets", RX_PKTS_512_TO_1023_OCTS),
104 	NFP_XSTAT_MAC("mac.rx_pkts_1024_to_1518_octets", RX_PKTS_1024_TO_1518_OCTS),
105 	NFP_XSTAT_MAC("mac.rx_pkts_1519_to_max_octets", RX_PKTS_1519_TO_MAX_OCTS),
106 	NFP_XSTAT_MAC("mac.rx_jabbers", RX_JABBERS),
107 	NFP_XSTAT_MAC("mac.rx_fragments", RX_FRAGMENTS),
108 	NFP_XSTAT_MAC("mac.rx_oversize_pkts", RX_OVERSIZE_PKTS),
109 	NFP_XSTAT_MAC("mac.rx_pause_frames_class0", RX_PAUSE_FRAMES_CLASS0),
110 	NFP_XSTAT_MAC("mac.rx_pause_frames_class1", RX_PAUSE_FRAMES_CLASS1),
111 	NFP_XSTAT_MAC("mac.rx_pause_frames_class2", RX_PAUSE_FRAMES_CLASS2),
112 	NFP_XSTAT_MAC("mac.rx_pause_frames_class3", RX_PAUSE_FRAMES_CLASS3),
113 	NFP_XSTAT_MAC("mac.rx_pause_frames_class4", RX_PAUSE_FRAMES_CLASS4),
114 	NFP_XSTAT_MAC("mac.rx_pause_frames_class5", RX_PAUSE_FRAMES_CLASS5),
115 	NFP_XSTAT_MAC("mac.rx_pause_frames_class6", RX_PAUSE_FRAMES_CLASS6),
116 	NFP_XSTAT_MAC("mac.rx_pause_frames_class7", RX_PAUSE_FRAMES_CLASS7),
117 	NFP_XSTAT_MAC("mac.rx_mac_ctrl_frames_received", RX_MAC_CTRL_FRAMES_REC),
118 	NFP_XSTAT_MAC("mac.rx_mac_head_drop", RX_MAC_HEAD_DROP),
119 	NFP_XSTAT_MAC("mac.tx_queue_drop", TX_QUEUE_DROP),
120 	NFP_XSTAT_MAC("mac.tx_octets", TX_OUT_OCTS),
121 	NFP_XSTAT_MAC("mac.tx_vlan_transmitted_ok", TX_VLAN_TRANSMITTED_OK),
122 	NFP_XSTAT_MAC("mac.tx_errors", TX_OUT_ERRORS),
123 	NFP_XSTAT_MAC("mac.tx_broadcast_pkts", TX_BROADCAST_PKTS),
124 	NFP_XSTAT_MAC("mac.tx_pause_mac_ctrl_frames", TX_PAUSE_MAC_CTRL_FRAMES),
125 	NFP_XSTAT_MAC("mac.tx_frames_transmitted_ok", TX_FRAMES_TRANSMITTED_OK),
126 	NFP_XSTAT_MAC("mac.tx_unicast_pkts", TX_UNICAST_PKTS),
127 	NFP_XSTAT_MAC("mac.tx_multicast_pkts", TX_MULTICAST_PKTS),
128 	NFP_XSTAT_MAC("mac.tx_pkts_64_octets", TX_PKTS_64_OCTS),
129 	NFP_XSTAT_MAC("mac.tx_pkts_65_to_127_octets", TX_PKTS_65_TO_127_OCTS),
130 	NFP_XSTAT_MAC("mac.tx_pkts_128_to_255_octets", TX_PKTS_128_TO_255_OCTS),
131 	NFP_XSTAT_MAC("mac.tx_pkts_256_to_511_octets", TX_PKTS_256_TO_511_OCTS),
132 	NFP_XSTAT_MAC("mac.tx_pkts_512_to_1023_octets", TX_PKTS_512_TO_1023_OCTS),
133 	NFP_XSTAT_MAC("mac.tx_pkts_1024_to_1518_octets", TX_PKTS_1024_TO_1518_OCTS),
134 	NFP_XSTAT_MAC("mac.tx_pkts_1519_to_max_octets", TX_PKTS_1519_TO_MAX_OCTS),
135 	NFP_XSTAT_MAC("mac.tx_pause_frames_class0", TX_PAUSE_FRAMES_CLASS0),
136 	NFP_XSTAT_MAC("mac.tx_pause_frames_class1", TX_PAUSE_FRAMES_CLASS1),
137 	NFP_XSTAT_MAC("mac.tx_pause_frames_class2", TX_PAUSE_FRAMES_CLASS2),
138 	NFP_XSTAT_MAC("mac.tx_pause_frames_class3", TX_PAUSE_FRAMES_CLASS3),
139 	NFP_XSTAT_MAC("mac.tx_pause_frames_class4", TX_PAUSE_FRAMES_CLASS4),
140 	NFP_XSTAT_MAC("mac.tx_pause_frames_class5", TX_PAUSE_FRAMES_CLASS5),
141 	NFP_XSTAT_MAC("mac.tx_pause_frames_class6", TX_PAUSE_FRAMES_CLASS6),
142 	NFP_XSTAT_MAC("mac.tx_pause_frames_class7", TX_PAUSE_FRAMES_CLASS7),
143 };
144 
145 static const uint32_t nfp_net_link_speed_nfp2rte[] = {
146 	[NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = RTE_ETH_SPEED_NUM_NONE,
147 	[NFP_NET_CFG_STS_LINK_RATE_UNKNOWN]     = RTE_ETH_SPEED_NUM_NONE,
148 	[NFP_NET_CFG_STS_LINK_RATE_1G]          = RTE_ETH_SPEED_NUM_1G,
149 	[NFP_NET_CFG_STS_LINK_RATE_10G]         = RTE_ETH_SPEED_NUM_10G,
150 	[NFP_NET_CFG_STS_LINK_RATE_25G]         = RTE_ETH_SPEED_NUM_25G,
151 	[NFP_NET_CFG_STS_LINK_RATE_40G]         = RTE_ETH_SPEED_NUM_40G,
152 	[NFP_NET_CFG_STS_LINK_RATE_50G]         = RTE_ETH_SPEED_NUM_50G,
153 	[NFP_NET_CFG_STS_LINK_RATE_100G]        = RTE_ETH_SPEED_NUM_100G,
154 };
155 
156 static uint16_t
157 nfp_net_link_speed_rte2nfp(uint16_t speed)
158 {
159 	uint16_t i;
160 
161 	for (i = 0; i < RTE_DIM(nfp_net_link_speed_nfp2rte); i++) {
162 		if (speed == nfp_net_link_speed_nfp2rte[i])
163 			return i;
164 	}
165 
166 	return NFP_NET_CFG_STS_LINK_RATE_UNKNOWN;
167 }
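
/*
 * Worked example of the reverse lookup above (illustrative only):
 * nfp_net_link_speed_rte2nfp(RTE_ETH_SPEED_NUM_25G) walks
 * nfp_net_link_speed_nfp2rte[] and returns NFP_NET_CFG_STS_LINK_RATE_25G,
 * while a speed not present in the table (e.g. RTE_ETH_SPEED_NUM_200G)
 * falls through to NFP_NET_CFG_STS_LINK_RATE_UNKNOWN.
 */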
168 
169 static void
170 nfp_net_notify_port_speed(struct nfp_net_hw *hw,
171 		struct rte_eth_link *link)
172 {
173 	/*
174 	 * Read the link status from NFP_NET_CFG_STS. If the link is down
175 	 * then write the link speed NFP_NET_CFG_STS_LINK_RATE_UNKNOWN to
176 	 * NFP_NET_CFG_STS_NSP_LINK_RATE.
177 	 */
178 	if (link->link_status == RTE_ETH_LINK_DOWN) {
179 		nn_cfg_writew(&hw->super, NFP_NET_CFG_STS_NSP_LINK_RATE,
180 				NFP_NET_CFG_STS_LINK_RATE_UNKNOWN);
181 		return;
182 	}
183 
184 	/*
185 	 * Link is up so write the link speed from the eth_table to
186 	 * NFP_NET_CFG_STS_NSP_LINK_RATE.
187 	 */
188 	nn_cfg_writew(&hw->super, NFP_NET_CFG_STS_NSP_LINK_RATE,
189 			nfp_net_link_speed_rte2nfp(link->link_speed));
190 }
191 
192 /* The length of firmware version string */
193 #define FW_VER_LEN        32
194 
195 /**
196  * Reconfigure the firmware via the mailbox
197  *
198  * @param net_hw
199  *   Device to reconfigure
200  * @param mbox_cmd
201  *   The value for the mailbox command
202  *
203  * @return
204  *   - (0) if the reconfiguration through the mailbox succeeded.
205  *   - (-EIO) if an I/O error occurred and the mailbox reconfiguration failed.
206  */
207 int
208 nfp_net_mbox_reconfig(struct nfp_net_hw *net_hw,
209 		uint32_t mbox_cmd)
210 {
211 	int ret;
212 	uint32_t mbox;
213 
214 	mbox = net_hw->tlv_caps.mbox_off;
215 
216 	rte_spinlock_lock(&net_hw->super.reconfig_lock);
217 
218 	nn_cfg_writeq(&net_hw->super, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd);
219 	nn_cfg_writel(&net_hw->super, NFP_NET_CFG_UPDATE, NFP_NET_CFG_UPDATE_MBOX);
220 
221 	rte_wmb();
222 
223 	ret = nfp_reconfig_real(&net_hw->super, NFP_NET_CFG_UPDATE_MBOX);
224 
225 	rte_spinlock_unlock(&net_hw->super.reconfig_lock);
226 
227 	if (ret != 0) {
228 		PMD_DRV_LOG(ERR, "Error nft net mailbox reconfig: mbox=%#08x update=%#08x",
229 				mbox_cmd, NFP_NET_CFG_UPDATE_MBOX);
230 		return -EIO;
231 	}
232 
233 	return nn_cfg_readl(&net_hw->super, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET);
234 }
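
/*
 * A minimal usage sketch (hypothetical caller, not part of this file). The
 * command value mbox_cmd is a placeholder, not a real firmware opcode. A
 * negative return means the reconfig itself failed, while a non-zero positive
 * value is the firmware's return code read back from the mailbox.
 *
 *	int err = nfp_net_mbox_reconfig(net_hw, mbox_cmd);
 *	if (err < 0)
 *		PMD_DRV_LOG(ERR, "Mailbox I/O error");
 *	else if (err > 0)
 *		PMD_DRV_LOG(ERR, "Firmware rejected the command: %d", err);
 */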
235 
236 struct nfp_net_hw *
237 nfp_net_get_hw(const struct rte_eth_dev *dev)
238 {
239 	struct nfp_net_hw *hw;
240 
241 	if ((dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR) != 0) {
242 		struct nfp_flower_representor *repr;
243 		repr = dev->data->dev_private;
244 		hw = repr->app_fw_flower->pf_hw;
245 	} else {
246 		hw = dev->data->dev_private;
247 	}
248 
249 	return hw;
250 }
251 
252 /*
253  * Configure an Ethernet device.
254  *
255  * This function must be invoked before any other function in the Ethernet API.
256  * This function can also be re-invoked when a device is in the stopped state.
257  *
258  * A DPDK application specifies how many queues to use and how those queues
259  * need to be configured. The DPDK core uses this information to make sure no
260  * more queues than those advertised by the driver are requested.
261  * This function is called after that internal process.
262  */
263 int
264 nfp_net_configure(struct rte_eth_dev *dev)
265 {
266 	struct nfp_net_hw *hw;
267 	struct rte_eth_conf *dev_conf;
268 	struct rte_eth_rxmode *rxmode;
269 	struct rte_eth_txmode *txmode;
270 
271 	hw = nfp_net_get_hw(dev);
272 	dev_conf = &dev->data->dev_conf;
273 	rxmode = &dev_conf->rxmode;
274 	txmode = &dev_conf->txmode;
275 
276 	if ((rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) != 0)
277 		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
278 
279 	/* Checking TX mode */
280 	if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
281 		PMD_DRV_LOG(ERR, "TX mq_mode DCB and VMDq not supported");
282 		return -EINVAL;
283 	}
284 
285 	/* Checking RX mode */
286 	if ((rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) != 0 &&
287 			(hw->super.cap & NFP_NET_CFG_CTRL_RSS_ANY) == 0) {
288 		PMD_DRV_LOG(ERR, "RSS not supported");
289 		return -EINVAL;
290 	}
291 
292 	/* Checking MTU set */
293 	if (rxmode->mtu > hw->max_mtu + NFP_ETH_OVERHEAD) {
294 		PMD_DRV_LOG(ERR, "MTU (%u) larger than the maximum possible frame size (%u)",
295 				rxmode->mtu, hw->max_mtu + NFP_ETH_OVERHEAD);
296 		return -ERANGE;
297 	}
298 
299 	return 0;
300 }
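
/*
 * Application-side sketch (illustrative, not part of the PMD) of the configure
 * step that eventually reaches nfp_net_configure(). The port_id variable and
 * the queue counts are assumptions.
 *
 *	struct rte_eth_conf port_conf = {
 *		.rxmode = { .mq_mode = RTE_ETH_MQ_RX_RSS },
 *		.rx_adv_conf.rss_conf = { .rss_hf = RTE_ETH_RSS_IPV4 },
 *	};
 *
 *	if (rte_eth_dev_configure(port_id, 4, 4, &port_conf) < 0)
 *		rte_exit(EXIT_FAILURE, "Cannot configure port %u", port_id);
 */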
301 
302 void
303 nfp_net_log_device_information(const struct nfp_net_hw *hw)
304 {
305 	uint32_t cap = hw->super.cap;
306 
307 	PMD_INIT_LOG(INFO, "VER: %u.%u, Maximum supported MTU: %d",
308 			hw->ver.major, hw->ver.minor, hw->max_mtu);
309 
310 	PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", cap,
311 			cap & NFP_NET_CFG_CTRL_PROMISC   ? "PROMISC "   : "",
312 			cap & NFP_NET_CFG_CTRL_L2BC      ? "L2BCFILT "  : "",
313 			cap & NFP_NET_CFG_CTRL_L2MC      ? "L2MCFILT "  : "",
314 			cap & NFP_NET_CFG_CTRL_RXCSUM    ? "RXCSUM "    : "",
315 			cap & NFP_NET_CFG_CTRL_TXCSUM    ? "TXCSUM "    : "",
316 			cap & NFP_NET_CFG_CTRL_RXVLAN    ? "RXVLAN "    : "",
317 			cap & NFP_NET_CFG_CTRL_TXVLAN    ? "TXVLAN "    : "",
318 			cap & NFP_NET_CFG_CTRL_RXVLAN_V2 ? "RXVLANv2 "  : "",
319 			cap & NFP_NET_CFG_CTRL_TXVLAN_V2 ? "TXVLANv2 "  : "",
320 			cap & NFP_NET_CFG_CTRL_RXQINQ    ? "RXQINQ "    : "",
321 			cap & NFP_NET_CFG_CTRL_SCATTER   ? "SCATTER "   : "",
322 			cap & NFP_NET_CFG_CTRL_GATHER    ? "GATHER "    : "",
323 			cap & NFP_NET_CFG_CTRL_LIVE_ADDR ? "LIVE_ADDR " : "",
324 			cap & NFP_NET_CFG_CTRL_LSO       ? "TSO "       : "",
325 			cap & NFP_NET_CFG_CTRL_LSO2      ? "TSOv2 "     : "",
326 			cap & NFP_NET_CFG_CTRL_RSS       ? "RSS "       : "",
327 			cap & NFP_NET_CFG_CTRL_RSS2      ? "RSSv2 "     : "");
328 
329 	PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u",
330 			hw->max_rx_queues, hw->max_tx_queues);
331 }
332 
333 static inline void
334 nfp_net_enable_rxvlan_cap(struct nfp_net_hw *hw,
335 		uint32_t *ctrl)
336 {
337 	if ((hw->super.cap & NFP_NET_CFG_CTRL_RXVLAN_V2) != 0)
338 		*ctrl |= NFP_NET_CFG_CTRL_RXVLAN_V2;
339 	else if ((hw->super.cap & NFP_NET_CFG_CTRL_RXVLAN) != 0)
340 		*ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
341 }
342 
343 void
344 nfp_net_enable_queues(struct rte_eth_dev *dev)
345 {
346 	struct nfp_net_hw *hw;
347 
348 	hw = nfp_net_get_hw(dev);
349 
350 	nfp_enable_queues(&hw->super, dev->data->nb_rx_queues,
351 			dev->data->nb_tx_queues);
352 }
353 
354 void
355 nfp_net_disable_queues(struct rte_eth_dev *dev)
356 {
357 	struct nfp_net_hw *net_hw;
358 
359 	net_hw = nfp_net_get_hw(dev);
360 
361 	nfp_disable_queues(&net_hw->super);
362 }
363 
364 void
365 nfp_net_params_setup(struct nfp_net_hw *hw)
366 {
367 	nn_cfg_writel(&hw->super, NFP_NET_CFG_MTU, hw->mtu);
368 	nn_cfg_writel(&hw->super, NFP_NET_CFG_FLBUFSZ, hw->flbufsz);
369 }
370 
371 void
372 nfp_net_cfg_queue_setup(struct nfp_net_hw *hw)
373 {
374 	hw->super.qcp_cfg = hw->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
375 }
376 
377 int
378 nfp_net_set_mac_addr(struct rte_eth_dev *dev,
379 		struct rte_ether_addr *mac_addr)
380 {
381 	uint32_t update;
382 	uint32_t new_ctrl;
383 	struct nfp_hw *hw;
384 	struct nfp_net_hw *net_hw;
385 
386 	net_hw = nfp_net_get_hw(dev);
387 	hw = &net_hw->super;
388 	if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) != 0 &&
389 			(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR) == 0) {
390 		PMD_DRV_LOG(ERR, "MAC address unable to change when port enabled");
391 		return -EBUSY;
392 	}
393 
394 	/* Writing new MAC to the specific port BAR address */
395 	nfp_write_mac(hw, (uint8_t *)mac_addr);
396 
397 	update = NFP_NET_CFG_UPDATE_MACADDR;
398 	new_ctrl = hw->ctrl;
399 	if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) != 0 &&
400 			(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR) != 0)
401 		new_ctrl |= NFP_NET_CFG_CTRL_LIVE_ADDR;
402 
403 	/* Signal the NIC about the change */
404 	if (nfp_reconfig(hw, new_ctrl, update) != 0) {
405 		PMD_DRV_LOG(ERR, "MAC address update failed");
406 		return -EIO;
407 	}
408 
409 	hw->ctrl = new_ctrl;
410 
411 	return 0;
412 }
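
/*
 * Application-side sketch (illustrative): changing the MAC address through the
 * ethdev API, which lands in nfp_net_set_mac_addr(). Note the LIVE_ADDR
 * capability check above: without it, the port must be stopped first. The
 * address value is an arbitrary locally administered example.
 *
 *	struct rte_ether_addr addr = {{ 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }};
 *
 *	if (rte_eth_dev_default_mac_addr_set(port_id, &addr) != 0)
 *		printf("Failed to set MAC address\n");
 */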
413 
414 int
415 nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
416 		struct rte_intr_handle *intr_handle)
417 {
418 	uint16_t i;
419 	struct nfp_net_hw *hw;
420 
421 	if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
422 				dev->data->nb_rx_queues) != 0) {
423 		PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues intr_vec",
424 				dev->data->nb_rx_queues);
425 		return -ENOMEM;
426 	}
427 
428 	hw = nfp_net_get_hw(dev);
429 
430 	if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_UIO) {
431 		PMD_DRV_LOG(INFO, "VF: enabling RX interrupt with UIO");
432 		/* UIO just supports one queue and no LSC */
433 		nn_cfg_writeb(&hw->super, NFP_NET_CFG_RXR_VEC(0), 0);
434 		if (rte_intr_vec_list_index_set(intr_handle, 0, 0) != 0)
435 			return -1;
436 	} else {
437 		PMD_DRV_LOG(INFO, "VF: enabling RX interrupt with VFIO");
438 		for (i = 0; i < dev->data->nb_rx_queues; i++) {
439 			/*
440 			 * The first MSI-X vector is reserved for non
441 			 * efd interrupts.
442 			 */
443 			nn_cfg_writeb(&hw->super, NFP_NET_CFG_RXR_VEC(i), i + 1);
444 			if (rte_intr_vec_list_index_set(intr_handle, i, i + 1) != 0)
445 				return -1;
446 		}
447 	}
448 
449 	/* Avoiding TX interrupts */
450 	hw->super.ctrl |= NFP_NET_CFG_CTRL_MSIX_TX_OFF;
451 	return 0;
452 }
453 
454 uint32_t
455 nfp_check_offloads(struct rte_eth_dev *dev)
456 {
457 	uint32_t cap;
458 	uint32_t ctrl = 0;
459 	uint64_t rx_offload;
460 	uint64_t tx_offload;
461 	struct nfp_net_hw *hw;
462 	struct rte_eth_conf *dev_conf;
463 
464 	hw = nfp_net_get_hw(dev);
465 	cap = hw->super.cap;
466 
467 	dev_conf = &dev->data->dev_conf;
468 	rx_offload = dev_conf->rxmode.offloads;
469 	tx_offload = dev_conf->txmode.offloads;
470 
471 	if ((rx_offload & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM) != 0) {
472 		if ((cap & NFP_NET_CFG_CTRL_RXCSUM) != 0)
473 			ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
474 	}
475 
476 	if ((rx_offload & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) != 0)
477 		nfp_net_enable_rxvlan_cap(hw, &ctrl);
478 
479 	if ((rx_offload & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) != 0) {
480 		if ((cap & NFP_NET_CFG_CTRL_RXQINQ) != 0)
481 			ctrl |= NFP_NET_CFG_CTRL_RXQINQ;
482 	}
483 
484 	hw->mtu = dev->data->mtu;
485 
486 	if ((tx_offload & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) != 0) {
487 		if ((cap & NFP_NET_CFG_CTRL_TXVLAN_V2) != 0)
488 			ctrl |= NFP_NET_CFG_CTRL_TXVLAN_V2;
489 		else if ((cap & NFP_NET_CFG_CTRL_TXVLAN) != 0)
490 			ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
491 	}
492 
493 	/* L2 broadcast */
494 	if ((cap & NFP_NET_CFG_CTRL_L2BC) != 0)
495 		ctrl |= NFP_NET_CFG_CTRL_L2BC;
496 
497 	/* L2 multicast */
498 	if ((cap & NFP_NET_CFG_CTRL_L2MC) != 0)
499 		ctrl |= NFP_NET_CFG_CTRL_L2MC;
500 
501 	/* TX checksum offload */
502 	if ((tx_offload & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) != 0 ||
503 			(tx_offload & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) != 0 ||
504 			(tx_offload & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) != 0)
505 		ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
506 
507 	/* LSO offload */
508 	if ((tx_offload & RTE_ETH_TX_OFFLOAD_TCP_TSO) != 0 ||
509 			(tx_offload & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO) != 0) {
510 		if ((cap & NFP_NET_CFG_CTRL_LSO) != 0)
511 			ctrl |= NFP_NET_CFG_CTRL_LSO;
512 		else
513 			ctrl |= NFP_NET_CFG_CTRL_LSO2;
514 	}
515 
516 	/* TX gather (multi-segment transmit) */
517 	if ((tx_offload & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) != 0)
518 		ctrl |= NFP_NET_CFG_CTRL_GATHER;
519 
520 	return ctrl;
521 }
522 
523 int
524 nfp_net_promisc_enable(struct rte_eth_dev *dev)
525 {
526 	int ret;
527 	uint32_t update;
528 	uint32_t new_ctrl;
529 	struct nfp_hw *hw;
530 	struct nfp_net_hw *net_hw;
531 
532 	net_hw = nfp_net_get_hw(dev);
533 
534 	hw = &net_hw->super;
535 	if ((hw->cap & NFP_NET_CFG_CTRL_PROMISC) == 0) {
536 		PMD_DRV_LOG(ERR, "Promiscuous mode not supported");
537 		return -ENOTSUP;
538 	}
539 
540 	if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) != 0) {
541 		PMD_DRV_LOG(INFO, "Promiscuous mode already enabled");
542 		return 0;
543 	}
544 
545 	new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_PROMISC;
546 	update = NFP_NET_CFG_UPDATE_GEN;
547 
548 	ret = nfp_reconfig(hw, new_ctrl, update);
549 	if (ret != 0)
550 		return ret;
551 
552 	hw->ctrl = new_ctrl;
553 
554 	return 0;
555 }
556 
557 int
558 nfp_net_promisc_disable(struct rte_eth_dev *dev)
559 {
560 	int ret;
561 	uint32_t update;
562 	uint32_t new_ctrl;
563 	struct nfp_hw *hw;
564 	struct nfp_net_hw *net_hw;
565 
566 	net_hw = nfp_net_get_hw(dev);
567 	hw = &net_hw->super;
568 
569 	if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) == 0) {
570 		PMD_DRV_LOG(INFO, "Promiscuous mode already disabled");
571 		return 0;
572 	}
573 
574 	new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_PROMISC;
575 	update = NFP_NET_CFG_UPDATE_GEN;
576 
577 	ret = nfp_reconfig(hw, new_ctrl, update);
578 	if (ret != 0)
579 		return ret;
580 
581 	hw->ctrl = new_ctrl;
582 
583 	return 0;
584 }
585 
586 static int
587 nfp_net_set_allmulticast_mode(struct rte_eth_dev *dev,
588 		bool enable)
589 {
590 	int ret;
591 	uint32_t update;
592 	struct nfp_hw *hw;
593 	uint32_t cap_extend;
594 	uint32_t ctrl_extend;
595 	uint32_t new_ctrl_extend;
596 	struct nfp_net_hw *net_hw;
597 
598 	net_hw = nfp_net_get_hw(dev);
599 	hw = &net_hw->super;
600 
601 	cap_extend = hw->cap_ext;
602 	if ((cap_extend & NFP_NET_CFG_CTRL_MCAST_FILTER) == 0) {
603 		PMD_DRV_LOG(ERR, "Allmulticast mode not supported");
604 		return -ENOTSUP;
605 	}
606 
607 	/*
608 	 * Allmulticast mode enabled when NFP_NET_CFG_CTRL_MCAST_FILTER bit is 0.
609 	 * Allmulticast mode disabled when NFP_NET_CFG_CTRL_MCAST_FILTER bit is 1.
610 	 */
611 	ctrl_extend = hw->ctrl_ext;
612 	if (enable) {
613 		if ((ctrl_extend & NFP_NET_CFG_CTRL_MCAST_FILTER) == 0)
614 			return 0;
615 
616 		new_ctrl_extend = ctrl_extend & ~NFP_NET_CFG_CTRL_MCAST_FILTER;
617 	} else {
618 		if ((ctrl_extend & NFP_NET_CFG_CTRL_MCAST_FILTER) != 0)
619 			return 0;
620 
621 		new_ctrl_extend = ctrl_extend | NFP_NET_CFG_CTRL_MCAST_FILTER;
622 	}
623 
624 	update = NFP_NET_CFG_UPDATE_GEN;
625 
626 	ret = nfp_ext_reconfig(hw, new_ctrl_extend, update);
627 	if (ret != 0)
628 		return ret;
629 
630 	hw->ctrl_ext = new_ctrl_extend;
631 	return 0;
632 }
633 
634 int
635 nfp_net_allmulticast_enable(struct rte_eth_dev *dev)
636 {
637 	return nfp_net_set_allmulticast_mode(dev, true);
638 }
639 
640 int
641 nfp_net_allmulticast_disable(struct rte_eth_dev *dev)
642 {
643 	return nfp_net_set_allmulticast_mode(dev, false);
644 }
645 
646 int
647 nfp_net_link_update_common(struct rte_eth_dev *dev,
648 		struct nfp_net_hw *hw,
649 		struct rte_eth_link *link,
650 		uint32_t link_status)
651 {
652 	int ret;
653 	uint32_t i;
654 	uint32_t nn_link_status;
655 	struct nfp_eth_table *nfp_eth_table;
656 
657 	link->link_speed = RTE_ETH_SPEED_NUM_NONE;
658 
659 	if (link->link_status == RTE_ETH_LINK_UP) {
660 		if (hw->pf_dev != NULL) {
661 			nfp_eth_table = hw->pf_dev->nfp_eth_table;
662 			if (nfp_eth_table != NULL) {
663 				uint32_t speed = nfp_eth_table->ports[hw->idx].speed;
664 				for (i = 0; i < RTE_DIM(nfp_net_link_speed_nfp2rte); i++) {
665 					if (nfp_net_link_speed_nfp2rte[i] == speed) {
666 						link->link_speed = speed;
667 						break;
668 					}
669 				}
670 			}
671 		} else {
672 			/*
673 			 * Shift and mask nn_link_status so that it is effectively the value
674 			 * at offset NFP_NET_CFG_STS_NSP_LINK_RATE.
675 			 */
676 			nn_link_status = (link_status >> NFP_NET_CFG_STS_LINK_RATE_SHIFT) &
677 					NFP_NET_CFG_STS_LINK_RATE_MASK;
678 			if (nn_link_status < RTE_DIM(nfp_net_link_speed_nfp2rte))
679 				link->link_speed = nfp_net_link_speed_nfp2rte[nn_link_status];
680 		}
681 	}
682 
683 	ret = rte_eth_linkstatus_set(dev, link);
684 	if (ret == 0) {
685 		if (link->link_status != 0)
686 			PMD_DRV_LOG(INFO, "NIC Link is Up");
687 		else
688 			PMD_DRV_LOG(INFO, "NIC Link is Down");
689 	}
690 
691 	return ret;
692 }
693 
694 /*
695  * Returning 0 means the link status changed, -1 means it did not change.
696  *
697  * Waiting for completion is needed, as it can take up to 9 seconds to get
698  * the link status.
699  */
700 int
701 nfp_net_link_update(struct rte_eth_dev *dev,
702 		__rte_unused int wait_to_complete)
703 {
704 	int ret;
705 	struct nfp_net_hw *hw;
706 	uint32_t nn_link_status;
707 	struct rte_eth_link link;
708 
709 	hw = nfp_net_get_hw(dev);
710 
711 	memset(&link, 0, sizeof(struct rte_eth_link));
712 
713 	/* Read link status */
714 	nn_link_status = nn_cfg_readw(&hw->super, NFP_NET_CFG_STS);
715 	if ((nn_link_status & NFP_NET_CFG_STS_LINK) != 0)
716 		link.link_status = RTE_ETH_LINK_UP;
717 
718 	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
719 
720 	ret = nfp_net_link_update_common(dev, hw, &link, nn_link_status);
721 
722 	/*
723 	 * Notify the port to update the speed value in the CTRL BAR from NSP.
724 	 * Not applicable for VFs as the associated PF is still attached to the
725 	 * kernel driver.
726 	 */
727 	if (hw->pf_dev != NULL)
728 		nfp_net_notify_port_speed(hw, &link);
729 
730 	return ret;
731 }
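
/*
 * Application-side sketch (illustrative): reading the link status that
 * nfp_net_link_update() reports through the ethdev layer.
 *
 *	struct rte_eth_link link;
 *
 *	if (rte_eth_link_get_nowait(port_id, &link) == 0 &&
 *			link.link_status == RTE_ETH_LINK_UP)
 *		printf("Port %u up at %u Mbps\n", port_id, link.link_speed);
 */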
732 
733 int
734 nfp_net_stats_get(struct rte_eth_dev *dev,
735 		struct rte_eth_stats *stats)
736 {
737 	uint16_t i;
738 	struct nfp_net_hw *hw;
739 	struct rte_eth_stats nfp_dev_stats;
740 
741 	if (stats == NULL)
742 		return -EINVAL;
743 
744 	hw = nfp_net_get_hw(dev);
745 
746 	memset(&nfp_dev_stats, 0, sizeof(nfp_dev_stats));
747 
748 	/* Reading per RX ring stats */
749 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
750 		if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
751 			break;
752 
753 		nfp_dev_stats.q_ipackets[i] =
754 				nn_cfg_readq(&hw->super, NFP_NET_CFG_RXR_STATS(i));
755 		nfp_dev_stats.q_ipackets[i] -=
756 				hw->eth_stats_base.q_ipackets[i];
757 
758 		nfp_dev_stats.q_ibytes[i] =
759 				nn_cfg_readq(&hw->super, NFP_NET_CFG_RXR_STATS(i) + 0x8);
760 		nfp_dev_stats.q_ibytes[i] -=
761 				hw->eth_stats_base.q_ibytes[i];
762 	}
763 
764 	/* Reading per TX ring stats */
765 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
766 		if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
767 			break;
768 
769 		nfp_dev_stats.q_opackets[i] =
770 				nn_cfg_readq(&hw->super, NFP_NET_CFG_TXR_STATS(i));
771 		nfp_dev_stats.q_opackets[i] -= hw->eth_stats_base.q_opackets[i];
772 
773 		nfp_dev_stats.q_obytes[i] =
774 				nn_cfg_readq(&hw->super, NFP_NET_CFG_TXR_STATS(i) + 0x8);
775 		nfp_dev_stats.q_obytes[i] -= hw->eth_stats_base.q_obytes[i];
776 	}
777 
778 	nfp_dev_stats.ipackets = nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_FRAMES);
779 	nfp_dev_stats.ipackets -= hw->eth_stats_base.ipackets;
780 
781 	nfp_dev_stats.ibytes = nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_OCTETS);
782 	nfp_dev_stats.ibytes -= hw->eth_stats_base.ibytes;
783 
784 	nfp_dev_stats.opackets =
785 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_TX_FRAMES);
786 	nfp_dev_stats.opackets -= hw->eth_stats_base.opackets;
787 
788 	nfp_dev_stats.obytes =
789 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_TX_OCTETS);
790 	nfp_dev_stats.obytes -= hw->eth_stats_base.obytes;
791 
792 	/* Reading general device stats */
793 	nfp_dev_stats.ierrors =
794 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_ERRORS);
795 	nfp_dev_stats.ierrors -= hw->eth_stats_base.ierrors;
796 
797 	nfp_dev_stats.oerrors =
798 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_TX_ERRORS);
799 	nfp_dev_stats.oerrors -= hw->eth_stats_base.oerrors;
800 
801 	/* RX ring mbuf allocation failures */
802 	nfp_dev_stats.rx_nombuf = dev->data->rx_mbuf_alloc_failed;
803 
804 	nfp_dev_stats.imissed =
805 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_DISCARDS);
806 	nfp_dev_stats.imissed -= hw->eth_stats_base.imissed;
807 
808 	memcpy(stats, &nfp_dev_stats, sizeof(*stats));
809 	return 0;
810 }
811 
812 /*
813  * hw->eth_stats_base records the per-counter starting point.
814  * Let's update it now.
815  */
816 int
817 nfp_net_stats_reset(struct rte_eth_dev *dev)
818 {
819 	uint16_t i;
820 	struct nfp_net_hw *hw;
821 
822 	hw = nfp_net_get_hw(dev);
823 
824 	/* Reading per RX ring stats */
825 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
826 		if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
827 			break;
828 
829 		hw->eth_stats_base.q_ipackets[i] =
830 				nn_cfg_readq(&hw->super, NFP_NET_CFG_RXR_STATS(i));
831 
832 		hw->eth_stats_base.q_ibytes[i] =
833 				nn_cfg_readq(&hw->super, NFP_NET_CFG_RXR_STATS(i) + 0x8);
834 	}
835 
836 	/* Reading per TX ring stats */
837 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
838 		if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
839 			break;
840 
841 		hw->eth_stats_base.q_opackets[i] =
842 				nn_cfg_readq(&hw->super, NFP_NET_CFG_TXR_STATS(i));
843 
844 		hw->eth_stats_base.q_obytes[i] =
845 				nn_cfg_readq(&hw->super, NFP_NET_CFG_TXR_STATS(i) + 0x8);
846 	}
847 
848 	hw->eth_stats_base.ipackets =
849 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_FRAMES);
850 
851 	hw->eth_stats_base.ibytes =
852 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_OCTETS);
853 
854 	hw->eth_stats_base.opackets =
855 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_TX_FRAMES);
856 
857 	hw->eth_stats_base.obytes =
858 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_TX_OCTETS);
859 
860 	/* Reading general device stats */
861 	hw->eth_stats_base.ierrors =
862 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_ERRORS);
863 
864 	hw->eth_stats_base.oerrors =
865 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_TX_ERRORS);
866 
867 	/* RX ring mbuf allocation failures */
868 	dev->data->rx_mbuf_alloc_failed = 0;
869 
870 	hw->eth_stats_base.imissed =
871 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_DISCARDS);
872 
873 	return 0;
874 }
875 
876 uint32_t
877 nfp_net_xstats_size(const struct rte_eth_dev *dev)
878 {
879 	uint32_t count;
880 	struct nfp_net_hw *hw;
881 	const uint32_t size = RTE_DIM(nfp_net_xstats);
882 
883 	/* If the device is a VF, then there will be no MAC stats */
884 	hw = nfp_net_get_hw(dev);
885 	if (hw->mac_stats == NULL) {
886 		for (count = 0; count < size; count++) {
887 			if (nfp_net_xstats[count].group == NFP_XSTAT_GROUP_MAC)
888 				break;
889 		}
890 
891 		return count;
892 	}
893 
894 	return size;
895 }
896 
897 static const struct nfp_xstat *
898 nfp_net_xstats_info(const struct rte_eth_dev *dev,
899 		uint32_t index)
900 {
901 	if (index >= nfp_net_xstats_size(dev)) {
902 		PMD_DRV_LOG(ERR, "xstat index out of bounds");
903 		return NULL;
904 	}
905 
906 	return &nfp_net_xstats[index];
907 }
908 
909 static uint64_t
910 nfp_net_xstats_value(const struct rte_eth_dev *dev,
911 		uint32_t index,
912 		bool raw)
913 {
914 	uint64_t value;
915 	struct nfp_net_hw *hw;
916 	struct nfp_xstat xstat;
917 
918 	hw = nfp_net_get_hw(dev);
919 	xstat = nfp_net_xstats[index];
920 
921 	if (xstat.group == NFP_XSTAT_GROUP_MAC)
922 		value = nn_readq(hw->mac_stats + xstat.offset);
923 	else
924 		value = nn_cfg_readq(&hw->super, xstat.offset);
925 
926 	if (raw)
927 		return value;
928 
929 	/*
930 	 * A baseline value of each statistic counter is recorded when stats are "reset".
931 	 * Thus, the value returned by this function needs to be decremented by this
932 	 * baseline value. The result is the count of this statistic since the last time
933 	 * it was "reset".
934 	 */
935 	return value - hw->eth_xstats_base[index].value;
936 }
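
/*
 * Worked example of the baseline handling above: if the hardware counter
 * currently reads 1500 and the value recorded at the last xstats reset was
 * 1000, this function reports 1500 - 1000 = 500 when raw is false, and the
 * untouched 1500 when raw is true.
 */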
937 
938 /* NOTE: All callers ensure dev is always set. */
939 int
940 nfp_net_xstats_get_names(struct rte_eth_dev *dev,
941 		struct rte_eth_xstat_name *xstats_names,
942 		unsigned int size)
943 {
944 	uint32_t id;
945 	uint32_t nfp_size;
946 	uint32_t read_size;
947 
948 	nfp_size = nfp_net_xstats_size(dev);
949 
950 	if (xstats_names == NULL)
951 		return nfp_size;
952 
953 	/* Read at most NFP xstats number of names. */
954 	read_size = RTE_MIN(size, nfp_size);
955 
956 	for (id = 0; id < read_size; id++)
957 		rte_strlcpy(xstats_names[id].name, nfp_net_xstats[id].name,
958 				RTE_ETH_XSTATS_NAME_SIZE);
959 
960 	return read_size;
961 }
962 
963 /* NOTE: All callers ensure dev is always set. */
964 int
965 nfp_net_xstats_get(struct rte_eth_dev *dev,
966 		struct rte_eth_xstat *xstats,
967 		unsigned int n)
968 {
969 	uint32_t id;
970 	uint32_t nfp_size;
971 	uint32_t read_size;
972 
973 	nfp_size = nfp_net_xstats_size(dev);
974 
975 	if (xstats == NULL)
976 		return nfp_size;
977 
978 	/* Read at most NFP xstats number of values. */
979 	read_size = RTE_MIN(n, nfp_size);
980 
981 	for (id = 0; id < read_size; id++) {
982 		xstats[id].id = id;
983 		xstats[id].value = nfp_net_xstats_value(dev, id, false);
984 	}
985 
986 	return read_size;
987 }
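
/*
 * Application-side sketch (illustrative) of the usual two-call pattern that
 * exercises nfp_net_xstats_get(): first query the count, then fetch the values.
 *
 *	int nb = rte_eth_xstats_get(port_id, NULL, 0);
 *	struct rte_eth_xstat *xs = malloc(nb * sizeof(*xs));
 *
 *	if (xs != NULL && rte_eth_xstats_get(port_id, xs, nb) == nb)
 *		printf("xstat[0] = %" PRIu64 "\n", xs[0].value);
 *	free(xs);
 */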
988 
989 /*
990  * NOTE: The only caller rte_eth_xstats_get_names_by_id() ensures dev,
991  * ids, xstats_names and size are valid, and non-NULL.
992  */
993 int
994 nfp_net_xstats_get_names_by_id(struct rte_eth_dev *dev,
995 		const uint64_t *ids,
996 		struct rte_eth_xstat_name *xstats_names,
997 		unsigned int size)
998 {
999 	uint32_t i;
1000 	uint32_t read_size;
1001 
1002 	/* Read at most NFP xstats number of names. */
1003 	read_size = RTE_MIN(size, nfp_net_xstats_size(dev));
1004 
1005 	for (i = 0; i < read_size; i++) {
1006 		const struct nfp_xstat *xstat;
1007 
1008 		/* Make sure ID is valid for device. */
1009 		xstat = nfp_net_xstats_info(dev, ids[i]);
1010 		if (xstat == NULL)
1011 			return -EINVAL;
1012 
1013 		rte_strlcpy(xstats_names[i].name, xstat->name,
1014 				RTE_ETH_XSTATS_NAME_SIZE);
1015 	}
1016 
1017 	return read_size;
1018 }
1019 
1020 /*
1021  * NOTE: The only caller rte_eth_xstats_get_by_id() ensures dev,
1022  * ids, values and n are valid, and non-NULL.
1023  */
1024 int
1025 nfp_net_xstats_get_by_id(struct rte_eth_dev *dev,
1026 		const uint64_t *ids,
1027 		uint64_t *values,
1028 		unsigned int n)
1029 {
1030 	uint32_t i;
1031 	uint32_t read_size;
1032 
1033 	/* Read at most NFP xstats number of values. */
1034 	read_size = RTE_MIN(n, nfp_net_xstats_size(dev));
1035 
1036 	for (i = 0; i < read_size; i++) {
1037 		const struct nfp_xstat *xstat;
1038 
1039 		/* Make sure index is valid for device. */
1040 		xstat = nfp_net_xstats_info(dev, ids[i]);
1041 		if (xstat == NULL)
1042 			return -EINVAL;
1043 
1044 		values[i] = nfp_net_xstats_value(dev, ids[i], false);
1045 	}
1046 
1047 	return read_size;
1048 }
1049 
1050 int
1051 nfp_net_xstats_reset(struct rte_eth_dev *dev)
1052 {
1053 	uint32_t id;
1054 	uint32_t read_size;
1055 	struct nfp_net_hw *hw;
1056 
1057 	hw = nfp_net_get_hw(dev);
1058 	read_size = nfp_net_xstats_size(dev);
1059 
1060 	for (id = 0; id < read_size; id++) {
1061 		hw->eth_xstats_base[id].id = id;
1062 		hw->eth_xstats_base[id].value = nfp_net_xstats_value(dev, id, true);
1063 	}
1064 
1065 	/* Successfully reset xstats, now call function to reset basic stats. */
1066 	return nfp_net_stats_reset(dev);
1067 }
1068 
1069 void
1070 nfp_net_rx_desc_limits(struct nfp_net_hw *hw,
1071 		uint16_t *min_rx_desc,
1072 		uint16_t *max_rx_desc)
1073 {
1074 	*max_rx_desc = hw->dev_info->max_qc_size;
1075 	*min_rx_desc = hw->dev_info->min_qc_size;
1076 }
1077 
1078 void
1079 nfp_net_tx_desc_limits(struct nfp_net_hw *hw,
1080 		uint16_t *min_tx_desc,
1081 		uint16_t *max_tx_desc)
1082 {
1083 	uint16_t tx_dpp;
1084 
1085 	if (hw->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3)
1086 		tx_dpp = NFD3_TX_DESC_PER_PKT;
1087 	else
1088 		tx_dpp = NFDK_TX_DESC_PER_SIMPLE_PKT;
1089 
1090 	*max_tx_desc = hw->dev_info->max_qc_size / tx_dpp;
1091 	*min_tx_desc = hw->dev_info->min_qc_size / tx_dpp;
1092 }
1093 
1094 int
1095 nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1096 {
1097 	uint32_t cap;
1098 	uint32_t cap_extend;
1099 	uint16_t min_rx_desc;
1100 	uint16_t max_rx_desc;
1101 	uint16_t min_tx_desc;
1102 	uint16_t max_tx_desc;
1103 	struct nfp_net_hw *hw;
1104 
1105 	hw = nfp_net_get_hw(dev);
1106 
1107 	nfp_net_rx_desc_limits(hw, &min_rx_desc, &max_rx_desc);
1108 	nfp_net_tx_desc_limits(hw, &min_tx_desc, &max_tx_desc);
1109 
1110 	dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
1111 	dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
1112 	dev_info->min_rx_bufsize = RTE_ETHER_MIN_MTU;
1113 	/*
1114 	 * The maximum rx packet length is set to the maximum layer 3 MTU,
1115 	 * plus layer 2, CRC and VLAN headers.
1116 	 * The maximum layer 3 MTU (max_mtu) is read from hardware,
1117 	 * which was set by the firmware loaded onto the card.
1118 	 */
1119 	dev_info->max_rx_pktlen = hw->max_mtu + NFP_ETH_OVERHEAD;
1120 	dev_info->max_mtu = hw->max_mtu;
1121 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
1122 	/* Next should change when PF support is implemented */
1123 	dev_info->max_mac_addrs = 1;
1124 
1125 	cap = hw->super.cap;
1126 
1127 	if ((cap & (NFP_NET_CFG_CTRL_RXVLAN | NFP_NET_CFG_CTRL_RXVLAN_V2)) != 0)
1128 		dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
1129 
1130 	if ((cap & NFP_NET_CFG_CTRL_RXQINQ) != 0)
1131 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
1132 
1133 	if ((cap & NFP_NET_CFG_CTRL_RXCSUM) != 0)
1134 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
1135 				RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
1136 				RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
1137 
1138 	if ((cap & (NFP_NET_CFG_CTRL_TXVLAN | NFP_NET_CFG_CTRL_TXVLAN_V2)) != 0)
1139 		dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
1140 
1141 	if ((cap & NFP_NET_CFG_CTRL_TXCSUM) != 0)
1142 		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
1143 				RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
1144 				RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
1145 
1146 	if ((cap & NFP_NET_CFG_CTRL_LSO_ANY) != 0) {
1147 		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
1148 		if ((cap & NFP_NET_CFG_CTRL_VXLAN) != 0)
1149 			dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO;
1150 	}
1151 
1152 	if ((cap & NFP_NET_CFG_CTRL_GATHER) != 0)
1153 		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
1154 
1155 	cap_extend = hw->super.cap_ext;
1156 	if ((cap_extend & NFP_NET_CFG_CTRL_IPSEC) != 0) {
1157 		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_SECURITY;
1158 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_SECURITY;
1159 	}
1160 
1161 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
1162 		.rx_thresh = {
1163 			.pthresh = DEFAULT_RX_PTHRESH,
1164 			.hthresh = DEFAULT_RX_HTHRESH,
1165 			.wthresh = DEFAULT_RX_WTHRESH,
1166 		},
1167 		.rx_free_thresh = DEFAULT_RX_FREE_THRESH,
1168 		.rx_drop_en = 0,
1169 	};
1170 
1171 	dev_info->default_txconf = (struct rte_eth_txconf) {
1172 		.tx_thresh = {
1173 			.pthresh = DEFAULT_TX_PTHRESH,
1174 			.hthresh = DEFAULT_TX_HTHRESH,
1175 			.wthresh = DEFAULT_TX_WTHRESH,
1176 		},
1177 		.tx_free_thresh = DEFAULT_TX_FREE_THRESH,
1178 		.tx_rs_thresh = DEFAULT_TX_RSBIT_THRESH,
1179 	};
1180 
1181 	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
1182 		.nb_max = max_rx_desc,
1183 		.nb_min = min_rx_desc,
1184 		.nb_align = NFP_ALIGN_RING_DESC,
1185 	};
1186 
1187 	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
1188 		.nb_max = max_tx_desc,
1189 		.nb_min = min_tx_desc,
1190 		.nb_align = NFP_ALIGN_RING_DESC,
1191 		.nb_seg_max = NFP_TX_MAX_SEG,
1192 		.nb_mtu_seg_max = NFP_TX_MAX_MTU_SEG,
1193 	};
1194 
1195 	if ((cap & NFP_NET_CFG_CTRL_RSS_ANY) != 0) {
1196 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
1197 
1198 		dev_info->flow_type_rss_offloads = RTE_ETH_RSS_IPV4 |
1199 				RTE_ETH_RSS_NONFRAG_IPV4_TCP |
1200 				RTE_ETH_RSS_NONFRAG_IPV4_UDP |
1201 				RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
1202 				RTE_ETH_RSS_IPV6 |
1203 				RTE_ETH_RSS_NONFRAG_IPV6_TCP |
1204 				RTE_ETH_RSS_NONFRAG_IPV6_UDP |
1205 				RTE_ETH_RSS_NONFRAG_IPV6_SCTP;
1206 
1207 		dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ;
1208 		dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ;
1209 	}
1210 
1211 	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G |
1212 			RTE_ETH_LINK_SPEED_10G |
1213 			RTE_ETH_LINK_SPEED_25G |
1214 			RTE_ETH_LINK_SPEED_40G |
1215 			RTE_ETH_LINK_SPEED_50G |
1216 			RTE_ETH_LINK_SPEED_100G;
1217 
1218 	return 0;
1219 }
1220 
1221 int
1222 nfp_net_common_init(struct rte_pci_device *pci_dev,
1223 		struct nfp_net_hw *hw)
1224 {
1225 	const int stride = 4;
1226 
1227 	hw->device_id = pci_dev->id.device_id;
1228 	hw->vendor_id = pci_dev->id.vendor_id;
1229 	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
1230 	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
1231 
1232 	hw->max_rx_queues = nn_cfg_readl(&hw->super, NFP_NET_CFG_MAX_RXRINGS);
1233 	hw->max_tx_queues = nn_cfg_readl(&hw->super, NFP_NET_CFG_MAX_TXRINGS);
1234 	if (hw->max_rx_queues == 0 || hw->max_tx_queues == 0) {
1235 		PMD_INIT_LOG(ERR, "Device %s can not be used, there are no valid queue "
1236 				"pairs for use", pci_dev->name);
1237 		return -ENODEV;
1238 	}
1239 
1240 	nfp_net_cfg_read_version(hw);
1241 	if (!nfp_net_is_valid_nfd_version(hw->ver))
1242 		return -EINVAL;
1243 
1244 	if (nfp_net_check_dma_mask(hw, pci_dev->name) != 0)
1245 		return -ENODEV;
1246 
1247 	/* Get some of the read-only fields from the config BAR */
1248 	hw->super.cap = nn_cfg_readl(&hw->super, NFP_NET_CFG_CAP);
1249 	hw->super.cap_ext = nn_cfg_readl(&hw->super, NFP_NET_CFG_CAP_WORD1);
1250 	hw->max_mtu = nn_cfg_readl(&hw->super, NFP_NET_CFG_MAX_MTU);
1251 	hw->flbufsz = DEFAULT_FLBUF_SIZE;
1252 
1253 	nfp_net_init_metadata_format(hw);
1254 
1255 	/* Read the Rx offset configured from firmware */
1256 	if (hw->ver.major < 2)
1257 		hw->rx_offset = NFP_NET_RX_OFFSET;
1258 	else
1259 		hw->rx_offset = nn_cfg_readl(&hw->super, NFP_NET_CFG_RX_OFFSET);
1260 
1261 	hw->super.ctrl = 0;
1262 	hw->stride_rx = stride;
1263 	hw->stride_tx = stride;
1264 
1265 	return 0;
1266 }
1267 
1268 const uint32_t *
1269 nfp_net_supported_ptypes_get(struct rte_eth_dev *dev)
1270 {
1271 	struct nfp_net_hw *net_hw;
1272 	static const uint32_t ptypes[] = {
1273 		RTE_PTYPE_L2_ETHER,
1274 		RTE_PTYPE_L3_IPV4,
1275 		RTE_PTYPE_L3_IPV4_EXT,
1276 		RTE_PTYPE_L3_IPV6,
1277 		RTE_PTYPE_L3_IPV6_EXT,
1278 		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1279 		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
1280 		RTE_PTYPE_L4_TCP,
1281 		RTE_PTYPE_L4_UDP,
1282 		RTE_PTYPE_L4_FRAG,
1283 		RTE_PTYPE_L4_NONFRAG,
1284 		RTE_PTYPE_L4_ICMP,
1285 		RTE_PTYPE_L4_SCTP,
1286 		RTE_PTYPE_TUNNEL_VXLAN,
1287 		RTE_PTYPE_TUNNEL_NVGRE,
1288 		RTE_PTYPE_TUNNEL_GENEVE,
1289 		RTE_PTYPE_INNER_L2_ETHER,
1290 		RTE_PTYPE_INNER_L3_IPV4,
1291 		RTE_PTYPE_INNER_L3_IPV4_EXT,
1292 		RTE_PTYPE_INNER_L3_IPV6,
1293 		RTE_PTYPE_INNER_L3_IPV6_EXT,
1294 		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
1295 		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
1296 		RTE_PTYPE_INNER_L4_TCP,
1297 		RTE_PTYPE_INNER_L4_UDP,
1298 		RTE_PTYPE_INNER_L4_FRAG,
1299 		RTE_PTYPE_INNER_L4_NONFRAG,
1300 		RTE_PTYPE_INNER_L4_ICMP,
1301 		RTE_PTYPE_INNER_L4_SCTP,
1302 	};
1303 
1304 	if (dev->rx_pkt_burst != nfp_net_recv_pkts)
1305 		return NULL;
1306 
1307 	net_hw = dev->data->dev_private;
1308 	if ((net_hw->super.cap_ext & NFP_NET_CFG_CTRL_PKT_TYPE) == 0)
1309 		return NULL;
1310 
1311 	return ptypes;
1312 }
1313 
1314 int
1315 nfp_rx_queue_intr_enable(struct rte_eth_dev *dev,
1316 		uint16_t queue_id)
1317 {
1318 	uint16_t base = 0;
1319 	struct nfp_net_hw *hw;
1320 	struct rte_pci_device *pci_dev;
1321 
1322 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1323 	if (rte_intr_type_get(pci_dev->intr_handle) != RTE_INTR_HANDLE_UIO)
1324 		base = 1;
1325 
1326 	/* Make sure all updates are written before un-masking */
1327 	rte_wmb();
1328 
1329 	hw = nfp_net_get_hw(dev);
1330 	nn_cfg_writeb(&hw->super, NFP_NET_CFG_ICR(base + queue_id),
1331 			NFP_NET_CFG_ICR_UNMASKED);
1332 	return 0;
1333 }
1334 
1335 int
1336 nfp_rx_queue_intr_disable(struct rte_eth_dev *dev,
1337 		uint16_t queue_id)
1338 {
1339 	uint16_t base = 0;
1340 	struct nfp_net_hw *hw;
1341 	struct rte_pci_device *pci_dev;
1342 
1343 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1344 	if (rte_intr_type_get(pci_dev->intr_handle) != RTE_INTR_HANDLE_UIO)
1345 		base = 1;
1346 
1347 	/* Make sure all updates are written before un-masking */
1348 	rte_wmb();
1349 
1350 	hw = nfp_net_get_hw(dev);
1351 	nn_cfg_writeb(&hw->super, NFP_NET_CFG_ICR(base + queue_id), NFP_NET_CFG_ICR_RXTX);
1352 
1353 	return 0;
1354 }
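
/*
 * Application-side sketch (illustrative): toggling per-queue Rx interrupts,
 * which ends up in the two helpers above. This assumes the port was configured
 * with intr_conf.rxq set to 1.
 *
 *	rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *	... wait for the interrupt event, e.g. via rte_epoll_wait() ...
 *	rte_eth_dev_rx_intr_disable(port_id, queue_id);
 */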
1355 
1356 static void
1357 nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
1358 {
1359 	struct rte_eth_link link;
1360 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1361 
1362 	rte_eth_linkstatus_get(dev, &link);
1363 	if (link.link_status != 0)
1364 		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
1365 				dev->data->port_id, link.link_speed,
1366 				link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
1367 				"full-duplex" : "half-duplex");
1368 	else
1369 		PMD_DRV_LOG(INFO, " Port %d: Link Down", dev->data->port_id);
1370 
1371 	PMD_DRV_LOG(INFO, "PCI Address: " PCI_PRI_FMT,
1372 			pci_dev->addr.domain, pci_dev->addr.bus,
1373 			pci_dev->addr.devid, pci_dev->addr.function);
1374 }
1375 
1376 /*
1377  * Unmask an interrupt
1378  *
1379  * If MSI-X auto-masking is enabled clear the mask bit, otherwise
1380  * clear the ICR for the entry.
1381  */
1382 void
1383 nfp_net_irq_unmask(struct rte_eth_dev *dev)
1384 {
1385 	struct nfp_net_hw *hw;
1386 	struct rte_pci_device *pci_dev;
1387 
1388 	hw = nfp_net_get_hw(dev);
1389 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1390 
1391 	/* Make sure all updates are written before un-masking */
1392 	rte_wmb();
1393 
1394 	if ((hw->super.ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) != 0) {
1395 		/* If MSI-X auto-masking is used, clear the entry */
1396 		rte_intr_ack(pci_dev->intr_handle);
1397 	} else {
1398 		nn_cfg_writeb(&hw->super, NFP_NET_CFG_ICR(NFP_NET_IRQ_LSC_IDX),
1399 				NFP_NET_CFG_ICR_UNMASKED);
1400 	}
1401 }
1402 
1403 /**
1404  * Interrupt handler registered as an alarm callback for delayed handling of a
1405  * specific interrupt, waiting for the NIC state to become stable. As the NFP
1406  * interrupt state is not stable right after the link goes down, it is necessary
1407  * to wait 4 seconds to get a stable status.
1408  *
1409  * @param param
1410  *   The address of parameter (struct rte_eth_dev *)
1411  */
1412 void
1413 nfp_net_dev_interrupt_delayed_handler(void *param)
1414 {
1415 	struct rte_eth_dev *dev = param;
1416 
1417 	nfp_net_link_update(dev, 0);
1418 	rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
1419 
1420 	nfp_net_dev_link_status_print(dev);
1421 
1422 	/* Unmasking */
1423 	nfp_net_irq_unmask(dev);
1424 }
1425 
1426 void
1427 nfp_net_dev_interrupt_handler(void *param)
1428 {
1429 	int64_t timeout;
1430 	struct rte_eth_link link;
1431 	struct rte_eth_dev *dev = param;
1432 
1433 	PMD_DRV_LOG(DEBUG, "We got an LSC interrupt");
1434 
1435 	rte_eth_linkstatus_get(dev, &link);
1436 
1437 	nfp_net_link_update(dev, 0);
1438 
1439 	/* The link was down, so it is likely coming up */
1440 	if (link.link_status == 0) {
1441 		/* Handle it 1 sec later, waiting for it to become stable */
1442 		timeout = NFP_NET_LINK_UP_CHECK_TIMEOUT;
1443 	} else {  /* The link was up, so it is likely going down */
1444 		/* Handle it 4 sec later, waiting for it to become stable */
1445 		timeout = NFP_NET_LINK_DOWN_CHECK_TIMEOUT;
1446 	}
1447 
1448 	if (rte_eal_alarm_set(timeout * 1000,
1449 			nfp_net_dev_interrupt_delayed_handler,
1450 			(void *)dev) != 0) {
1451 		PMD_INIT_LOG(ERR, "Error setting alarm");
1452 		/* Unmasking */
1453 		nfp_net_irq_unmask(dev);
1454 	}
1455 }
1456 
1457 int
1458 nfp_net_dev_mtu_set(struct rte_eth_dev *dev,
1459 		uint16_t mtu)
1460 {
1461 	struct nfp_net_hw *hw;
1462 
1463 	hw = nfp_net_get_hw(dev);
1464 
1465 	/* MTU setting is forbidden if port is started */
1466 	if (dev->data->dev_started) {
1467 		PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
1468 				dev->data->port_id);
1469 		return -EBUSY;
1470 	}
1471 
1472 	/* MTU larger than current mbufsize not supported */
1473 	if (mtu > hw->flbufsz) {
1474 		PMD_DRV_LOG(ERR, "MTU (%u) larger than current mbufsize (%u) not supported",
1475 				mtu, hw->flbufsz);
1476 		return -ERANGE;
1477 	}
1478 
1479 	/* Writing to configuration space */
1480 	nn_cfg_writel(&hw->super, NFP_NET_CFG_MTU, mtu);
1481 
1482 	hw->mtu = mtu;
1483 
1484 	return 0;
1485 }
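
/*
 * Application-side sketch (illustrative): the MTU can only be changed while
 * the port is stopped, as enforced above.
 *
 *	rte_eth_dev_stop(port_id);
 *	if (rte_eth_dev_set_mtu(port_id, 1500) != 0)
 *		printf("Failed to set MTU\n");
 *	rte_eth_dev_start(port_id);
 */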
1486 
1487 int
1488 nfp_net_vlan_offload_set(struct rte_eth_dev *dev,
1489 		int mask)
1490 {
1491 	int ret;
1492 	uint32_t update;
1493 	uint32_t new_ctrl;
1494 	struct nfp_hw *hw;
1495 	uint64_t rx_offload;
1496 	struct nfp_net_hw *net_hw;
1497 	uint32_t rxvlan_ctrl = 0;
1498 
1499 	net_hw = nfp_net_get_hw(dev);
1500 	hw = &net_hw->super;
1501 	rx_offload = dev->data->dev_conf.rxmode.offloads;
1502 	new_ctrl = hw->ctrl;
1503 
1504 	/* VLAN stripping setting */
1505 	if ((mask & RTE_ETH_VLAN_STRIP_MASK) != 0) {
1506 		nfp_net_enable_rxvlan_cap(net_hw, &rxvlan_ctrl);
1507 		if ((rx_offload & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) != 0)
1508 			new_ctrl |= rxvlan_ctrl;
1509 		else
1510 			new_ctrl &= ~rxvlan_ctrl;
1511 	}
1512 
1513 	/* QinQ stripping setting */
1514 	if ((mask & RTE_ETH_QINQ_STRIP_MASK) != 0) {
1515 		if ((rx_offload & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) != 0)
1516 			new_ctrl |= NFP_NET_CFG_CTRL_RXQINQ;
1517 		else
1518 			new_ctrl &= ~NFP_NET_CFG_CTRL_RXQINQ;
1519 	}
1520 
1521 	if (new_ctrl == hw->ctrl)
1522 		return 0;
1523 
1524 	update = NFP_NET_CFG_UPDATE_GEN;
1525 
1526 	ret = nfp_reconfig(hw, new_ctrl, update);
1527 	if (ret != 0)
1528 		return ret;
1529 
1530 	hw->ctrl = new_ctrl;
1531 
1532 	return 0;
1533 }
1534 
1535 static int
1536 nfp_net_rss_reta_write(struct rte_eth_dev *dev,
1537 		struct rte_eth_rss_reta_entry64 *reta_conf,
1538 		uint16_t reta_size)
1539 {
1540 	uint16_t i;
1541 	uint16_t j;
1542 	uint16_t idx;
1543 	uint8_t mask;
1544 	uint32_t reta;
1545 	uint16_t shift;
1546 	struct nfp_hw *hw;
1547 	struct nfp_net_hw *net_hw;
1548 
1549 	net_hw = nfp_net_get_hw(dev);
1550 	hw = &net_hw->super;
1551 
1552 	if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
1553 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured (%hu)"
1554 				" doesn't match hardware can supported (%d)",
1555 				reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
1556 		return -EINVAL;
1557 	}
1558 
1559 	/*
1560 	 * Update the redirection table. There are 128 8-bit entries which can be
1561 	 * managed as 32 32-bit entries.
1562 	 */
1563 	for (i = 0; i < reta_size; i += 4) {
1564 		/* Handling 4 RSS entries per loop */
1565 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
1566 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
1567 		mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
1568 		if (mask == 0)
1569 			continue;
1570 
1571 		reta = 0;
1572 
1573 		/* If all 4 entries were set, don't need read RETA register */
1574 		if (mask != 0xF)
1575 			reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + i);
1576 
1577 		for (j = 0; j < 4; j++) {
1578 			if ((mask & (0x1 << j)) == 0)
1579 				continue;
1580 
1581 			/* Clearing the entry bits */
1582 			if (mask != 0xF)
1583 				reta &= ~(0xFF << (8 * j));
1584 
1585 			reta |= reta_conf[idx].reta[shift + j] << (8 * j);
1586 		}
1587 
1588 		nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift, reta);
1589 	}
1590 
1591 	return 0;
1592 }
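
/*
 * Worked example of the packing done above: with mask == 0xF and the four
 * redirection entries {0, 1, 2, 3}, the 32-bit word written to the RETA
 * register is (3 << 24) | (2 << 16) | (1 << 8) | 0 == 0x03020100, i.e. one
 * byte per redirection entry.
 */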
1593 
1594 /* Update the Redirection Table (RETA) for Receive Side Scaling of the Ethernet device */
1595 int
1596 nfp_net_reta_update(struct rte_eth_dev *dev,
1597 		struct rte_eth_rss_reta_entry64 *reta_conf,
1598 		uint16_t reta_size)
1599 {
1600 	int ret;
1601 	uint32_t update;
1602 	struct nfp_hw *hw;
1603 	struct nfp_net_hw *net_hw;
1604 
1605 	net_hw = nfp_net_get_hw(dev);
1606 	hw = &net_hw->super;
1607 
1608 	if ((hw->ctrl & NFP_NET_CFG_CTRL_RSS_ANY) == 0)
1609 		return -EINVAL;
1610 
1611 	ret = nfp_net_rss_reta_write(dev, reta_conf, reta_size);
1612 	if (ret != 0)
1613 		return ret;
1614 
1615 	update = NFP_NET_CFG_UPDATE_RSS;
1616 
1617 	if (nfp_reconfig(hw, hw->ctrl, update) != 0)
1618 		return -EIO;
1619 
1620 	return 0;
1621 }
1622 
1623 /* Query the Redirection Table (RETA) for Receive Side Scaling of the Ethernet device. */
1624 int
1625 nfp_net_reta_query(struct rte_eth_dev *dev,
1626 		struct rte_eth_rss_reta_entry64 *reta_conf,
1627 		uint16_t reta_size)
1628 {
1629 	uint16_t i;
1630 	uint16_t j;
1631 	uint16_t idx;
1632 	uint8_t mask;
1633 	uint32_t reta;
1634 	uint16_t shift;
1635 	struct nfp_hw *hw;
1636 	struct nfp_net_hw *net_hw;
1637 
1638 	net_hw = nfp_net_get_hw(dev);
1639 	hw = &net_hw->super;
1640 
1641 	if ((hw->ctrl & NFP_NET_CFG_CTRL_RSS_ANY) == 0)
1642 		return -EINVAL;
1643 
1644 	if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
1645 		PMD_DRV_LOG(ERR, "The size of hash lookup table configured (%d)"
1646 				" doesn't match hardware can supported (%d)",
1647 				reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
1648 		return -EINVAL;
1649 	}
1650 
1651 	/*
1652 	 * Reading the redirection table. There are 128 8-bit entries which can be
1653 	 * managed as 32 32-bit entries.
1654 	 */
1655 	for (i = 0; i < reta_size; i += 4) {
1656 		/* Handling 4 RSS entries per loop */
1657 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
1658 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
1659 		mask = (reta_conf[idx].mask >> shift) & 0xF;
1660 
1661 		if (mask == 0)
1662 			continue;
1663 
1664 		reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift);
1665 		for (j = 0; j < 4; j++) {
1666 			if ((mask & (0x1 << j)) == 0)
1667 				continue;
1668 
1669 			reta_conf[idx].reta[shift + j] =
1670 					(uint8_t)((reta >> (8 * j)) & 0xFF);
1671 		}
1672 	}
1673 
1674 	return 0;
1675 }
1676 
1677 static int
1678 nfp_net_rss_hash_write(struct rte_eth_dev *dev,
1679 		struct rte_eth_rss_conf *rss_conf)
1680 {
1681 	uint8_t i;
1682 	uint8_t key;
1683 	uint64_t rss_hf;
1684 	struct nfp_hw *hw;
1685 	struct nfp_net_hw *net_hw;
1686 	uint32_t cfg_rss_ctrl = 0;
1687 
1688 	net_hw = nfp_net_get_hw(dev);
1689 	hw = &net_hw->super;
1690 
1691 	/* Writing the key byte by byte */
1692 	for (i = 0; i < rss_conf->rss_key_len; i++) {
1693 		memcpy(&key, &rss_conf->rss_key[i], 1);
1694 		nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY + i, key);
1695 	}
1696 
1697 	rss_hf = rss_conf->rss_hf;
1698 
1699 	if ((rss_hf & RTE_ETH_RSS_IPV4) != 0)
1700 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4;
1701 
1702 	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) != 0)
1703 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_TCP;
1704 
1705 	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) != 0)
1706 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_UDP;
1707 
1708 	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_SCTP) != 0)
1709 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_SCTP;
1710 
1711 	if ((rss_hf & RTE_ETH_RSS_IPV6) != 0)
1712 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6;
1713 
1714 	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) != 0)
1715 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_TCP;
1716 
1717 	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) != 0)
1718 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_UDP;
1719 
1720 	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_SCTP) != 0)
1721 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_SCTP;
1722 
1723 	cfg_rss_ctrl |= NFP_NET_CFG_RSS_MASK;
1724 	cfg_rss_ctrl |= NFP_NET_CFG_RSS_TOEPLITZ;
1725 
1726 	/* Configuring where to apply the RSS hash */
1727 	nn_cfg_writel(hw, NFP_NET_CFG_RSS_CTRL, cfg_rss_ctrl);
1728 
1729 	/* Writing the key size */
1730 	nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY_SZ, rss_conf->rss_key_len);
1731 
1732 	return 0;
1733 }
1734 
1735 int
1736 nfp_net_rss_hash_update(struct rte_eth_dev *dev,
1737 		struct rte_eth_rss_conf *rss_conf)
1738 {
1739 	uint32_t update;
1740 	uint64_t rss_hf;
1741 	struct nfp_hw *hw;
1742 	struct nfp_net_hw *net_hw;
1743 
1744 	net_hw = nfp_net_get_hw(dev);
1745 	hw = &net_hw->super;
1746 
1747 	rss_hf = rss_conf->rss_hf;
1748 
1749 	/* Checking if RSS is enabled */
1750 	if ((hw->ctrl & NFP_NET_CFG_CTRL_RSS_ANY) == 0) {
1751 		if (rss_hf != 0) {
1752 			PMD_DRV_LOG(ERR, "RSS unsupported");
1753 			return -EINVAL;
1754 		}
1755 
1756 		return 0; /* Nothing to do */
1757 	}
1758 
1759 	if (rss_conf->rss_key_len > NFP_NET_CFG_RSS_KEY_SZ) {
1760 		PMD_DRV_LOG(ERR, "RSS hash key too long");
1761 		return -EINVAL;
1762 	}
1763 
1764 	nfp_net_rss_hash_write(dev, rss_conf);
1765 
1766 	update = NFP_NET_CFG_UPDATE_RSS;
1767 
1768 	if (nfp_reconfig(hw, hw->ctrl, update) != 0)
1769 		return -EIO;
1770 
1771 	return 0;
1772 }
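
/*
 * Application-side sketch (illustrative): updating the RSS configuration at
 * runtime through the ethdev API, which calls nfp_net_rss_hash_update(). The
 * key length here is illustrative and must not exceed NFP_NET_CFG_RSS_KEY_SZ.
 *
 *	uint8_t key[40] = { 0 };  // fill with a real hash key
 *	struct rte_eth_rss_conf conf = {
 *		.rss_key = key,
 *		.rss_key_len = sizeof(key),
 *		.rss_hf = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_TCP,
 *	};
 *
 *	if (rte_eth_dev_rss_hash_update(port_id, &conf) != 0)
 *		printf("RSS hash update failed\n");
 */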
1773 
1774 int
1775 nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev,
1776 		struct rte_eth_rss_conf *rss_conf)
1777 {
1778 	uint8_t i;
1779 	uint8_t key;
1780 	uint64_t rss_hf;
1781 	struct nfp_hw *hw;
1782 	uint32_t cfg_rss_ctrl;
1783 	struct nfp_net_hw *net_hw;
1784 
1785 	net_hw = nfp_net_get_hw(dev);
1786 	hw = &net_hw->super;
1787 
1788 	if ((hw->ctrl & NFP_NET_CFG_CTRL_RSS_ANY) == 0)
1789 		return -EINVAL;
1790 
1791 	rss_hf = rss_conf->rss_hf;
1792 	cfg_rss_ctrl = nn_cfg_readl(hw, NFP_NET_CFG_RSS_CTRL);
1793 
1794 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4) != 0)
1795 		rss_hf |= RTE_ETH_RSS_IPV4;
1796 
1797 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_TCP) != 0)
1798 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
1799 
1800 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_TCP) != 0)
1801 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
1802 
1803 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_UDP) != 0)
1804 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
1805 
1806 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_UDP) != 0)
1807 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
1808 
1809 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6) != 0)
1810 		rss_hf |= RTE_ETH_RSS_IPV6;
1811 
1812 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_SCTP) != 0)
1813 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_SCTP;
1814 
1815 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_SCTP) != 0)
1816 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_SCTP;
1817 
1818 	/* Propagate current RSS hash functions to caller */
1819 	rss_conf->rss_hf = rss_hf;
1820 
1821 	/* Reading the key size */
1822 	rss_conf->rss_key_len = nn_cfg_readl(hw, NFP_NET_CFG_RSS_KEY_SZ);
1823 
1824 	/* Reading the key byte by byte */
1825 	for (i = 0; i < rss_conf->rss_key_len; i++) {
1826 		key = nn_cfg_readb(hw, NFP_NET_CFG_RSS_KEY + i);
1827 		memcpy(&rss_conf->rss_key[i], &key, 1);
1828 	}
1829 
1830 	return 0;
1831 }
1832 
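/*
 * Install a default RSS configuration: fill both 64-entry redirection table
 * chunks (128 entries in total) by distributing the Rx queues round-robin,
 * then program the RSS hash parameters taken from the device configuration.
 */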
1833 int
1834 nfp_net_rss_config_default(struct rte_eth_dev *dev)
1835 {
1836 	int ret;
1837 	uint8_t i;
1838 	uint8_t j;
1839 	uint16_t queue = 0;
1840 	struct rte_eth_conf *dev_conf;
1841 	struct rte_eth_rss_conf rss_conf;
1842 	uint16_t rx_queues = dev->data->nb_rx_queues;
1843 	struct rte_eth_rss_reta_entry64 nfp_reta_conf[2];
1844 
1845 	nfp_reta_conf[0].mask = ~0x0;
1846 	nfp_reta_conf[1].mask = ~0x0;
1847 
1848 	for (i = 0; i < 0x40; i += 8) {
1849 		for (j = i; j < (i + 8); j++) {
1850 			nfp_reta_conf[0].reta[j] = queue;
1851 			nfp_reta_conf[1].reta[j] = queue++;
1852 			queue %= rx_queues;
1853 		}
1854 	}
1855 
1856 	ret = nfp_net_rss_reta_write(dev, nfp_reta_conf, 0x80);
1857 	if (ret != 0)
1858 		return ret;
1859 
1860 	dev_conf = &dev->data->dev_conf;
1861 	if (dev_conf == NULL) {
1862 		PMD_DRV_LOG(ERR, "Wrong rss conf");
1863 		return -EINVAL;
1864 	}
1865 
1866 	rss_conf = dev_conf->rx_adv_conf.rss_conf;
1867 	ret = nfp_net_rss_hash_write(dev, &rss_conf);
1868 
1869 	return ret;
1870 }
1871 
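/* Reset all Rx queues and mark them as stopped. */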
1872 void
1873 nfp_net_stop_rx_queue(struct rte_eth_dev *dev)
1874 {
1875 	uint16_t i;
1876 	struct nfp_net_rxq *this_rx_q;
1877 
1878 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
1879 		this_rx_q = dev->data->rx_queues[i];
1880 		nfp_net_reset_rx_queue(this_rx_q);
1881 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
1882 	}
1883 }
1884 
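/* Reset all Rx queues and release their resources. */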
1885 void
1886 nfp_net_close_rx_queue(struct rte_eth_dev *dev)
1887 {
1888 	uint16_t i;
1889 	struct nfp_net_rxq *this_rx_q;
1890 
1891 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
1892 		this_rx_q = dev->data->rx_queues[i];
1893 		nfp_net_reset_rx_queue(this_rx_q);
1894 		nfp_net_rx_queue_release(dev, i);
1895 	}
1896 }
1897 
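/* Reset all Tx queues and mark them as stopped. */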
1898 void
1899 nfp_net_stop_tx_queue(struct rte_eth_dev *dev)
1900 {
1901 	uint16_t i;
1902 	struct nfp_net_txq *this_tx_q;
1903 
1904 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
1905 		this_tx_q = dev->data->tx_queues[i];
1906 		nfp_net_reset_tx_queue(this_tx_q);
1907 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
1908 	}
1909 }
1910 
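/* Reset all Tx queues and release their resources. */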
1911 void
1912 nfp_net_close_tx_queue(struct rte_eth_dev *dev)
1913 {
1914 	uint16_t i;
1915 	struct nfp_net_txq *this_tx_q;
1916 
1917 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
1918 		this_tx_q = dev->data->tx_queues[i];
1919 		nfp_net_reset_tx_queue(this_tx_q);
1920 		nfp_net_tx_queue_release(dev, i);
1921 	}
1922 }
1923 
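/*
 * Record a VXLAN destination UDP port at the given table index and program
 * the whole table into the firmware.
 *
 * Ports are packed two per 32-bit configuration word: the even-indexed port
 * occupies the low 16 bits and the following odd-indexed port the high 16
 * bits, i.e. for each pair
 *
 *     word = (vxlan_ports[i + 1] << 16) | vxlan_ports[i];
 *
 * A VXLAN update is then requested from the firmware under the reconfig lock.
 */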
1924 int
1925 nfp_net_set_vxlan_port(struct nfp_net_hw *net_hw,
1926 		size_t idx,
1927 		uint16_t port)
1928 {
1929 	int ret;
1930 	uint32_t i;
1931 	struct nfp_hw *hw = &net_hw->super;
1932 
1933 	if (idx >= NFP_NET_N_VXLAN_PORTS) {
1934 		PMD_DRV_LOG(ERR, "The idx value is out of range.");
1935 		return -ERANGE;
1936 	}
1937 
1938 	net_hw->vxlan_ports[idx] = port;
1939 
1940 	for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i += 2) {
1941 		nn_cfg_writel(hw, NFP_NET_CFG_VXLAN_PORT + i * sizeof(port),
1942 				(net_hw->vxlan_ports[i + 1] << 16) | net_hw->vxlan_ports[i]);
1943 	}
1944 
1945 	rte_spinlock_lock(&hw->reconfig_lock);
1946 
1947 	nn_cfg_writel(hw, NFP_NET_CFG_UPDATE, NFP_NET_CFG_UPDATE_VXLAN);
1948 	rte_wmb();
1949 
1950 	ret = nfp_reconfig_real(hw, NFP_NET_CFG_UPDATE_VXLAN);
1951 
1952 	rte_spinlock_unlock(&hw->reconfig_lock);
1953 
1954 	return ret;
1955 }
1956 
1957 /*
1958  * Firmware using the NFD3 datapath cannot handle DMA addresses
1959  * requiring more than 40 bits.
1960  */
1961 int
1962 nfp_net_check_dma_mask(struct nfp_net_hw *hw,
1963 		char *name)
1964 {
1965 	if (hw->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3 &&
1966 			rte_mem_check_dma_mask(40) != 0) {
1967 		PMD_DRV_LOG(ERR, "Device %s cannot be used: DMA mask restricted to 40 bits.",
1968 				name);
1969 		return -ENODEV;
1970 	}
1971 
1972 	return 0;
1973 }
1974 
1975 void
1976 nfp_net_init_metadata_format(struct nfp_net_hw *hw)
1977 {
1978 	/*
1979 	 * ABI 4.x and the ctrl vNIC always use chained metadata. Otherwise, single
1980 	 * metadata is used only when the hardware capability advertises just RSS(v1);
1981 	 * support for RSS(v2) also indicates that chained metadata is in use.
1982 	 */
1983 	if (hw->ver.major == 4) {
1984 		hw->meta_format = NFP_NET_METAFORMAT_CHAINED;
1985 	} else if ((hw->super.cap & NFP_NET_CFG_CTRL_CHAIN_META) != 0) {
1986 		hw->meta_format = NFP_NET_METAFORMAT_CHAINED;
1987 		/*
1988 		 * RSS(v1) is incompatible with chained metadata. Since hw->super.cap only
1989 		 * describes the firmware's capabilities, not its configuration, clear the
1990 		 * RSS(v1) bit here so that hw->super.cap can still be used to identify RSS later.
1991 		 */
1992 		hw->super.cap &= ~NFP_NET_CFG_CTRL_RSS;
1993 	} else {
1994 		hw->meta_format = NFP_NET_METAFORMAT_SINGLE;
1995 	}
1996 }
1997 
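/* Read the firmware version word from the config BAR and split it into its fields. */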
1998 void
1999 nfp_net_cfg_read_version(struct nfp_net_hw *hw)
2000 {
2001 	union {
2002 		uint32_t whole;
2003 		struct nfp_net_fw_ver split;
2004 	} version;
2005 
2006 	version.whole = nn_cfg_readl(&hw->super, NFP_NET_CFG_VERSION);
2007 	hw->ver = version.split;
2008 }
2009 
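/* Format the NSP ABI version as "major.minor". */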
2010 static void
2011 nfp_net_get_nsp_info(struct nfp_net_hw *hw,
2012 		char *nsp_version)
2013 {
2014 	struct nfp_nsp *nsp;
2015 
2016 	nsp = nfp_nsp_open(hw->cpp);
2017 	if (nsp == NULL)
2018 		return;
2019 
2020 	snprintf(nsp_version, FW_VER_LEN, "%hu.%hu",
2021 			nfp_nsp_get_abi_ver_major(nsp),
2022 			nfp_nsp_get_abi_ver_minor(nsp));
2023 
2024 	nfp_nsp_close(nsp);
2025 }
2026 
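/* Read the name of the loaded firmware from the MIP. */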
2027 static void
2028 nfp_net_get_mip_name(struct nfp_net_hw *hw,
2029 		char *mip_name)
2030 {
2031 	struct nfp_mip *mip;
2032 
2033 	mip = nfp_mip_open(hw->cpp);
2034 	if (mip == NULL)
2035 		return;
2036 
2037 	snprintf(mip_name, FW_VER_LEN, "%s", nfp_mip_name(mip));
2038 
2039 	nfp_mip_close(mip);
2040 }
2041 
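/* Map the application firmware ID to a human-readable name. */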
2042 static void
2043 nfp_net_get_app_name(struct nfp_net_hw *hw,
2044 		char *app_name)
2045 {
2046 	switch (hw->pf_dev->app_fw_id) {
2047 	case NFP_APP_FW_CORE_NIC:
2048 		snprintf(app_name, FW_VER_LEN, "%s", "nic");
2049 		break;
2050 	case NFP_APP_FW_FLOWER_NIC:
2051 		snprintf(app_name, FW_VER_LEN, "%s", "flower");
2052 		break;
2053 	default:
2054 		snprintf(app_name, FW_VER_LEN, "%s", "unknown");
2055 		break;
2056 	}
2057 }
2058 
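/*
 * Compose the firmware version string reported to the application from the
 * vNIC ABI version, the NSP ABI version, the MIP firmware name and the
 * application firmware name. When the caller's buffer is smaller than
 * FW_VER_LEN, the required buffer size is returned instead.
 */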
2059 int
2060 nfp_net_firmware_version_get(struct rte_eth_dev *dev,
2061 		char *fw_version,
2062 		size_t fw_size)
2063 {
2064 	struct nfp_net_hw *hw;
2065 	char mip_name[FW_VER_LEN];
2066 	char app_name[FW_VER_LEN];
2067 	char nsp_version[FW_VER_LEN];
2068 	char vnic_version[FW_VER_LEN];
2069 
2070 	if (fw_size < FW_VER_LEN)
2071 		return FW_VER_LEN;
2072 
2073 	hw = nfp_net_get_hw(dev);
2074 
2075 	if ((dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR) != 0) {
2076 		snprintf(vnic_version, FW_VER_LEN, "%d.%d.%d.%d",
2077 			hw->ver.extend, hw->ver.class,
2078 			hw->ver.major, hw->ver.minor);
2079 	} else {
2080 		snprintf(vnic_version, FW_VER_LEN, "*");
2081 	}
2082 
2083 	nfp_net_get_nsp_info(hw, nsp_version);
2084 	nfp_net_get_mip_name(hw, mip_name);
2085 	nfp_net_get_app_name(hw, app_name);
2086 
2087 	snprintf(fw_version, FW_VER_LEN, "%s %s %s %s",
2088 			vnic_version, nsp_version, mip_name, app_name);
2089 
2090 	return 0;
2091 }
2092 
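/*
 * Check that the NFD datapath version advertised by the firmware is one this
 * PMD can drive: NFD3, or NFDK with ABI major version 5 or newer.
 */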
2093 bool
2094 nfp_net_is_valid_nfd_version(struct nfp_net_fw_ver version)
2095 {
2096 	uint8_t nfd_version = version.extend;
2097 
2098 	if (nfd_version == NFP_NET_CFG_VERSION_DP_NFD3)
2099 		return true;
2100 
2101 	if (nfd_version == NFP_NET_CFG_VERSION_DP_NFDK) {
2102 		if (version.major < 5) {
2103 			PMD_INIT_LOG(ERR, "NFDK must use ABI 5 or newer, found: %d",
2104 					version.major);
2105 			return false;
2106 		}
2107 
2108 		return true;
2109 	}
2110 
2111 	return false;
2112 }
2113 
2114 /* Disable rx and tx functions to allow for reconfiguring. */
2115 int
2116 nfp_net_stop(struct rte_eth_dev *dev)
2117 {
2118 	struct nfp_cpp *cpp;
2119 	struct nfp_net_hw *hw;
2120 
2121 	hw = nfp_net_get_hw(dev);
2122 
2123 	nfp_net_disable_queues(dev);
2124 
2125 	/* Clear queues */
2126 	nfp_net_stop_tx_queue(dev);
2127 	nfp_net_stop_rx_queue(dev);
2128 
2129 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
2130 		cpp = hw->cpp;
2131 	else
2132 		cpp = ((struct nfp_pf_dev *)(dev->process_private))->cpp;
2133 
2134 	nfp_eth_set_configured(cpp, hw->nfp_idx, 0);
2135 
2136 	return 0;
2137 }
2138 
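/* Derive the RTE_ETH_FC_* mode from the port's Rx/Tx pause enable flags. */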
2139 static enum rte_eth_fc_mode
2140 nfp_net_get_pause_mode(struct nfp_eth_table_port *eth_port)
2141 {
2142 	enum rte_eth_fc_mode mode;
2143 
2144 	if (eth_port->rx_pause_enabled) {
2145 		if (eth_port->tx_pause_enabled)
2146 			mode = RTE_ETH_FC_FULL;
2147 		else
2148 			mode = RTE_ETH_FC_RX_PAUSE;
2149 	} else {
2150 		if (eth_port->tx_pause_enabled)
2151 			mode = RTE_ETH_FC_TX_PAUSE;
2152 		else
2153 			mode = RTE_ETH_FC_NONE;
2154 	}
2155 
2156 	return mode;
2157 }
2158 
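/* Report the current pause frame (flow control) mode of the physical port. */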
2159 int
2160 nfp_net_flow_ctrl_get(struct rte_eth_dev *dev,
2161 		struct rte_eth_fc_conf *fc_conf)
2162 {
2163 	struct nfp_net_hw *net_hw;
2164 	struct nfp_eth_table *nfp_eth_table;
2165 	struct nfp_eth_table_port *eth_port;
2166 
2167 	net_hw = nfp_net_get_hw(dev);
2168 	if (net_hw->pf_dev == NULL)
2169 		return -EINVAL;
2170 
2171 	nfp_eth_table = net_hw->pf_dev->nfp_eth_table;
2172 	eth_port = &nfp_eth_table->ports[dev->data->port_id];
2173 
2174 	/* Currently only the RX/TX pause frame on/off switch is supported */
2175 	fc_conf->mode = nfp_net_get_pause_mode(eth_port);
2176 
2177 	return 0;
2178 }
2179 
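/*
 * Program the Rx/Tx pause frame settings through the NSP: open a port
 * configuration session, set both directions according to the requested
 * mode and commit the result.
 */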
2180 static int
2181 nfp_net_pause_frame_set(struct nfp_net_hw *net_hw,
2182 		struct nfp_eth_table_port *eth_port,
2183 		enum rte_eth_fc_mode mode)
2184 {
2185 	int err;
2186 	bool flag;
2187 	struct nfp_nsp *nsp;
2188 
2189 	nsp = nfp_eth_config_start(net_hw->cpp, eth_port->index);
2190 	if (nsp == NULL) {
2191 		PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle.");
2192 		return -EIO;
2193 	}
2194 
2195 	flag = (mode & RTE_ETH_FC_TX_PAUSE) != 0;
2196 	err = nfp_eth_set_tx_pause(nsp, flag);
2197 	if (err != 0) {
2198 		PMD_DRV_LOG(ERR, "Failed to configure TX pause frame.");
2199 		nfp_eth_config_cleanup_end(nsp);
2200 		return err;
2201 	}
2202 
2203 	flag = (mode & RTE_ETH_FC_RX_PAUSE) != 0;
2204 	err = nfp_eth_set_rx_pause(nsp, flag);
2205 	if (err != 0) {
2206 		PMD_DRV_LOG(ERR, "Failed to configure RX pause frame.");
2207 		nfp_eth_config_cleanup_end(nsp);
2208 		return err;
2209 	}
2210 
2211 	err = nfp_eth_config_commit_end(nsp);
2212 	if (err != 0) {
2213 		PMD_DRV_LOG(ERR, "Failed to configure pause frame.");
2214 		return err;
2215 	}
2216 
2217 	return 0;
2218 }
2219 
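/*
 * Apply a new flow control (pause frame) mode to the physical port. Nothing
 * is written to hardware when the requested mode already matches the current
 * one; otherwise the NSP is used to update the pause settings and the cached
 * eth_table state is refreshed accordingly.
 *
 * Caller-side sketch, assuming (not shown here) that this handler is
 * registered as the port's flow_ctrl_set dev op:
 *
 *     struct rte_eth_fc_conf fc_conf = { .mode = RTE_ETH_FC_FULL };
 *     rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
 */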
2220 int
2221 nfp_net_flow_ctrl_set(struct rte_eth_dev *dev,
2222 		struct rte_eth_fc_conf *fc_conf)
2223 {
2224 	int ret;
2225 	struct nfp_net_hw *net_hw;
2226 	enum rte_eth_fc_mode set_mode;
2227 	enum rte_eth_fc_mode original_mode;
2228 	struct nfp_eth_table *nfp_eth_table;
2229 	struct nfp_eth_table_port *eth_port;
2230 
2231 	net_hw = nfp_net_get_hw(dev);
2232 	if (net_hw->pf_dev == NULL)
2233 		return -EINVAL;
2234 
2235 	nfp_eth_table = net_hw->pf_dev->nfp_eth_table;
2236 	eth_port = &nfp_eth_table->ports[net_hw->idx];
2237 
2238 	original_mode = nfp_net_get_pause_mode(eth_port);
2239 	set_mode = fc_conf->mode;
2240 
2241 	if (set_mode == original_mode)
2242 		return 0;
2243 
2244 	ret = nfp_net_pause_frame_set(net_hw, eth_port, set_mode);
2245 	if (ret != 0)
2246 		return ret;
2247 
2248 	/* Update eth_table after modifying RX/TX pause frame mode. */
2249 	eth_port->tx_pause_enabled = (set_mode & RTE_ETH_FC_TX_PAUSE) != 0;
2250 	eth_port->rx_pause_enabled = (set_mode & RTE_ETH_FC_RX_PAUSE) != 0;
2251 
2252 	return 0;
2253 }
2254