xref: /dpdk/drivers/net/nfp/nfp_net_common.c (revision e9fd1ebf981f361844aea9ec94e17f4bda5e1479)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2014-2018 Netronome Systems, Inc.
3  * All rights reserved.
4  *
5  * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
6  */
7 
8 #include "nfp_net_common.h"
9 
10 #include <rte_alarm.h>
11 
12 #include "flower/nfp_flower_representor.h"
13 #include "nfd3/nfp_nfd3.h"
14 #include "nfdk/nfp_nfdk.h"
15 #include "nfpcore/nfp_mip.h"
16 #include "nfpcore/nfp_nsp.h"
17 #include "nfp_logs.h"
18 
19 #define NFP_TX_MAX_SEG       UINT8_MAX
20 #define NFP_TX_MAX_MTU_SEG   8
21 
22 #define NFP_NET_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
23 #define NFP_NET_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
24 
25 #define DEFAULT_FLBUF_SIZE        9216
26 #define NFP_ETH_OVERHEAD \
27 	(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + RTE_VLAN_HLEN * 2)
28 
29 /* Only show FEC capability supported by the current speed. */
30 #define NFP_FEC_CAPA_ENTRY_NUM  1
31 
32 enum nfp_xstat_group {
33 	NFP_XSTAT_GROUP_NET,
34 	NFP_XSTAT_GROUP_MAC
35 };
36 
37 struct nfp_xstat {
38 	char name[RTE_ETH_XSTATS_NAME_SIZE];
39 	int offset;
40 	enum nfp_xstat_group group;
41 };
42 
43 #define NFP_XSTAT_NET(_name, _offset) {                 \
44 	.name = _name,                                  \
45 	.offset = NFP_NET_CFG_STATS_##_offset,          \
46 	.group = NFP_XSTAT_GROUP_NET,                   \
47 }
48 
49 #define NFP_XSTAT_MAC(_name, _offset) {                 \
50 	.name = _name,                                  \
51 	.offset = NFP_MAC_STATS_##_offset,              \
52 	.group = NFP_XSTAT_GROUP_MAC,                   \
53 }
54 
55 static const struct nfp_xstat nfp_net_xstats[] = {
56 	/*
57 	 * Basic xstats available on both VF and PF.
58 	 * Note that in case new statistics of group NFP_XSTAT_GROUP_NET
59 	 * are added to this array, they must appear before any statistics
60 	 * of group NFP_XSTAT_GROUP_MAC.
61 	 */
62 	NFP_XSTAT_NET("rx_good_packets_mc", RX_MC_FRAMES),
63 	NFP_XSTAT_NET("tx_good_packets_mc", TX_MC_FRAMES),
64 	NFP_XSTAT_NET("rx_good_packets_bc", RX_BC_FRAMES),
65 	NFP_XSTAT_NET("tx_good_packets_bc", TX_BC_FRAMES),
66 	NFP_XSTAT_NET("rx_good_bytes_uc", RX_UC_OCTETS),
67 	NFP_XSTAT_NET("tx_good_bytes_uc", TX_UC_OCTETS),
68 	NFP_XSTAT_NET("rx_good_bytes_mc", RX_MC_OCTETS),
69 	NFP_XSTAT_NET("tx_good_bytes_mc", TX_MC_OCTETS),
70 	NFP_XSTAT_NET("rx_good_bytes_bc", RX_BC_OCTETS),
71 	NFP_XSTAT_NET("tx_good_bytes_bc", TX_BC_OCTETS),
72 	NFP_XSTAT_NET("tx_missed_errors", TX_DISCARDS),
73 	NFP_XSTAT_NET("bpf_pass_pkts", APP0_FRAMES),
74 	NFP_XSTAT_NET("bpf_pass_bytes", APP0_BYTES),
75 	NFP_XSTAT_NET("bpf_app1_pkts", APP1_FRAMES),
76 	NFP_XSTAT_NET("bpf_app1_bytes", APP1_BYTES),
77 	NFP_XSTAT_NET("bpf_app2_pkts", APP2_FRAMES),
78 	NFP_XSTAT_NET("bpf_app2_bytes", APP2_BYTES),
79 	NFP_XSTAT_NET("bpf_app3_pkts", APP3_FRAMES),
80 	NFP_XSTAT_NET("bpf_app3_bytes", APP3_BYTES),
81 	/*
82 	 * MAC xstats available only on PF. These statistics are not available for VFs because the
83 	 * PF is not initialized when the VF is initialized, as the PF is still bound to the kernel
84 	 * driver. As such, the PMD cannot obtain a CPP handle and access the rtsym_table in order
85 	 * to get the pointer to the start of the MAC statistics counters.
86 	 */
87 	NFP_XSTAT_MAC("mac.rx_octets", RX_IN_OCTS),
88 	NFP_XSTAT_MAC("mac.rx_frame_too_long_errors", RX_FRAME_TOO_LONG_ERRORS),
89 	NFP_XSTAT_MAC("mac.rx_range_length_errors", RX_RANGE_LENGTH_ERRORS),
90 	NFP_XSTAT_MAC("mac.rx_vlan_received_ok", RX_VLAN_RECEIVED_OK),
91 	NFP_XSTAT_MAC("mac.rx_errors", RX_IN_ERRORS),
92 	NFP_XSTAT_MAC("mac.rx_broadcast_pkts", RX_IN_BROADCAST_PKTS),
93 	NFP_XSTAT_MAC("mac.rx_drop_events", RX_DROP_EVENTS),
94 	NFP_XSTAT_MAC("mac.rx_alignment_errors", RX_ALIGNMENT_ERRORS),
95 	NFP_XSTAT_MAC("mac.rx_pause_mac_ctrl_frames", RX_PAUSE_MAC_CTRL_FRAMES),
96 	NFP_XSTAT_MAC("mac.rx_frames_received_ok", RX_FRAMES_RECEIVED_OK),
97 	NFP_XSTAT_MAC("mac.rx_frame_check_sequence_errors", RX_FRAME_CHECK_SEQ_ERRORS),
98 	NFP_XSTAT_MAC("mac.rx_unicast_pkts", RX_UNICAST_PKTS),
99 	NFP_XSTAT_MAC("mac.rx_multicast_pkts", RX_MULTICAST_PKTS),
100 	NFP_XSTAT_MAC("mac.rx_pkts", RX_PKTS),
101 	NFP_XSTAT_MAC("mac.rx_undersize_pkts", RX_UNDERSIZE_PKTS),
102 	NFP_XSTAT_MAC("mac.rx_pkts_64_octets", RX_PKTS_64_OCTS),
103 	NFP_XSTAT_MAC("mac.rx_pkts_65_to_127_octets", RX_PKTS_65_TO_127_OCTS),
104 	NFP_XSTAT_MAC("mac.rx_pkts_128_to_255_octets", RX_PKTS_128_TO_255_OCTS),
105 	NFP_XSTAT_MAC("mac.rx_pkts_256_to_511_octets", RX_PKTS_256_TO_511_OCTS),
106 	NFP_XSTAT_MAC("mac.rx_pkts_512_to_1023_octets", RX_PKTS_512_TO_1023_OCTS),
107 	NFP_XSTAT_MAC("mac.rx_pkts_1024_to_1518_octets", RX_PKTS_1024_TO_1518_OCTS),
108 	NFP_XSTAT_MAC("mac.rx_pkts_1519_to_max_octets", RX_PKTS_1519_TO_MAX_OCTS),
109 	NFP_XSTAT_MAC("mac.rx_jabbers", RX_JABBERS),
110 	NFP_XSTAT_MAC("mac.rx_fragments", RX_FRAGMENTS),
111 	NFP_XSTAT_MAC("mac.rx_oversize_pkts", RX_OVERSIZE_PKTS),
112 	NFP_XSTAT_MAC("mac.rx_pause_frames_class0", RX_PAUSE_FRAMES_CLASS0),
113 	NFP_XSTAT_MAC("mac.rx_pause_frames_class1", RX_PAUSE_FRAMES_CLASS1),
114 	NFP_XSTAT_MAC("mac.rx_pause_frames_class2", RX_PAUSE_FRAMES_CLASS2),
115 	NFP_XSTAT_MAC("mac.rx_pause_frames_class3", RX_PAUSE_FRAMES_CLASS3),
116 	NFP_XSTAT_MAC("mac.rx_pause_frames_class4", RX_PAUSE_FRAMES_CLASS4),
117 	NFP_XSTAT_MAC("mac.rx_pause_frames_class5", RX_PAUSE_FRAMES_CLASS5),
118 	NFP_XSTAT_MAC("mac.rx_pause_frames_class6", RX_PAUSE_FRAMES_CLASS6),
119 	NFP_XSTAT_MAC("mac.rx_pause_frames_class7", RX_PAUSE_FRAMES_CLASS7),
120 	NFP_XSTAT_MAC("mac.rx_mac_ctrl_frames_received", RX_MAC_CTRL_FRAMES_REC),
121 	NFP_XSTAT_MAC("mac.rx_mac_head_drop", RX_MAC_HEAD_DROP),
122 	NFP_XSTAT_MAC("mac.tx_queue_drop", TX_QUEUE_DROP),
123 	NFP_XSTAT_MAC("mac.tx_octets", TX_OUT_OCTS),
124 	NFP_XSTAT_MAC("mac.tx_vlan_transmitted_ok", TX_VLAN_TRANSMITTED_OK),
125 	NFP_XSTAT_MAC("mac.tx_errors", TX_OUT_ERRORS),
126 	NFP_XSTAT_MAC("mac.tx_broadcast_pkts", TX_BROADCAST_PKTS),
127 	NFP_XSTAT_MAC("mac.tx_pause_mac_ctrl_frames", TX_PAUSE_MAC_CTRL_FRAMES),
128 	NFP_XSTAT_MAC("mac.tx_frames_transmitted_ok", TX_FRAMES_TRANSMITTED_OK),
129 	NFP_XSTAT_MAC("mac.tx_unicast_pkts", TX_UNICAST_PKTS),
130 	NFP_XSTAT_MAC("mac.tx_multicast_pkts", TX_MULTICAST_PKTS),
131 	NFP_XSTAT_MAC("mac.tx_pkts_64_octets", TX_PKTS_64_OCTS),
132 	NFP_XSTAT_MAC("mac.tx_pkts_65_to_127_octets", TX_PKTS_65_TO_127_OCTS),
133 	NFP_XSTAT_MAC("mac.tx_pkts_128_to_255_octets", TX_PKTS_128_TO_255_OCTS),
134 	NFP_XSTAT_MAC("mac.tx_pkts_256_to_511_octets", TX_PKTS_256_TO_511_OCTS),
135 	NFP_XSTAT_MAC("mac.tx_pkts_512_to_1023_octets", TX_PKTS_512_TO_1023_OCTS),
136 	NFP_XSTAT_MAC("mac.tx_pkts_1024_to_1518_octets", TX_PKTS_1024_TO_1518_OCTS),
137 	NFP_XSTAT_MAC("mac.tx_pkts_1519_to_max_octets", TX_PKTS_1519_TO_MAX_OCTS),
138 	NFP_XSTAT_MAC("mac.tx_pause_frames_class0", TX_PAUSE_FRAMES_CLASS0),
139 	NFP_XSTAT_MAC("mac.tx_pause_frames_class1", TX_PAUSE_FRAMES_CLASS1),
140 	NFP_XSTAT_MAC("mac.tx_pause_frames_class2", TX_PAUSE_FRAMES_CLASS2),
141 	NFP_XSTAT_MAC("mac.tx_pause_frames_class3", TX_PAUSE_FRAMES_CLASS3),
142 	NFP_XSTAT_MAC("mac.tx_pause_frames_class4", TX_PAUSE_FRAMES_CLASS4),
143 	NFP_XSTAT_MAC("mac.tx_pause_frames_class5", TX_PAUSE_FRAMES_CLASS5),
144 	NFP_XSTAT_MAC("mac.tx_pause_frames_class6", TX_PAUSE_FRAMES_CLASS6),
145 	NFP_XSTAT_MAC("mac.tx_pause_frames_class7", TX_PAUSE_FRAMES_CLASS7),
146 };
147 
148 static const uint32_t nfp_net_link_speed_nfp2rte[] = {
149 	[NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = RTE_ETH_SPEED_NUM_NONE,
150 	[NFP_NET_CFG_STS_LINK_RATE_UNKNOWN]     = RTE_ETH_SPEED_NUM_NONE,
151 	[NFP_NET_CFG_STS_LINK_RATE_1G]          = RTE_ETH_SPEED_NUM_1G,
152 	[NFP_NET_CFG_STS_LINK_RATE_10G]         = RTE_ETH_SPEED_NUM_10G,
153 	[NFP_NET_CFG_STS_LINK_RATE_25G]         = RTE_ETH_SPEED_NUM_25G,
154 	[NFP_NET_CFG_STS_LINK_RATE_40G]         = RTE_ETH_SPEED_NUM_40G,
155 	[NFP_NET_CFG_STS_LINK_RATE_50G]         = RTE_ETH_SPEED_NUM_50G,
156 	[NFP_NET_CFG_STS_LINK_RATE_100G]        = RTE_ETH_SPEED_NUM_100G,
157 };
158 
159 static uint16_t
160 nfp_net_link_speed_rte2nfp(uint16_t speed)
161 {
162 	uint16_t i;
163 
164 	for (i = 0; i < RTE_DIM(nfp_net_link_speed_nfp2rte); i++) {
165 		if (speed == nfp_net_link_speed_nfp2rte[i])
166 			return i;
167 	}
168 
169 	return NFP_NET_CFG_STS_LINK_RATE_UNKNOWN;
170 }
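/*
 * Illustrative sketch (not part of the driver): nfp_net_link_speed_nfp2rte[]
 * maps NFP link-rate codes to RTE speed values, and
 * nfp_net_link_speed_rte2nfp() performs the reverse lookup by scanning that
 * table. For example:
 *
 *     uint32_t speed = nfp_net_link_speed_nfp2rte[NFP_NET_CFG_STS_LINK_RATE_25G];
 *     uint16_t rate  = nfp_net_link_speed_rte2nfp(RTE_ETH_SPEED_NUM_25G);
 *
 *     // speed == RTE_ETH_SPEED_NUM_25G, rate == NFP_NET_CFG_STS_LINK_RATE_25G
 *
 * A speed with no table entry falls back to NFP_NET_CFG_STS_LINK_RATE_UNKNOWN.
 */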
171 
172 static void
173 nfp_net_notify_port_speed(struct nfp_net_hw *hw,
174 		struct rte_eth_link *link)
175 {
176 	/*
177 	 * Read the link status from NFP_NET_CFG_STS. If the link is down
178 	 * then write the link speed NFP_NET_CFG_STS_LINK_RATE_UNKNOWN to
179 	 * NFP_NET_CFG_STS_NSP_LINK_RATE.
180 	 */
181 	if (link->link_status == RTE_ETH_LINK_DOWN) {
182 		nn_cfg_writew(&hw->super, NFP_NET_CFG_STS_NSP_LINK_RATE,
183 				NFP_NET_CFG_STS_LINK_RATE_UNKNOWN);
184 		return;
185 	}
186 
187 	/*
188 	 * Link is up so write the link speed from the eth_table to
189 	 * NFP_NET_CFG_STS_NSP_LINK_RATE.
190 	 */
191 	nn_cfg_writew(&hw->super, NFP_NET_CFG_STS_NSP_LINK_RATE,
192 			nfp_net_link_speed_rte2nfp(link->link_speed));
193 }
194 
195 /* The length of firmware version string */
196 #define FW_VER_LEN        32
197 
198 /**
199  * Reconfigure the firmware via the mailbox
200  *
201  * @param net_hw
202  *   Device to reconfigure
203  * @param mbox_cmd
204  *   The value for the mailbox command
205  *
206  * @return
207  *   - (0) if the reconfiguration via the mailbox succeeded.
208  *   - (-EIO) if an I/O error occurred and the mailbox reconfiguration failed.
209  */
210 int
211 nfp_net_mbox_reconfig(struct nfp_net_hw *net_hw,
212 		uint32_t mbox_cmd)
213 {
214 	int ret;
215 	uint32_t mbox;
216 
217 	mbox = net_hw->tlv_caps.mbox_off;
218 
219 	rte_spinlock_lock(&net_hw->super.reconfig_lock);
220 
221 	nn_cfg_writeq(&net_hw->super, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd);
222 	nn_cfg_writel(&net_hw->super, NFP_NET_CFG_UPDATE, NFP_NET_CFG_UPDATE_MBOX);
223 
224 	rte_wmb();
225 
226 	ret = nfp_reconfig_real(&net_hw->super, NFP_NET_CFG_UPDATE_MBOX);
227 
228 	rte_spinlock_unlock(&net_hw->super.reconfig_lock);
229 
230 	if (ret != 0) {
231 		PMD_DRV_LOG(ERR, "Error nfp net mailbox reconfig: mbox=%#08x update=%#08x",
232 				mbox_cmd, NFP_NET_CFG_UPDATE_MBOX);
233 		return -EIO;
234 	}
235 
236 	return nn_cfg_readl(&net_hw->super, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET);
237 }
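/*
 * Illustrative usage sketch (hedged): callers elsewhere in the driver pass a
 * mailbox command value (e.g. one of the NFP_NET_CFG_MBOX_CMD_* constants)
 * and treat a negative return as failure, a non-negative return as the value
 * read back from NFP_NET_CFG_MBOX_SIMPLE_RET:
 *
 *     int err = nfp_net_mbox_reconfig(net_hw, mbox_cmd);
 *     if (err < 0)
 *         PMD_DRV_LOG(ERR, "Mailbox reconfig failed");   // -EIO on I/O error
 */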
238 
239 struct nfp_net_hw *
240 nfp_net_get_hw(const struct rte_eth_dev *dev)
241 {
242 	struct nfp_net_hw *hw;
243 
244 	if (rte_eth_dev_is_repr(dev)) {
245 		struct nfp_flower_representor *repr;
246 		repr = dev->data->dev_private;
247 		hw = repr->app_fw_flower->pf_hw;
248 	} else {
249 		hw = dev->data->dev_private;
250 	}
251 
252 	return hw;
253 }
254 
255 /*
256  * Configure an Ethernet device.
257  *
258  * This function must be invoked before any other function in the Ethernet API.
259  * This function can also be re-invoked when a device is in the stopped state.
260  *
261  * A DPDK app sends info about how many queues to use and how those queues
262  * need to be configured. This is used by the DPDK core, which makes sure no
263  * more queues than those advertised by the driver are requested.
264  * This function is called after that internal process.
265  */
266 int
267 nfp_net_configure(struct rte_eth_dev *dev)
268 {
269 	struct nfp_net_hw *hw;
270 	struct rte_eth_conf *dev_conf;
271 	struct rte_eth_rxmode *rxmode;
272 	struct rte_eth_txmode *txmode;
273 
274 	hw = nfp_net_get_hw(dev);
275 	dev_conf = &dev->data->dev_conf;
276 	rxmode = &dev_conf->rxmode;
277 	txmode = &dev_conf->txmode;
278 
279 	if ((rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) != 0)
280 		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
281 
282 	/* Checking TX mode */
283 	if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
284 		PMD_DRV_LOG(ERR, "TX mq_mode DCB and VMDq not supported");
285 		return -EINVAL;
286 	}
287 
288 	/* Checking RX mode */
289 	if ((rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) != 0 &&
290 			(hw->super.cap & NFP_NET_CFG_CTRL_RSS_ANY) == 0) {
291 		PMD_DRV_LOG(ERR, "RSS not supported");
292 		return -EINVAL;
293 	}
294 
295 	/* Checking MTU set */
296 	if (rxmode->mtu > hw->max_mtu + NFP_ETH_OVERHEAD) {
297 		PMD_DRV_LOG(ERR, "MTU (%u) larger than the maximum possible frame size (%u)",
298 				rxmode->mtu, hw->max_mtu + NFP_ETH_OVERHEAD);
299 		return -ERANGE;
300 	}
301 
302 	return 0;
303 }
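/*
 * Illustrative application-side sketch (assumptions: one RX/TX queue pair and
 * default offloads): the generic call below is what eventually reaches
 * nfp_net_configure(), after the ethdev layer has validated the queue counts
 * against the limits reported by nfp_net_infos_get().
 *
 *     struct rte_eth_conf conf = {
 *         .rxmode = { .mq_mode = RTE_ETH_MQ_RX_NONE },
 *         .txmode = { .mq_mode = RTE_ETH_MQ_TX_NONE },
 *     };
 *
 *     if (rte_eth_dev_configure(port_id, 1, 1, &conf) < 0)
 *         rte_exit(EXIT_FAILURE, "Cannot configure port %u\n", port_id);
 */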
304 
305 void
306 nfp_net_log_device_information(const struct nfp_net_hw *hw)
307 {
308 	uint32_t cap = hw->super.cap;
309 	uint32_t cap_ext = hw->super.cap_ext;
310 
311 	PMD_INIT_LOG(INFO, "VER: %u.%u, Maximum supported MTU: %d",
312 			hw->ver.major, hw->ver.minor, hw->max_mtu);
313 
314 	PMD_INIT_LOG(INFO, "CAP: %#x", cap);
315 	PMD_INIT_LOG(INFO, "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
316 			cap & NFP_NET_CFG_CTRL_ENABLE        ? "ENABLE "      : "",
317 			cap & NFP_NET_CFG_CTRL_PROMISC       ? "PROMISC "     : "",
318 			cap & NFP_NET_CFG_CTRL_L2BC          ? "L2BCFILT "    : "",
319 			cap & NFP_NET_CFG_CTRL_L2MC          ? "L2MCFILT "    : "",
320 			cap & NFP_NET_CFG_CTRL_RXCSUM        ? "RXCSUM "      : "",
321 			cap & NFP_NET_CFG_CTRL_TXCSUM        ? "TXCSUM "      : "",
322 			cap & NFP_NET_CFG_CTRL_RXVLAN        ? "RXVLAN "      : "",
323 			cap & NFP_NET_CFG_CTRL_TXVLAN        ? "TXVLAN "      : "",
324 			cap & NFP_NET_CFG_CTRL_SCATTER       ? "SCATTER "     : "",
325 			cap & NFP_NET_CFG_CTRL_GATHER        ? "GATHER "      : "",
326 			cap & NFP_NET_CFG_CTRL_LSO           ? "TSO "         : "",
327 			cap & NFP_NET_CFG_CTRL_RXQINQ        ? "RXQINQ "      : "",
328 			cap & NFP_NET_CFG_CTRL_RXVLAN_V2     ? "RXVLANv2 "    : "",
329 			cap & NFP_NET_CFG_CTRL_RINGCFG       ? "RINGCFG "     : "",
330 			cap & NFP_NET_CFG_CTRL_RSS           ? "RSS "         : "",
331 			cap & NFP_NET_CFG_CTRL_IRQMOD        ? "IRQMOD "      : "",
332 			cap & NFP_NET_CFG_CTRL_RINGPRIO      ? "RINGPRIO "    : "",
333 			cap & NFP_NET_CFG_CTRL_MSIXAUTO      ? "MSIXAUTO "    : "",
334 			cap & NFP_NET_CFG_CTRL_TXRWB         ? "TXRWB "       : "",
335 			cap & NFP_NET_CFG_CTRL_L2SWITCH      ? "L2SWITCH "    : "",
336 			cap & NFP_NET_CFG_CTRL_TXVLAN_V2     ? "TXVLANv2 "    : "",
337 			cap & NFP_NET_CFG_CTRL_VXLAN         ? "VXLAN "       : "",
338 			cap & NFP_NET_CFG_CTRL_NVGRE         ? "NVGRE "       : "",
339 			cap & NFP_NET_CFG_CTRL_MSIX_TX_OFF   ? "MSIX_TX_OFF " : "",
340 			cap & NFP_NET_CFG_CTRL_LSO2          ? "TSOv2 "       : "",
341 			cap & NFP_NET_CFG_CTRL_RSS2          ? "RSSv2 "       : "",
342 			cap & NFP_NET_CFG_CTRL_CSUM_COMPLETE ? "CSUM "        : "",
343 			cap & NFP_NET_CFG_CTRL_LIVE_ADDR     ? "LIVE_ADDR "   : "",
344 			cap & NFP_NET_CFG_CTRL_USO           ? "USO"          : "");
345 
346 	PMD_INIT_LOG(INFO, "CAP_WORD1: %#x", cap_ext);
347 	PMD_INIT_LOG(INFO, "%s%s%s%s%s%s%s",
348 			cap_ext & NFP_NET_CFG_CTRL_PKT_TYPE        ? "PKT_TYPE "        : "",
349 			cap_ext & NFP_NET_CFG_CTRL_IPSEC           ? "IPSEC "           : "",
350 			cap_ext & NFP_NET_CFG_CTRL_IPSEC_SM_LOOKUP ? "IPSEC_SM "        : "",
351 			cap_ext & NFP_NET_CFG_CTRL_IPSEC_LM_LOOKUP ? "IPSEC_LM "        : "",
352 			cap_ext & NFP_NET_CFG_CTRL_MULTI_PF        ? "MULTI_PF "        : "",
353 			cap_ext & NFP_NET_CFG_CTRL_FLOW_STEER      ? "FLOW_STEER "      : "",
354 			cap_ext & NFP_NET_CFG_CTRL_IN_ORDER        ? "VIRTIO_IN_ORDER " : "");
355 
356 	PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u",
357 			hw->max_rx_queues, hw->max_tx_queues);
358 }
359 
360 static inline void
361 nfp_net_enable_rxvlan_cap(struct nfp_net_hw *hw,
362 		uint32_t *ctrl)
363 {
364 	if ((hw->super.cap & NFP_NET_CFG_CTRL_RXVLAN_V2) != 0)
365 		*ctrl |= NFP_NET_CFG_CTRL_RXVLAN_V2;
366 	else if ((hw->super.cap & NFP_NET_CFG_CTRL_RXVLAN) != 0)
367 		*ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
368 }
369 
370 void
371 nfp_net_enable_queues(struct rte_eth_dev *dev)
372 {
373 	struct nfp_net_hw *hw;
374 
375 	hw = nfp_net_get_hw(dev);
376 
377 	nfp_enable_queues(&hw->super, dev->data->nb_rx_queues,
378 			dev->data->nb_tx_queues);
379 }
380 
381 void
382 nfp_net_disable_queues(struct rte_eth_dev *dev)
383 {
384 	struct nfp_net_hw *net_hw;
385 
386 	net_hw = nfp_net_get_hw(dev);
387 
388 	nfp_disable_queues(&net_hw->super);
389 }
390 
391 void
392 nfp_net_params_setup(struct nfp_net_hw *hw)
393 {
394 	nn_cfg_writel(&hw->super, NFP_NET_CFG_MTU, hw->mtu);
395 	nn_cfg_writel(&hw->super, NFP_NET_CFG_FLBUFSZ, hw->flbufsz);
396 }
397 
398 void
399 nfp_net_cfg_queue_setup(struct nfp_net_hw *hw)
400 {
401 	hw->super.qcp_cfg = hw->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
402 }
403 
404 int
405 nfp_net_set_mac_addr(struct rte_eth_dev *dev,
406 		struct rte_ether_addr *mac_addr)
407 {
408 	uint32_t update;
409 	uint32_t new_ctrl;
410 	struct nfp_hw *hw;
411 	struct nfp_net_hw *net_hw;
412 
413 	net_hw = nfp_net_get_hw(dev);
414 	hw = &net_hw->super;
415 	if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) != 0 &&
416 			(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR) == 0) {
417 		PMD_DRV_LOG(ERR, "MAC address cannot be changed while the port is enabled");
418 		return -EBUSY;
419 	}
420 
421 	if (rte_is_valid_assigned_ether_addr(mac_addr) == 0) {
422 		PMD_DRV_LOG(ERR, "Invalid MAC address");
423 		return -EINVAL;
424 	}
425 
426 	/* Writing new MAC to the specific port BAR address */
427 	nfp_write_mac(hw, (uint8_t *)mac_addr);
428 
429 	update = NFP_NET_CFG_UPDATE_MACADDR;
430 	new_ctrl = hw->ctrl;
431 	if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) != 0 &&
432 			(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR) != 0)
433 		new_ctrl |= NFP_NET_CFG_CTRL_LIVE_ADDR;
434 
435 	/* Signal the NIC about the change */
436 	if (nfp_reconfig(hw, new_ctrl, update) != 0) {
437 		PMD_DRV_LOG(ERR, "MAC address update failed");
438 		return -EIO;
439 	}
440 
441 	hw->ctrl = new_ctrl;
442 
443 	return 0;
444 }
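/*
 * Illustrative application-side sketch: this callback is reached through the
 * generic ethdev API (the MAC address below is an arbitrary locally
 * administered example):
 *
 *     struct rte_ether_addr addr = {
 *         .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *     };
 *
 *     if (rte_eth_dev_default_mac_addr_set(port_id, &addr) != 0)
 *         printf("MAC address update rejected\n");
 *
 * The call fails with -EBUSY when the port is enabled and the firmware does
 * not advertise NFP_NET_CFG_CTRL_LIVE_ADDR.
 */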
445 
446 int
447 nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
448 		struct rte_intr_handle *intr_handle)
449 {
450 	uint16_t i;
451 	struct nfp_net_hw *hw;
452 
453 	if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
454 				dev->data->nb_rx_queues) != 0) {
455 		PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues intr_vec",
456 				dev->data->nb_rx_queues);
457 		return -ENOMEM;
458 	}
459 
460 	hw = nfp_net_get_hw(dev);
461 
462 	if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_UIO) {
463 		PMD_DRV_LOG(INFO, "VF: enabling RX interrupt with UIO");
464 		/* UIO just supports one queue and no LSC */
465 		nn_cfg_writeb(&hw->super, NFP_NET_CFG_RXR_VEC(0), 0);
466 		if (rte_intr_vec_list_index_set(intr_handle, 0, 0) != 0)
467 			return -1;
468 	} else {
469 		PMD_DRV_LOG(INFO, "VF: enabling RX interrupt with VFIO");
470 		for (i = 0; i < dev->data->nb_rx_queues; i++) {
471 			/*
472 			 * The first MSI-X vector is reserved for
473 			 * non-EFD interrupts.
474 			 */
475 			nn_cfg_writeb(&hw->super, NFP_NET_CFG_RXR_VEC(i), i + 1);
476 			if (rte_intr_vec_list_index_set(intr_handle, i, i + 1) != 0)
477 				return -1;
478 		}
479 	}
480 
481 	/* Avoiding TX interrupts */
482 	hw->super.ctrl |= NFP_NET_CFG_CTRL_MSIX_TX_OFF;
483 	return 0;
484 }
485 
486 uint32_t
487 nfp_check_offloads(struct rte_eth_dev *dev)
488 {
489 	uint32_t cap;
490 	uint32_t ctrl = 0;
491 	uint64_t rx_offload;
492 	uint64_t tx_offload;
493 	struct nfp_net_hw *hw;
494 	struct rte_eth_conf *dev_conf;
495 
496 	hw = nfp_net_get_hw(dev);
497 	cap = hw->super.cap;
498 
499 	dev_conf = &dev->data->dev_conf;
500 	rx_offload = dev_conf->rxmode.offloads;
501 	tx_offload = dev_conf->txmode.offloads;
502 
503 	if ((rx_offload & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM) != 0) {
504 		if ((cap & NFP_NET_CFG_CTRL_RXCSUM) != 0)
505 			ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
506 	}
507 
508 	if ((rx_offload & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) != 0)
509 		nfp_net_enable_rxvlan_cap(hw, &ctrl);
510 
511 	if ((rx_offload & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) != 0) {
512 		if ((cap & NFP_NET_CFG_CTRL_RXQINQ) != 0)
513 			ctrl |= NFP_NET_CFG_CTRL_RXQINQ;
514 	}
515 
516 	hw->mtu = dev->data->mtu;
517 
518 	if ((tx_offload & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) != 0) {
519 		if ((cap & NFP_NET_CFG_CTRL_TXVLAN_V2) != 0)
520 			ctrl |= NFP_NET_CFG_CTRL_TXVLAN_V2;
521 		else if ((cap & NFP_NET_CFG_CTRL_TXVLAN) != 0)
522 			ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
523 	}
524 
525 	/* L2 broadcast */
526 	if ((cap & NFP_NET_CFG_CTRL_L2BC) != 0)
527 		ctrl |= NFP_NET_CFG_CTRL_L2BC;
528 
529 	/* L2 multicast */
530 	if ((cap & NFP_NET_CFG_CTRL_L2MC) != 0)
531 		ctrl |= NFP_NET_CFG_CTRL_L2MC;
532 
533 	/* TX checksum offload */
534 	if ((tx_offload & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) != 0 ||
535 			(tx_offload & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) != 0 ||
536 			(tx_offload & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) != 0)
537 		ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
538 
539 	/* LSO offload */
540 	if ((tx_offload & RTE_ETH_TX_OFFLOAD_TCP_TSO) != 0 ||
541 			(tx_offload & RTE_ETH_TX_OFFLOAD_UDP_TSO) != 0 ||
542 			(tx_offload & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO) != 0) {
543 		if ((cap & NFP_NET_CFG_CTRL_LSO) != 0)
544 			ctrl |= NFP_NET_CFG_CTRL_LSO;
545 		else if ((cap & NFP_NET_CFG_CTRL_LSO2) != 0)
546 			ctrl |= NFP_NET_CFG_CTRL_LSO2;
547 	}
548 
549 	/* TX gather */
550 	if ((tx_offload & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) != 0)
551 		ctrl |= NFP_NET_CFG_CTRL_GATHER;
552 
553 	return ctrl;
554 }
555 
556 int
557 nfp_net_promisc_enable(struct rte_eth_dev *dev)
558 {
559 	int ret;
560 	uint32_t update;
561 	uint32_t new_ctrl;
562 	struct nfp_hw *hw;
563 	struct nfp_net_hw *net_hw;
564 
565 	net_hw = nfp_net_get_hw(dev);
566 
567 	hw = &net_hw->super;
568 	if ((hw->cap & NFP_NET_CFG_CTRL_PROMISC) == 0) {
569 		PMD_DRV_LOG(ERR, "Promiscuous mode not supported");
570 		return -ENOTSUP;
571 	}
572 
573 	if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) != 0) {
574 		PMD_DRV_LOG(INFO, "Promiscuous mode already enabled");
575 		return 0;
576 	}
577 
578 	new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_PROMISC;
579 	update = NFP_NET_CFG_UPDATE_GEN;
580 
581 	ret = nfp_reconfig(hw, new_ctrl, update);
582 	if (ret != 0)
583 		return ret;
584 
585 	hw->ctrl = new_ctrl;
586 
587 	return 0;
588 }
589 
590 int
591 nfp_net_promisc_disable(struct rte_eth_dev *dev)
592 {
593 	int ret;
594 	uint32_t update;
595 	uint32_t new_ctrl;
596 	struct nfp_hw *hw;
597 	struct nfp_net_hw *net_hw;
598 
599 	net_hw = nfp_net_get_hw(dev);
600 	hw = &net_hw->super;
601 
602 	if ((hw->cap & NFP_NET_CFG_CTRL_PROMISC) == 0) {
603 		PMD_DRV_LOG(ERR, "Promiscuous mode not supported");
604 		return -ENOTSUP;
605 	}
606 
607 	if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) == 0) {
608 		PMD_DRV_LOG(INFO, "Promiscuous mode already disabled");
609 		return 0;
610 	}
611 
612 	new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_PROMISC;
613 	update = NFP_NET_CFG_UPDATE_GEN;
614 
615 	ret = nfp_reconfig(hw, new_ctrl, update);
616 	if (ret != 0)
617 		return ret;
618 
619 	hw->ctrl = new_ctrl;
620 
621 	return 0;
622 }
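/*
 * Illustrative application-side sketch: promiscuous mode is toggled through
 * the generic ethdev API, which lands in the two callbacks above:
 *
 *     if (rte_eth_promiscuous_enable(port_id) != 0)
 *         printf("Promiscuous mode not supported on port %u\n", port_id);
 *     ...
 *     rte_eth_promiscuous_disable(port_id);
 */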
623 
624 static int
625 nfp_net_set_allmulticast_mode(struct rte_eth_dev *dev,
626 		bool enable)
627 {
628 	int ret;
629 	uint32_t update;
630 	struct nfp_hw *hw;
631 	uint32_t cap_extend;
632 	uint32_t ctrl_extend;
633 	uint32_t new_ctrl_extend;
634 	struct nfp_net_hw *net_hw;
635 
636 	net_hw = nfp_net_get_hw(dev);
637 	hw = &net_hw->super;
638 
639 	cap_extend = hw->cap_ext;
640 	if ((cap_extend & NFP_NET_CFG_CTRL_MCAST_FILTER) == 0) {
641 		PMD_DRV_LOG(ERR, "Allmulticast mode not supported");
642 		return -ENOTSUP;
643 	}
644 
645 	/*
646 	 * Allmulticast mode enabled when NFP_NET_CFG_CTRL_MCAST_FILTER bit is 0.
647 	 * Allmulticast mode disabled when NFP_NET_CFG_CTRL_MCAST_FILTER bit is 1.
648 	 */
649 	ctrl_extend = hw->ctrl_ext;
650 	if (enable) {
651 		if ((ctrl_extend & NFP_NET_CFG_CTRL_MCAST_FILTER) == 0)
652 			return 0;
653 
654 		new_ctrl_extend = ctrl_extend & ~NFP_NET_CFG_CTRL_MCAST_FILTER;
655 	} else {
656 		if ((ctrl_extend & NFP_NET_CFG_CTRL_MCAST_FILTER) != 0)
657 			return 0;
658 
659 		new_ctrl_extend = ctrl_extend | NFP_NET_CFG_CTRL_MCAST_FILTER;
660 	}
661 
662 	update = NFP_NET_CFG_UPDATE_GEN;
663 
664 	ret = nfp_ext_reconfig(hw, new_ctrl_extend, update);
665 	if (ret != 0)
666 		return ret;
667 
668 	hw->ctrl_ext = new_ctrl_extend;
669 	return 0;
670 }
671 
672 int
673 nfp_net_allmulticast_enable(struct rte_eth_dev *dev)
674 {
675 	return nfp_net_set_allmulticast_mode(dev, true);
676 }
677 
678 int
679 nfp_net_allmulticast_disable(struct rte_eth_dev *dev)
680 {
681 	return nfp_net_set_allmulticast_mode(dev, false);
682 }
683 
684 static int
685 nfp_net_speed_aneg_update(struct rte_eth_dev *dev,
686 		struct nfp_net_hw *hw,
687 		struct rte_eth_link *link)
688 {
689 	uint32_t i;
690 	uint32_t speed;
691 	struct nfp_eth_table *nfp_eth_table;
692 	struct nfp_eth_table_port *eth_port;
693 
694 	/* Check whether the link status has changed. */
695 	if (dev->data->dev_link.link_status != link->link_status) {
696 		nfp_eth_table = nfp_eth_read_ports(hw->cpp);
697 		if (nfp_eth_table == NULL) {
698 			PMD_DRV_LOG(DEBUG, "Error reading NFP ethernet table.");
699 			return -EIO;
700 		}
701 
702 		hw->pf_dev->nfp_eth_table->ports[hw->idx] = nfp_eth_table->ports[hw->idx];
703 		free(nfp_eth_table);
704 	}
705 
706 	nfp_eth_table = hw->pf_dev->nfp_eth_table;
707 	eth_port = &nfp_eth_table->ports[hw->idx];
708 	speed = eth_port->speed;
709 
710 	for (i = 0; i < RTE_DIM(nfp_net_link_speed_nfp2rte); i++) {
711 		if (nfp_net_link_speed_nfp2rte[i] == speed) {
712 			link->link_speed = speed;
713 			break;
714 		}
715 	}
716 
717 	if (dev->data->dev_conf.link_speeds == RTE_ETH_LINK_SPEED_AUTONEG &&
718 			eth_port->supp_aneg)
719 		link->link_autoneg = RTE_ETH_LINK_AUTONEG;
720 
721 	return 0;
722 }
723 
724 int
725 nfp_net_link_update_common(struct rte_eth_dev *dev,
726 		struct nfp_net_hw *hw,
727 		struct rte_eth_link *link,
728 		uint32_t link_status)
729 {
730 	int ret;
731 	uint32_t nn_link_status;
732 
733 	if (link->link_status == RTE_ETH_LINK_UP) {
734 		if (hw->pf_dev != NULL) {
735 			ret = nfp_net_speed_aneg_update(dev, hw, link);
736 			if (ret != 0) {
737 				PMD_DRV_LOG(DEBUG, "Failed to update speed and aneg.");
738 				return ret;
739 			}
740 		} else {
741 			/*
742 			 * Shift and mask nn_link_status so that it is effectively the value
743 			 * at offset NFP_NET_CFG_STS_NSP_LINK_RATE.
744 			 */
745 			nn_link_status = (link_status >> NFP_NET_CFG_STS_LINK_RATE_SHIFT) &
746 					NFP_NET_CFG_STS_LINK_RATE_MASK;
747 			if (nn_link_status < RTE_DIM(nfp_net_link_speed_nfp2rte))
748 				link->link_speed = nfp_net_link_speed_nfp2rte[nn_link_status];
749 		}
750 	}
751 
752 	ret = rte_eth_linkstatus_set(dev, link);
753 	if (ret == 0) {
754 		if (link->link_status != 0)
755 			PMD_DRV_LOG(INFO, "NIC Link is Up");
756 		else
757 			PMD_DRV_LOG(INFO, "NIC Link is Down");
758 	}
759 
760 	return ret;
761 }
762 
763 /*
764  * Returns 0 if the link status changed, -1 if it did not change.
765  *
766  * Waiting for completion is needed, as it can take up to 9 seconds to get
767  * the link status.
768  */
769 int
770 nfp_net_link_update(struct rte_eth_dev *dev,
771 		__rte_unused int wait_to_complete)
772 {
773 	int ret;
774 	struct nfp_net_hw *hw;
775 	uint32_t nn_link_status;
776 	struct rte_eth_link link;
777 
778 	hw = nfp_net_get_hw(dev);
779 
780 	memset(&link, 0, sizeof(struct rte_eth_link));
781 
782 	/* Read link status */
783 	nn_link_status = nn_cfg_readw(&hw->super, NFP_NET_CFG_STS);
784 	if ((nn_link_status & NFP_NET_CFG_STS_LINK) != 0)
785 		link.link_status = RTE_ETH_LINK_UP;
786 
787 	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
788 
789 	ret = nfp_net_link_update_common(dev, hw, &link, nn_link_status);
790 	if (ret == -EIO)
791 		return ret;
792 
793 	/*
794 	 * Notify the port to update the speed value in the CTRL BAR from NSP.
795 	 * Not applicable for VFs as the associated PF is still attached to the
796 	 * kernel driver.
797 	 */
798 	if (hw->pf_dev != NULL)
799 		nfp_net_notify_port_speed(hw, &link);
800 
801 	return ret;
802 }
803 
804 int
805 nfp_net_stats_get(struct rte_eth_dev *dev,
806 		struct rte_eth_stats *stats)
807 {
808 	uint16_t i;
809 	struct nfp_net_hw *hw;
810 	struct rte_eth_stats nfp_dev_stats;
811 
812 	if (stats == NULL)
813 		return -EINVAL;
814 
815 	hw = nfp_net_get_hw(dev);
816 
817 	memset(&nfp_dev_stats, 0, sizeof(nfp_dev_stats));
818 
819 	/* Reading per RX ring stats */
820 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
821 		if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
822 			break;
823 
824 		nfp_dev_stats.q_ipackets[i] =
825 				nn_cfg_readq(&hw->super, NFP_NET_CFG_RXR_STATS(i));
826 		nfp_dev_stats.q_ipackets[i] -=
827 				hw->eth_stats_base.q_ipackets[i];
828 
829 		nfp_dev_stats.q_ibytes[i] =
830 				nn_cfg_readq(&hw->super, NFP_NET_CFG_RXR_STATS(i) + 0x8);
831 		nfp_dev_stats.q_ibytes[i] -=
832 				hw->eth_stats_base.q_ibytes[i];
833 	}
834 
835 	/* Reading per TX ring stats */
836 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
837 		if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
838 			break;
839 
840 		nfp_dev_stats.q_opackets[i] =
841 				nn_cfg_readq(&hw->super, NFP_NET_CFG_TXR_STATS(i));
842 		nfp_dev_stats.q_opackets[i] -= hw->eth_stats_base.q_opackets[i];
843 
844 		nfp_dev_stats.q_obytes[i] =
845 				nn_cfg_readq(&hw->super, NFP_NET_CFG_TXR_STATS(i) + 0x8);
846 		nfp_dev_stats.q_obytes[i] -= hw->eth_stats_base.q_obytes[i];
847 	}
848 
849 	nfp_dev_stats.ipackets = nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_FRAMES);
850 	nfp_dev_stats.ipackets -= hw->eth_stats_base.ipackets;
851 
852 	nfp_dev_stats.ibytes = nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_OCTETS);
853 	nfp_dev_stats.ibytes -= hw->eth_stats_base.ibytes;
854 
855 	nfp_dev_stats.opackets =
856 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_TX_FRAMES);
857 	nfp_dev_stats.opackets -= hw->eth_stats_base.opackets;
858 
859 	nfp_dev_stats.obytes =
860 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_TX_OCTETS);
861 	nfp_dev_stats.obytes -= hw->eth_stats_base.obytes;
862 
863 	/* Reading general device stats */
864 	nfp_dev_stats.ierrors =
865 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_ERRORS);
866 	nfp_dev_stats.ierrors -= hw->eth_stats_base.ierrors;
867 
868 	nfp_dev_stats.oerrors =
869 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_TX_ERRORS);
870 	nfp_dev_stats.oerrors -= hw->eth_stats_base.oerrors;
871 
872 	/* RX ring mbuf allocation failures */
873 	nfp_dev_stats.rx_nombuf = dev->data->rx_mbuf_alloc_failed;
874 
875 	nfp_dev_stats.imissed =
876 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_DISCARDS);
877 	nfp_dev_stats.imissed -= hw->eth_stats_base.imissed;
878 
879 	memcpy(stats, &nfp_dev_stats, sizeof(*stats));
880 	return 0;
881 }
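/*
 * Illustrative application-side sketch: the counters assembled above are
 * exposed through the generic stats API:
 *
 *     struct rte_eth_stats stats;
 *
 *     if (rte_eth_stats_get(port_id, &stats) == 0)
 *         printf("port %u: %" PRIu64 " pkts in, %" PRIu64 " pkts out\n",
 *                 port_id, stats.ipackets, stats.opackets);
 */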
882 
883 /*
884  * The hw->eth_stats_base field records the per-counter starting point.
885  * Let's update it now.
886  */
887 int
888 nfp_net_stats_reset(struct rte_eth_dev *dev)
889 {
890 	uint16_t i;
891 	struct nfp_net_hw *hw;
892 
893 	hw = nfp_net_get_hw(dev);
894 
895 	/* Reading per RX ring stats */
896 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
897 		if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
898 			break;
899 
900 		hw->eth_stats_base.q_ipackets[i] =
901 				nn_cfg_readq(&hw->super, NFP_NET_CFG_RXR_STATS(i));
902 
903 		hw->eth_stats_base.q_ibytes[i] =
904 				nn_cfg_readq(&hw->super, NFP_NET_CFG_RXR_STATS(i) + 0x8);
905 	}
906 
907 	/* Reading per TX ring stats */
908 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
909 		if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
910 			break;
911 
912 		hw->eth_stats_base.q_opackets[i] =
913 				nn_cfg_readq(&hw->super, NFP_NET_CFG_TXR_STATS(i));
914 
915 		hw->eth_stats_base.q_obytes[i] =
916 				nn_cfg_readq(&hw->super, NFP_NET_CFG_TXR_STATS(i) + 0x8);
917 	}
918 
919 	hw->eth_stats_base.ipackets =
920 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_FRAMES);
921 
922 	hw->eth_stats_base.ibytes =
923 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_OCTETS);
924 
925 	hw->eth_stats_base.opackets =
926 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_TX_FRAMES);
927 
928 	hw->eth_stats_base.obytes =
929 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_TX_OCTETS);
930 
931 	/* Reading general device stats */
932 	hw->eth_stats_base.ierrors =
933 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_ERRORS);
934 
935 	hw->eth_stats_base.oerrors =
936 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_TX_ERRORS);
937 
938 	/* RX ring mbuf allocation failures */
939 	dev->data->rx_mbuf_alloc_failed = 0;
940 
941 	hw->eth_stats_base.imissed =
942 			nn_cfg_readq(&hw->super, NFP_NET_CFG_STATS_RX_DISCARDS);
943 
944 	return 0;
945 }
946 
947 uint32_t
948 nfp_net_xstats_size(const struct rte_eth_dev *dev)
949 {
950 	uint32_t count;
951 	struct nfp_net_hw *hw;
952 	const uint32_t size = RTE_DIM(nfp_net_xstats);
953 
954 	/* If the device is a VF, then there will be no MAC stats */
955 	hw = nfp_net_get_hw(dev);
956 	if (hw->mac_stats == NULL) {
957 		for (count = 0; count < size; count++) {
958 			if (nfp_net_xstats[count].group == NFP_XSTAT_GROUP_MAC)
959 				break;
960 		}
961 
962 		return count;
963 	}
964 
965 	return size;
966 }
967 
968 static const struct nfp_xstat *
969 nfp_net_xstats_info(const struct rte_eth_dev *dev,
970 		uint32_t index)
971 {
972 	if (index >= nfp_net_xstats_size(dev)) {
973 		PMD_DRV_LOG(ERR, "xstat index out of bounds");
974 		return NULL;
975 	}
976 
977 	return &nfp_net_xstats[index];
978 }
979 
980 static uint64_t
981 nfp_net_xstats_value(const struct rte_eth_dev *dev,
982 		uint32_t index,
983 		bool raw)
984 {
985 	uint64_t value;
986 	struct nfp_net_hw *hw;
987 	struct nfp_xstat xstat;
988 
989 	hw = nfp_net_get_hw(dev);
990 	xstat = nfp_net_xstats[index];
991 
992 	if (xstat.group == NFP_XSTAT_GROUP_MAC)
993 		value = nn_readq(hw->mac_stats + xstat.offset);
994 	else
995 		value = nn_cfg_readq(&hw->super, xstat.offset);
996 
997 	if (raw)
998 		return value;
999 
1000 	/*
1001 	 * A baseline value of each statistic counter is recorded when stats are "reset".
1002 	 * Thus, the value returned by this function needs to be decremented by this
1003 	 * baseline value. The result is the count of this statistic since the last time
1004 	 * it was "reset".
1005 	 */
1006 	return value - hw->eth_xstats_base[index].value;
1007 }
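/*
 * Illustrative sketch of the baseline arithmetic above (the numbers are made
 * up): if the raw counter currently reads 1500 and the value recorded at the
 * last xstats reset was 1000, the reported xstat is 1500 - 1000 = 500, i.e.
 * the amount accumulated since the last reset.
 */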
1008 
1009 /* NOTE: All callers ensure dev is always set. */
1010 int
1011 nfp_net_xstats_get_names(struct rte_eth_dev *dev,
1012 		struct rte_eth_xstat_name *xstats_names,
1013 		unsigned int size)
1014 {
1015 	uint32_t id;
1016 	uint32_t nfp_size;
1017 	uint32_t read_size;
1018 
1019 	nfp_size = nfp_net_xstats_size(dev);
1020 
1021 	if (xstats_names == NULL)
1022 		return nfp_size;
1023 
1024 	/* Read at most NFP xstats number of names. */
1025 	read_size = RTE_MIN(size, nfp_size);
1026 
1027 	for (id = 0; id < read_size; id++)
1028 		rte_strlcpy(xstats_names[id].name, nfp_net_xstats[id].name,
1029 				RTE_ETH_XSTATS_NAME_SIZE);
1030 
1031 	return read_size;
1032 }
1033 
1034 /* NOTE: All callers ensure dev is always set. */
1035 int
1036 nfp_net_xstats_get(struct rte_eth_dev *dev,
1037 		struct rte_eth_xstat *xstats,
1038 		unsigned int n)
1039 {
1040 	uint32_t id;
1041 	uint32_t nfp_size;
1042 	uint32_t read_size;
1043 
1044 	nfp_size = nfp_net_xstats_size(dev);
1045 
1046 	if (xstats == NULL)
1047 		return nfp_size;
1048 
1049 	/* Read at most NFP xstats number of values. */
1050 	read_size = RTE_MIN(n, nfp_size);
1051 
1052 	for (id = 0; id < read_size; id++) {
1053 		xstats[id].id = id;
1054 		xstats[id].value = nfp_net_xstats_value(dev, id, false);
1055 	}
1056 
1057 	return read_size;
1058 }
1059 
1060 /*
1061  * NOTE: The only caller rte_eth_xstats_get_names_by_id() ensures dev,
1062  * ids, xstats_names and size are valid, and non-NULL.
1063  */
1064 int
1065 nfp_net_xstats_get_names_by_id(struct rte_eth_dev *dev,
1066 		const uint64_t *ids,
1067 		struct rte_eth_xstat_name *xstats_names,
1068 		unsigned int size)
1069 {
1070 	uint32_t i;
1071 	uint32_t read_size;
1072 
1073 	/* Read at most NFP xstats number of names. */
1074 	read_size = RTE_MIN(size, nfp_net_xstats_size(dev));
1075 
1076 	for (i = 0; i < read_size; i++) {
1077 		const struct nfp_xstat *xstat;
1078 
1079 		/* Make sure ID is valid for device. */
1080 		xstat = nfp_net_xstats_info(dev, ids[i]);
1081 		if (xstat == NULL)
1082 			return -EINVAL;
1083 
1084 		rte_strlcpy(xstats_names[i].name, xstat->name,
1085 				RTE_ETH_XSTATS_NAME_SIZE);
1086 	}
1087 
1088 	return read_size;
1089 }
1090 
1091 /*
1092  * NOTE: The only caller rte_eth_xstats_get_by_id() ensures dev,
1093  * ids, values and n are valid, and non-NULL.
1094  */
1095 int
1096 nfp_net_xstats_get_by_id(struct rte_eth_dev *dev,
1097 		const uint64_t *ids,
1098 		uint64_t *values,
1099 		unsigned int n)
1100 {
1101 	uint32_t i;
1102 	uint32_t read_size;
1103 
1104 	/* Read at most NFP xstats number of values. */
1105 	read_size = RTE_MIN(n, nfp_net_xstats_size(dev));
1106 
1107 	for (i = 0; i < read_size; i++) {
1108 		const struct nfp_xstat *xstat;
1109 
1110 		/* Make sure index is valid for device. */
1111 		xstat = nfp_net_xstats_info(dev, ids[i]);
1112 		if (xstat == NULL)
1113 			return -EINVAL;
1114 
1115 		values[i] = nfp_net_xstats_value(dev, ids[i], false);
1116 	}
1117 
1118 	return read_size;
1119 }
1120 
1121 int
1122 nfp_net_xstats_reset(struct rte_eth_dev *dev)
1123 {
1124 	uint32_t id;
1125 	uint32_t read_size;
1126 	struct nfp_net_hw *hw;
1127 
1128 	hw = nfp_net_get_hw(dev);
1129 	read_size = nfp_net_xstats_size(dev);
1130 
1131 	for (id = 0; id < read_size; id++) {
1132 		hw->eth_xstats_base[id].id = id;
1133 		hw->eth_xstats_base[id].value = nfp_net_xstats_value(dev, id, true);
1134 	}
1135 
1136 	/* Successfully reset xstats, now call function to reset basic stats. */
1137 	return nfp_net_stats_reset(dev);
1138 }
1139 
1140 void
1141 nfp_net_rx_desc_limits(struct nfp_net_hw *hw,
1142 		uint16_t *min_rx_desc,
1143 		uint16_t *max_rx_desc)
1144 {
1145 	*max_rx_desc = hw->dev_info->max_qc_size;
1146 	*min_rx_desc = hw->dev_info->min_qc_size;
1147 }
1148 
1149 void
1150 nfp_net_tx_desc_limits(struct nfp_net_hw *hw,
1151 		uint16_t *min_tx_desc,
1152 		uint16_t *max_tx_desc)
1153 {
1154 	uint16_t tx_dpp;
1155 
1156 	if (hw->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3)
1157 		tx_dpp = NFD3_TX_DESC_PER_PKT;
1158 	else
1159 		tx_dpp = NFDK_TX_DESC_PER_SIMPLE_PKT;
1160 
1161 	*max_tx_desc = hw->dev_info->max_qc_size / tx_dpp;
1162 	*min_tx_desc = hw->dev_info->min_qc_size / tx_dpp;
1163 }
1164 
1165 int
1166 nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1167 {
1168 	uint32_t cap;
1169 	uint32_t cap_extend;
1170 	uint16_t min_rx_desc;
1171 	uint16_t max_rx_desc;
1172 	uint16_t min_tx_desc;
1173 	uint16_t max_tx_desc;
1174 	struct nfp_net_hw *hw;
1175 
1176 	hw = nfp_net_get_hw(dev);
1177 
1178 	nfp_net_rx_desc_limits(hw, &min_rx_desc, &max_rx_desc);
1179 	nfp_net_tx_desc_limits(hw, &min_tx_desc, &max_tx_desc);
1180 
1181 	dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
1182 	dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
1183 	dev_info->min_rx_bufsize = RTE_ETHER_MIN_MTU;
1184 	/*
1185 	 * The maximum rx packet length is set to the maximum layer 3 MTU,
1186 	 * plus layer 2, CRC and VLAN headers.
1187 	 * The maximum layer 3 MTU (max_mtu) is read from hardware,
1188 	 * which was set by the firmware loaded onto the card.
1189 	 */
1190 	dev_info->max_rx_pktlen = hw->max_mtu + NFP_ETH_OVERHEAD;
1191 	dev_info->max_mtu = hw->max_mtu;
1192 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
1193 	/* Next should change when PF support is implemented */
1194 	dev_info->max_mac_addrs = 1;
1195 
1196 	cap = hw->super.cap;
1197 
1198 	if ((cap & (NFP_NET_CFG_CTRL_RXVLAN | NFP_NET_CFG_CTRL_RXVLAN_V2)) != 0)
1199 		dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
1200 
1201 	if ((cap & NFP_NET_CFG_CTRL_RXQINQ) != 0)
1202 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
1203 
1204 	if ((cap & NFP_NET_CFG_CTRL_RXCSUM) != 0)
1205 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
1206 				RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
1207 				RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
1208 
1209 	if ((cap & (NFP_NET_CFG_CTRL_TXVLAN | NFP_NET_CFG_CTRL_TXVLAN_V2)) != 0)
1210 		dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
1211 
1212 	if ((cap & NFP_NET_CFG_CTRL_TXCSUM) != 0)
1213 		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
1214 				RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
1215 				RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
1216 
1217 	if ((cap & NFP_NET_CFG_CTRL_LSO_ANY) != 0) {
1218 		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
1219 		if ((cap & NFP_NET_CFG_CTRL_USO) != 0)
1220 			dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_UDP_TSO;
1221 		if ((cap & NFP_NET_CFG_CTRL_VXLAN) != 0)
1222 			dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO;
1223 	}
1224 
1225 	if ((cap & NFP_NET_CFG_CTRL_GATHER) != 0)
1226 		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
1227 
1228 	cap_extend = hw->super.cap_ext;
1229 	if ((cap_extend & NFP_NET_CFG_CTRL_IPSEC) != 0) {
1230 		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_SECURITY;
1231 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_SECURITY;
1232 	}
1233 
1234 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
1235 		.rx_thresh = {
1236 			.pthresh = DEFAULT_RX_PTHRESH,
1237 			.hthresh = DEFAULT_RX_HTHRESH,
1238 			.wthresh = DEFAULT_RX_WTHRESH,
1239 		},
1240 		.rx_free_thresh = DEFAULT_RX_FREE_THRESH,
1241 		.rx_drop_en = 0,
1242 	};
1243 
1244 	dev_info->default_txconf = (struct rte_eth_txconf) {
1245 		.tx_thresh = {
1246 			.pthresh = DEFAULT_TX_PTHRESH,
1247 			.hthresh = DEFAULT_TX_HTHRESH,
1248 			.wthresh = DEFAULT_TX_WTHRESH,
1249 		},
1250 		.tx_free_thresh = DEFAULT_TX_FREE_THRESH,
1251 		.tx_rs_thresh = DEFAULT_TX_RSBIT_THRESH,
1252 	};
1253 
1254 	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
1255 		.nb_max = max_rx_desc,
1256 		.nb_min = min_rx_desc,
1257 		.nb_align = NFP_ALIGN_RING_DESC,
1258 	};
1259 
1260 	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
1261 		.nb_max = max_tx_desc,
1262 		.nb_min = min_tx_desc,
1263 		.nb_align = NFP_ALIGN_RING_DESC,
1264 		.nb_seg_max = NFP_TX_MAX_SEG,
1265 		.nb_mtu_seg_max = NFP_TX_MAX_MTU_SEG,
1266 	};
1267 
1268 	if ((cap & NFP_NET_CFG_CTRL_RSS_ANY) != 0) {
1269 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
1270 		dev_info->flow_type_rss_offloads = NFP_NET_RSS_CAP;
1271 		dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ;
1272 		dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ;
1273 	}
1274 
1275 	/* Only PF supports getting speed capability. */
1276 	if (hw->pf_dev != NULL)
1277 		dev_info->speed_capa = hw->pf_dev->speed_capa;
1278 
1279 	return 0;
1280 }
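/*
 * Illustrative application-side sketch: the limits filled in above are what
 * an application sees through the generic info API:
 *
 *     struct rte_eth_dev_info info;
 *
 *     if (rte_eth_dev_info_get(port_id, &info) == 0)
 *         printf("port %u: up to %u RX queues, max MTU %u\n",
 *                 port_id, info.max_rx_queues, info.max_mtu);
 */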
1281 
1282 int
1283 nfp_net_common_init(struct rte_pci_device *pci_dev,
1284 		struct nfp_net_hw *hw)
1285 {
1286 	const int stride = 4;
1287 
1288 	hw->device_id = pci_dev->id.device_id;
1289 	hw->vendor_id = pci_dev->id.vendor_id;
1290 	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
1291 	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
1292 
1293 	hw->max_rx_queues = nn_cfg_readl(&hw->super, NFP_NET_CFG_MAX_RXRINGS);
1294 	hw->max_tx_queues = nn_cfg_readl(&hw->super, NFP_NET_CFG_MAX_TXRINGS);
1295 	if (hw->max_rx_queues == 0 || hw->max_tx_queues == 0) {
1296 		PMD_INIT_LOG(ERR, "Device %s cannot be used, there are no valid queue "
1297 				"pairs for use", pci_dev->name);
1298 		return -ENODEV;
1299 	}
1300 
1301 	nfp_net_cfg_read_version(hw);
1302 	if (!nfp_net_is_valid_nfd_version(hw->ver))
1303 		return -EINVAL;
1304 
1305 	if (nfp_net_check_dma_mask(hw, pci_dev->name) != 0)
1306 		return -ENODEV;
1307 
1308 	/* Get some of the read-only fields from the config BAR */
1309 	hw->super.cap = nn_cfg_readl(&hw->super, NFP_NET_CFG_CAP);
1310 	hw->super.cap_ext = nn_cfg_readl(&hw->super, NFP_NET_CFG_CAP_WORD1);
1311 	hw->max_mtu = nn_cfg_readl(&hw->super, NFP_NET_CFG_MAX_MTU);
1312 	hw->flbufsz = DEFAULT_FLBUF_SIZE;
1313 
1314 	nfp_net_init_metadata_format(hw);
1315 
1316 	/* Read the Rx offset configured from firmware */
1317 	if (hw->ver.major < 2)
1318 		hw->rx_offset = NFP_NET_RX_OFFSET;
1319 	else
1320 		hw->rx_offset = nn_cfg_readl(&hw->super, NFP_NET_CFG_RX_OFFSET);
1321 
1322 	hw->super.ctrl = 0;
1323 	hw->stride_rx = stride;
1324 	hw->stride_tx = stride;
1325 
1326 	return 0;
1327 }
1328 
1329 const uint32_t *
1330 nfp_net_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements)
1331 {
1332 	struct nfp_net_hw *net_hw;
1333 	static const uint32_t ptypes[] = {
1334 		RTE_PTYPE_L2_ETHER,
1335 		RTE_PTYPE_L3_IPV4,
1336 		RTE_PTYPE_L3_IPV4_EXT,
1337 		RTE_PTYPE_L3_IPV6,
1338 		RTE_PTYPE_L3_IPV6_EXT,
1339 		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1340 		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
1341 		RTE_PTYPE_L4_TCP,
1342 		RTE_PTYPE_L4_UDP,
1343 		RTE_PTYPE_L4_FRAG,
1344 		RTE_PTYPE_L4_NONFRAG,
1345 		RTE_PTYPE_L4_ICMP,
1346 		RTE_PTYPE_L4_SCTP,
1347 		RTE_PTYPE_TUNNEL_VXLAN,
1348 		RTE_PTYPE_TUNNEL_NVGRE,
1349 		RTE_PTYPE_TUNNEL_GENEVE,
1350 		RTE_PTYPE_INNER_L2_ETHER,
1351 		RTE_PTYPE_INNER_L3_IPV4,
1352 		RTE_PTYPE_INNER_L3_IPV4_EXT,
1353 		RTE_PTYPE_INNER_L3_IPV6,
1354 		RTE_PTYPE_INNER_L3_IPV6_EXT,
1355 		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
1356 		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
1357 		RTE_PTYPE_INNER_L4_TCP,
1358 		RTE_PTYPE_INNER_L4_UDP,
1359 		RTE_PTYPE_INNER_L4_FRAG,
1360 		RTE_PTYPE_INNER_L4_NONFRAG,
1361 		RTE_PTYPE_INNER_L4_ICMP,
1362 		RTE_PTYPE_INNER_L4_SCTP,
1363 	};
1364 
1365 	if (dev->rx_pkt_burst != nfp_net_recv_pkts)
1366 		return NULL;
1367 
1368 	net_hw = dev->data->dev_private;
1369 	if ((net_hw->super.ctrl_ext & NFP_NET_CFG_CTRL_PKT_TYPE) == 0)
1370 		return NULL;
1371 
1372 	*no_of_elements = RTE_DIM(ptypes);
1373 	return ptypes;
1374 }
1375 
1376 int
1377 nfp_rx_queue_intr_enable(struct rte_eth_dev *dev,
1378 		uint16_t queue_id)
1379 {
1380 	uint16_t base = 0;
1381 	struct nfp_net_hw *hw;
1382 	struct rte_pci_device *pci_dev;
1383 
1384 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1385 	if (rte_intr_type_get(pci_dev->intr_handle) != RTE_INTR_HANDLE_UIO)
1386 		base = 1;
1387 
1388 	/* Make sure all updates are written before un-masking */
1389 	rte_wmb();
1390 
1391 	hw = nfp_net_get_hw(dev);
1392 	nn_cfg_writeb(&hw->super, NFP_NET_CFG_ICR(base + queue_id),
1393 			NFP_NET_CFG_ICR_UNMASKED);
1394 	return 0;
1395 }
1396 
1397 int
1398 nfp_rx_queue_intr_disable(struct rte_eth_dev *dev,
1399 		uint16_t queue_id)
1400 {
1401 	uint16_t base = 0;
1402 	struct nfp_net_hw *hw;
1403 	struct rte_pci_device *pci_dev;
1404 
1405 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1406 	if (rte_intr_type_get(pci_dev->intr_handle) != RTE_INTR_HANDLE_UIO)
1407 		base = 1;
1408 
1409 	/* Make sure all updates are written before un-masking */
1410 	rte_wmb();
1411 
1412 	hw = nfp_net_get_hw(dev);
1413 	nn_cfg_writeb(&hw->super, NFP_NET_CFG_ICR(base + queue_id), NFP_NET_CFG_ICR_RXTX);
1414 
1415 	return 0;
1416 }
1417 
1418 static void
1419 nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
1420 {
1421 	struct rte_eth_link link;
1422 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1423 
1424 	rte_eth_linkstatus_get(dev, &link);
1425 	if (link.link_status != 0)
1426 		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
1427 				dev->data->port_id, link.link_speed,
1428 				link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
1429 				"full-duplex" : "half-duplex");
1430 	else
1431 		PMD_DRV_LOG(INFO, "Port %d: Link Down", dev->data->port_id);
1432 
1433 	PMD_DRV_LOG(INFO, "PCI Address: " PCI_PRI_FMT,
1434 			pci_dev->addr.domain, pci_dev->addr.bus,
1435 			pci_dev->addr.devid, pci_dev->addr.function);
1436 }
1437 
1438 /*
1439  * Unmask an interrupt
1440  *
1441  * If MSI-X auto-masking is enabled clear the mask bit, otherwise
1442  * clear the ICR for the entry.
1443  */
1444 void
1445 nfp_net_irq_unmask(struct rte_eth_dev *dev)
1446 {
1447 	struct nfp_net_hw *hw;
1448 	struct rte_pci_device *pci_dev;
1449 
1450 	hw = nfp_net_get_hw(dev);
1451 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1452 
1453 	/* Make sure all updates are written before un-masking */
1454 	rte_wmb();
1455 
1456 	if ((hw->super.ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) != 0) {
1457 		/* If MSI-X auto-masking is used, clear the entry */
1458 		rte_intr_ack(pci_dev->intr_handle);
1459 	} else {
1460 		nn_cfg_writeb(&hw->super, NFP_NET_CFG_ICR(NFP_NET_IRQ_LSC_IDX),
1461 				NFP_NET_CFG_ICR_UNMASKED);
1462 	}
1463 }
1464 
1465 /**
1466  * Interrupt handler which shall be registered as an alarm callback for delayed
1467  * handling of a specific interrupt, waiting for the NIC state to become stable.
1468  * As the NFP interrupt state is not stable right after the link goes down, it
1469  * needs to wait 4 seconds to get a stable status.
1470  *
1471  * @param param
1472  *   The address of parameter (struct rte_eth_dev *)
1473  */
1474 void
1475 nfp_net_dev_interrupt_delayed_handler(void *param)
1476 {
1477 	struct rte_eth_dev *dev = param;
1478 
1479 	nfp_net_link_update(dev, 0);
1480 	rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
1481 
1482 	nfp_net_dev_link_status_print(dev);
1483 
1484 	/* Unmasking */
1485 	nfp_net_irq_unmask(dev);
1486 }
1487 
1488 void
1489 nfp_net_dev_interrupt_handler(void *param)
1490 {
1491 	int64_t timeout;
1492 	struct rte_eth_link link;
1493 	struct rte_eth_dev *dev = param;
1494 
1495 	PMD_DRV_LOG(DEBUG, "We got an LSC interrupt!");
1496 
1497 	rte_eth_linkstatus_get(dev, &link);
1498 
1499 	nfp_net_link_update(dev, 0);
1500 
1501 	/* Likely to come up */
1502 	if (link.link_status == 0) {
1503 		/* Handle it 1 sec later, waiting for it to become stable */
1504 		timeout = NFP_NET_LINK_UP_CHECK_TIMEOUT;
1505 	} else {  /* Likely to go down */
1506 		/* Handle it 4 sec later, waiting for it to become stable */
1507 		timeout = NFP_NET_LINK_DOWN_CHECK_TIMEOUT;
1508 	}
1509 
1510 	if (rte_eal_alarm_set(timeout * 1000,
1511 			nfp_net_dev_interrupt_delayed_handler,
1512 			(void *)dev) != 0) {
1513 		PMD_INIT_LOG(ERR, "Error setting alarm");
1514 		/* Unmasking */
1515 		nfp_net_irq_unmask(dev);
1516 	}
1517 }
1518 
1519 int
1520 nfp_net_dev_mtu_set(struct rte_eth_dev *dev,
1521 		uint16_t mtu)
1522 {
1523 	struct nfp_net_hw *hw;
1524 
1525 	hw = nfp_net_get_hw(dev);
1526 
1527 	/* MTU setting is forbidden if port is started */
1528 	if (dev->data->dev_started) {
1529 		PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
1530 				dev->data->port_id);
1531 		return -EBUSY;
1532 	}
1533 
1534 	/* MTU larger than current mbufsize not supported */
1535 	if (mtu > hw->flbufsz) {
1536 		PMD_DRV_LOG(ERR, "MTU (%u) larger than current mbufsize (%u) not supported",
1537 				mtu, hw->flbufsz);
1538 		return -ERANGE;
1539 	}
1540 
1541 	/* Writing to configuration space */
1542 	nn_cfg_writel(&hw->super, NFP_NET_CFG_MTU, mtu);
1543 
1544 	hw->mtu = mtu;
1545 
1546 	return 0;
1547 }
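/*
 * Illustrative application-side sketch (the MTU value is arbitrary): the port
 * must be stopped first, and the requested MTU must fit into the current
 * mbuf size (hw->flbufsz):
 *
 *     (void)rte_eth_dev_stop(port_id);
 *     if (rte_eth_dev_set_mtu(port_id, 1500) != 0)
 *         printf("MTU update rejected\n");
 *     (void)rte_eth_dev_start(port_id);
 */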
1548 
1549 int
1550 nfp_net_vlan_offload_set(struct rte_eth_dev *dev,
1551 		int mask)
1552 {
1553 	int ret;
1554 	uint32_t update;
1555 	uint32_t new_ctrl;
1556 	struct nfp_hw *hw;
1557 	uint64_t rx_offload;
1558 	struct nfp_net_hw *net_hw;
1559 	uint32_t rxvlan_ctrl = 0;
1560 
1561 	net_hw = nfp_net_get_hw(dev);
1562 	hw = &net_hw->super;
1563 	rx_offload = dev->data->dev_conf.rxmode.offloads;
1564 	new_ctrl = hw->ctrl;
1565 
1566 	/* VLAN stripping setting */
1567 	if ((mask & RTE_ETH_VLAN_STRIP_MASK) != 0) {
1568 		nfp_net_enable_rxvlan_cap(net_hw, &rxvlan_ctrl);
1569 		if ((rx_offload & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) != 0)
1570 			new_ctrl |= rxvlan_ctrl;
1571 		else
1572 			new_ctrl &= ~rxvlan_ctrl;
1573 	}
1574 
1575 	/* QinQ stripping setting */
1576 	if ((mask & RTE_ETH_QINQ_STRIP_MASK) != 0) {
1577 		if ((rx_offload & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) != 0)
1578 			new_ctrl |= NFP_NET_CFG_CTRL_RXQINQ;
1579 		else
1580 			new_ctrl &= ~NFP_NET_CFG_CTRL_RXQINQ;
1581 	}
1582 
1583 	if (new_ctrl == hw->ctrl)
1584 		return 0;
1585 
1586 	update = NFP_NET_CFG_UPDATE_GEN;
1587 
1588 	ret = nfp_reconfig(hw, new_ctrl, update);
1589 	if (ret != 0)
1590 		return ret;
1591 
1592 	hw->ctrl = new_ctrl;
1593 
1594 	return 0;
1595 }
1596 
1597 static int
1598 nfp_net_rss_reta_write(struct rte_eth_dev *dev,
1599 		struct rte_eth_rss_reta_entry64 *reta_conf,
1600 		uint16_t reta_size)
1601 {
1602 	uint16_t i;
1603 	uint16_t j;
1604 	uint16_t idx;
1605 	uint8_t mask;
1606 	uint32_t reta;
1607 	uint16_t shift;
1608 	struct nfp_hw *hw;
1609 	struct nfp_net_hw *net_hw;
1610 
1611 	net_hw = nfp_net_get_hw(dev);
1612 	hw = &net_hw->super;
1613 
1614 	if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
1615 		PMD_DRV_LOG(ERR, "The size of the configured hash lookup table (%hu)"
1616 				" does not match the size supported by hardware (%d)",
1617 				reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
1618 		return -EINVAL;
1619 	}
1620 
1621 	/*
1622 	 * Update the Redirection Table. There are 128 8-bit entries which can be
1623 	 * managed as 32 32-bit entries.
1624 	 */
1625 	for (i = 0; i < reta_size; i += 4) {
1626 		/* Handling 4 RSS entries per loop */
1627 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
1628 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
1629 		mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
1630 		if (mask == 0)
1631 			continue;
1632 
1633 		reta = 0;
1634 
1635 		/* If all 4 entries were set, there is no need to read the RETA register */
1636 		if (mask != 0xF)
1637 			reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + i);
1638 
1639 		for (j = 0; j < 4; j++) {
1640 			if ((mask & (0x1 << j)) == 0)
1641 				continue;
1642 
1643 			/* Clearing the entry bits */
1644 			if (mask != 0xF)
1645 				reta &= ~(0xFF << (8 * j));
1646 
1647 			reta |= reta_conf[idx].reta[shift + j] << (8 * j);
1648 		}
1649 
1650 		nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift, reta);
1651 	}
1652 
1653 	return 0;
1654 }
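/*
 * Illustrative sketch of the packing used above (queue indexes are made up):
 * the 128 one-byte RETA entries are written as 32 32-bit words, with entry i
 * in the least significant byte of its word:
 *
 *     uint8_t  entries[4] = { 5, 2, 7, 0 };          // reta[i] .. reta[i + 3]
 *     uint32_t reta_reg = entries[0]
 *             | (uint32_t)entries[1] << 8
 *             | (uint32_t)entries[2] << 16
 *             | (uint32_t)entries[3] << 24;          // 0x00070205
 *
 *     // written to NFP_NET_CFG_RSS_ITBL + i
 */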
1655 
1656 /* Update the Redirection Table (RETA) of the Receive Side Scaling of an Ethernet device. */
1657 int
1658 nfp_net_reta_update(struct rte_eth_dev *dev,
1659 		struct rte_eth_rss_reta_entry64 *reta_conf,
1660 		uint16_t reta_size)
1661 {
1662 	int ret;
1663 	uint32_t update;
1664 	struct nfp_hw *hw;
1665 	struct nfp_net_hw *net_hw;
1666 
1667 	net_hw = nfp_net_get_hw(dev);
1668 	hw = &net_hw->super;
1669 
1670 	if ((hw->ctrl & NFP_NET_CFG_CTRL_RSS_ANY) == 0)
1671 		return -EINVAL;
1672 
1673 	ret = nfp_net_rss_reta_write(dev, reta_conf, reta_size);
1674 	if (ret != 0)
1675 		return ret;
1676 
1677 	update = NFP_NET_CFG_UPDATE_RSS;
1678 
1679 	if (nfp_reconfig(hw, hw->ctrl, update) != 0)
1680 		return -EIO;
1681 
1682 	return 0;
1683 }
1684 
1685 /* Query the Redirection Table (RETA) of the Receive Side Scaling of an Ethernet device. */
1686 int
1687 nfp_net_reta_query(struct rte_eth_dev *dev,
1688 		struct rte_eth_rss_reta_entry64 *reta_conf,
1689 		uint16_t reta_size)
1690 {
1691 	uint16_t i;
1692 	uint16_t j;
1693 	uint16_t idx;
1694 	uint8_t mask;
1695 	uint32_t reta;
1696 	uint16_t shift;
1697 	struct nfp_hw *hw;
1698 	struct nfp_net_hw *net_hw;
1699 
1700 	net_hw = nfp_net_get_hw(dev);
1701 	hw = &net_hw->super;
1702 
1703 	if ((hw->ctrl & NFP_NET_CFG_CTRL_RSS_ANY) == 0)
1704 		return -EINVAL;
1705 
1706 	if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
1707 		PMD_DRV_LOG(ERR, "The size of the configured hash lookup table (%d)"
1708 				" does not match the size supported by hardware (%d)",
1709 				reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
1710 		return -EINVAL;
1711 	}
1712 
1713 	/*
1714 	 * Reading the Redirection Table. There are 128 8-bit entries which can be
1715 	 * managed as 32 32-bit entries.
1716 	 */
1717 	for (i = 0; i < reta_size; i += 4) {
1718 		/* Handling 4 RSS entries per loop */
1719 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
1720 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
1721 		mask = (reta_conf[idx].mask >> shift) & 0xF;
1722 
1723 		if (mask == 0)
1724 			continue;
1725 
1726 		reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift);
1727 		for (j = 0; j < 4; j++) {
1728 			if ((mask & (0x1 << j)) == 0)
1729 				continue;
1730 
1731 			reta_conf[idx].reta[shift + j] =
1732 					(uint8_t)((reta >> (8 * j)) & 0xFF);
1733 		}
1734 	}
1735 
1736 	return 0;
1737 }
1738 
1739 static int
1740 nfp_net_rss_hash_write(struct rte_eth_dev *dev,
1741 		struct rte_eth_rss_conf *rss_conf)
1742 {
1743 	uint8_t i;
1744 	uint8_t key;
1745 	uint64_t rss_hf;
1746 	struct nfp_hw *hw;
1747 	struct nfp_net_hw *net_hw;
1748 	uint32_t cfg_rss_ctrl = 0;
1749 
1750 	net_hw = nfp_net_get_hw(dev);
1751 	hw = &net_hw->super;
1752 
1753 	/* Writing the key byte by byte */
1754 	for (i = 0; i < rss_conf->rss_key_len; i++) {
1755 		memcpy(&key, &rss_conf->rss_key[i], 1);
1756 		nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY + i, key);
1757 	}
1758 
1759 	rss_hf = rss_conf->rss_hf;
1760 
1761 	if ((rss_hf & RTE_ETH_RSS_IPV4) != 0)
1762 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4;
1763 
1764 	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) != 0)
1765 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_TCP;
1766 
1767 	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) != 0)
1768 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_UDP;
1769 
1770 	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_SCTP) != 0)
1771 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_SCTP;
1772 
1773 	if ((rss_hf & RTE_ETH_RSS_IPV6) != 0)
1774 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6;
1775 
1776 	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) != 0)
1777 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_TCP;
1778 
1779 	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) != 0)
1780 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_UDP;
1781 
1782 	if ((rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_SCTP) != 0)
1783 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_SCTP;
1784 
1785 	cfg_rss_ctrl |= NFP_NET_CFG_RSS_MASK;
1786 
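	/*
	 * Representor ports hash with CRC32 while plain vNICs use the Toeplitz
	 * algorithm; only the hash function selection differs between the two.
	 */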
1787 	if (rte_eth_dev_is_repr(dev))
1788 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_CRC32;
1789 	else
1790 		cfg_rss_ctrl |= NFP_NET_CFG_RSS_TOEPLITZ;
1791 
1792 	/* Configuring where to apply the RSS hash */
1793 	nn_cfg_writel(hw, NFP_NET_CFG_RSS_CTRL, cfg_rss_ctrl);
1794 
1795 	/* Writing the key size */
1796 	nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY_SZ, rss_conf->rss_key_len);
1797 
1798 	return 0;
1799 }
1800 
1801 int
1802 nfp_net_rss_hash_update(struct rte_eth_dev *dev,
1803 		struct rte_eth_rss_conf *rss_conf)
1804 {
1805 	uint32_t update;
1806 	uint64_t rss_hf;
1807 	struct nfp_hw *hw;
1808 	struct nfp_net_hw *net_hw;
1809 
1810 	net_hw = nfp_net_get_hw(dev);
1811 	hw = &net_hw->super;
1812 
1813 	rss_hf = rss_conf->rss_hf;
1814 
1815 	/* Checking if RSS is enabled */
1816 	if ((hw->ctrl & NFP_NET_CFG_CTRL_RSS_ANY) == 0) {
1817 		if (rss_hf != 0) {
1818 			PMD_DRV_LOG(ERR, "RSS unsupported");
1819 			return -EINVAL;
1820 		}
1821 
1822 		return 0; /* Nothing to do */
1823 	}
1824 
1825 	if (rss_conf->rss_key_len > NFP_NET_CFG_RSS_KEY_SZ) {
1826 		PMD_DRV_LOG(ERR, "RSS hash key too long");
1827 		return -EINVAL;
1828 	}
1829 
1830 	nfp_net_rss_hash_write(dev, rss_conf);
1831 
1832 	update = NFP_NET_CFG_UPDATE_RSS;
1833 
1834 	if (nfp_reconfig(hw, hw->ctrl, update) != 0)
1835 		return -EIO;
1836 
1837 	return 0;
1838 }
1839 
1840 int
1841 nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev,
1842 		struct rte_eth_rss_conf *rss_conf)
1843 {
1844 	uint8_t i;
1845 	uint8_t key;
1846 	uint64_t rss_hf;
1847 	struct nfp_hw *hw;
1848 	uint32_t cfg_rss_ctrl;
1849 	struct nfp_net_hw *net_hw;
1850 
1851 	net_hw = nfp_net_get_hw(dev);
1852 	hw = &net_hw->super;
1853 
1854 	if ((hw->ctrl & NFP_NET_CFG_CTRL_RSS_ANY) == 0)
1855 		return -EINVAL;
1856 
1857 	rss_hf = rss_conf->rss_hf;
1858 	cfg_rss_ctrl = nn_cfg_readl(hw, NFP_NET_CFG_RSS_CTRL);
1859 
1860 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4) != 0)
1861 		rss_hf |= RTE_ETH_RSS_IPV4;
1862 
1863 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_TCP) != 0)
1864 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
1865 
1866 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_TCP) != 0)
1867 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
1868 
1869 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_UDP) != 0)
1870 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
1871 
1872 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_UDP) != 0)
1873 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
1874 
1875 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6) != 0)
1876 		rss_hf |= RTE_ETH_RSS_IPV6;
1877 
1878 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_SCTP) != 0)
1879 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_SCTP;
1880 
1881 	if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_SCTP) != 0)
1882 		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_SCTP;
1883 
1884 	/* Propagate current RSS hash functions to caller */
1885 	rss_conf->rss_hf = rss_hf;
1886 
1887 	/* Reading the key size */
1888 	rss_conf->rss_key_len = nn_cfg_readl(hw, NFP_NET_CFG_RSS_KEY_SZ);
1889 
1890 	/* Reading the key byte by byte */
1891 	for (i = 0; i < rss_conf->rss_key_len; i++) {
1892 		key = nn_cfg_readb(hw, NFP_NET_CFG_RSS_KEY + i);
1893 		memcpy(&rss_conf->rss_key[i], &key, 1);
1894 	}
1895 
1896 	return 0;
1897 }
1898 
1899 int
1900 nfp_net_rss_config_default(struct rte_eth_dev *dev)
1901 {
1902 	int ret;
1903 	uint8_t i;
1904 	uint8_t j;
1905 	uint16_t queue = 0;
1906 	struct rte_eth_conf *dev_conf;
1907 	struct rte_eth_rss_conf rss_conf;
1908 	uint16_t rx_queues = dev->data->nb_rx_queues;
1909 	struct rte_eth_rss_reta_entry64 nfp_reta_conf[2];
1910 
1911 	nfp_reta_conf[0].mask = ~0x0;
1912 	nfp_reta_conf[1].mask = ~0x0;
1913 
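	/*
	 * Fill the 128-entry redirection table, split across two 64-entry
	 * groups, assigning the Rx queues in a round-robin fashion.
	 */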
1914 	for (i = 0; i < 0x40; i += 8) {
1915 		for (j = i; j < (i + 8); j++) {
1916 			nfp_reta_conf[0].reta[j] = queue;
1917 			nfp_reta_conf[1].reta[j] = queue++;
1918 			queue %= rx_queues;
1919 		}
1920 	}
1921 
1922 	ret = nfp_net_rss_reta_write(dev, nfp_reta_conf, 0x80);
1923 	if (ret != 0)
1924 		return ret;
1925 
1926 	dev_conf = &dev->data->dev_conf;
1927 	if (dev_conf == NULL) {
1928 		PMD_DRV_LOG(ERR, "Wrong rss conf");
1929 		return -EINVAL;
1930 	}
1931 
1932 	rss_conf = dev_conf->rx_adv_conf.rss_conf;
1933 	ret = nfp_net_rss_hash_write(dev, &rss_conf);
1934 
1935 	return ret;
1936 }
1937 
1938 void
1939 nfp_net_stop_rx_queue(struct rte_eth_dev *dev)
1940 {
1941 	uint16_t i;
1942 	struct nfp_net_rxq *this_rx_q;
1943 
1944 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
1945 		this_rx_q = dev->data->rx_queues[i];
1946 		nfp_net_reset_rx_queue(this_rx_q);
1947 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
1948 	}
1949 }
1950 
1951 void
1952 nfp_net_close_rx_queue(struct rte_eth_dev *dev)
1953 {
1954 	uint16_t i;
1955 	struct nfp_net_rxq *this_rx_q;
1956 
1957 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
1958 		this_rx_q = dev->data->rx_queues[i];
1959 		nfp_net_reset_rx_queue(this_rx_q);
1960 		nfp_net_rx_queue_release(dev, i);
1961 	}
1962 }
1963 
1964 void
1965 nfp_net_stop_tx_queue(struct rte_eth_dev *dev)
1966 {
1967 	uint16_t i;
1968 	struct nfp_net_txq *this_tx_q;
1969 
1970 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
1971 		this_tx_q = dev->data->tx_queues[i];
1972 		nfp_net_reset_tx_queue(this_tx_q);
1973 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
1974 	}
1975 }
1976 
1977 void
1978 nfp_net_close_tx_queue(struct rte_eth_dev *dev)
1979 {
1980 	uint16_t i;
1981 	struct nfp_net_txq *this_tx_q;
1982 
1983 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
1984 		this_tx_q = dev->data->tx_queues[i];
1985 		nfp_net_reset_tx_queue(this_tx_q);
1986 		nfp_net_tx_queue_release(dev, i);
1987 	}
1988 }
1989 
1990 int
1991 nfp_net_set_vxlan_port(struct nfp_net_hw *net_hw,
1992 		size_t idx,
1993 		uint16_t port)
1994 {
1995 	int ret;
1996 	uint32_t i;
1997 	struct nfp_hw *hw = &net_hw->super;
1998 
1999 	if (idx >= NFP_NET_N_VXLAN_PORTS) {
2000 		PMD_DRV_LOG(ERR, "The idx value is out of range.");
2001 		return -ERANGE;
2002 	}
2003 
2004 	net_hw->vxlan_ports[idx] = port;
2005 
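	/*
	 * Each 32-bit VXLAN port register holds two 16-bit UDP ports:
	 * port[i] in the low half and port[i + 1] in the high half.
	 */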
2006 	for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i += 2) {
2007 		nn_cfg_writel(hw, NFP_NET_CFG_VXLAN_PORT + i * sizeof(port),
2008 				(net_hw->vxlan_ports[i + 1] << 16) | net_hw->vxlan_ports[i]);
2009 	}
2010 
2011 	rte_spinlock_lock(&hw->reconfig_lock);
2012 
2013 	nn_cfg_writel(hw, NFP_NET_CFG_UPDATE, NFP_NET_CFG_UPDATE_VXLAN);
2014 	rte_wmb();
2015 
2016 	ret = nfp_reconfig_real(hw, NFP_NET_CFG_UPDATE_VXLAN);
2017 
2018 	rte_spinlock_unlock(&hw->reconfig_lock);
2019 
2020 	return ret;
2021 }
2022 
2023 /*
2024  * The firmware with NFD3 can not handle DMA address requiring more
2025  * than 40 bits.
2026  */
2027 int
2028 nfp_net_check_dma_mask(struct nfp_net_hw *hw,
2029 		char *name)
2030 {
2031 	if (hw->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3 &&
2032 			rte_mem_check_dma_mask(40) != 0) {
2033 		PMD_DRV_LOG(ERR, "Device %s cannot be used: the DMA mask is restricted to 40 bits!",
2034 				name);
2035 		return -ENODEV;
2036 	}
2037 
2038 	return 0;
2039 }
2040 
2041 void
2042 nfp_net_init_metadata_format(struct nfp_net_hw *hw)
2043 {
2044 	/*
2045 	 * ABI 4.x and the ctrl vNIC always use chained metadata. Otherwise, single
2046 	 * metadata is used only when the hardware capability advertises just RSS(v1);
2047 	 * RSS(v2) also indicates that chained metadata is in use.
2048 	 */
2049 	if (hw->ver.major == 4) {
2050 		hw->meta_format = NFP_NET_METAFORMAT_CHAINED;
2051 	} else if ((hw->super.cap & NFP_NET_CFG_CTRL_CHAIN_META) != 0) {
2052 		hw->meta_format = NFP_NET_METAFORMAT_CHAINED;
2053 		/*
2054 		 * RSS(v1) is incompatible with chained metadata. hw->super.cap only represents
2055 		 * the firmware's capability, not its configuration. Clear the bit here so that
2056 		 * hw->super.cap can later be used unambiguously to identify RSS support.
2057 		 */
2058 		hw->super.cap &= ~NFP_NET_CFG_CTRL_RSS;
2059 	} else {
2060 		hw->meta_format = NFP_NET_METAFORMAT_SINGLE;
2061 	}
2062 }
2063 
2064 void
2065 nfp_net_cfg_read_version(struct nfp_net_hw *hw)
2066 {
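	/* Read the 32-bit VERSION word once and reinterpret it through its bit-field layout. */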
2067 	union {
2068 		uint32_t whole;
2069 		struct nfp_net_fw_ver split;
2070 	} version;
2071 
2072 	version.whole = nn_cfg_readl(&hw->super, NFP_NET_CFG_VERSION);
2073 	hw->ver = version.split;
2074 }
2075 
2076 static void
2077 nfp_net_get_nsp_info(struct nfp_net_hw *hw,
2078 		char *nsp_version)
2079 {
2080 	struct nfp_nsp *nsp;
2081 
2082 	nsp = nfp_nsp_open(hw->cpp);
2083 	if (nsp == NULL)
2084 		return;
2085 
2086 	snprintf(nsp_version, FW_VER_LEN, "%hu.%hu",
2087 			nfp_nsp_get_abi_ver_major(nsp),
2088 			nfp_nsp_get_abi_ver_minor(nsp));
2089 
2090 	nfp_nsp_close(nsp);
2091 }
2092 
2093 void
2094 nfp_net_get_fw_version(struct nfp_net_hw *hw,
2095 		uint32_t *mip_version)
2096 {
2097 	struct nfp_mip *mip;
2098 
2099 	mip = nfp_mip_open(hw->cpp);
2100 	if (mip == NULL) {
2101 		*mip_version = 0;
2102 		return;
2103 	}
2104 
2105 	*mip_version = nfp_mip_fw_version(mip);
2106 
2107 	nfp_mip_close(mip);
2108 }
2109 
2110 static void
2111 nfp_net_get_mip_name(struct nfp_net_hw *hw,
2112 		char *mip_name)
2113 {
2114 	struct nfp_mip *mip;
2115 
2116 	mip = nfp_mip_open(hw->cpp);
2117 	if (mip == NULL)
2118 		return;
2119 
2120 	snprintf(mip_name, FW_VER_LEN, "%s", nfp_mip_name(mip));
2121 
2122 	nfp_mip_close(mip);
2123 }
2124 
2125 static void
2126 nfp_net_get_app_name(struct nfp_net_hw *hw,
2127 		char *app_name)
2128 {
2129 	switch (hw->pf_dev->app_fw_id) {
2130 	case NFP_APP_FW_CORE_NIC:
2131 		snprintf(app_name, FW_VER_LEN, "%s", "nic");
2132 		break;
2133 	case NFP_APP_FW_FLOWER_NIC:
2134 		snprintf(app_name, FW_VER_LEN, "%s", "flower");
2135 		break;
2136 	default:
2137 		snprintf(app_name, FW_VER_LEN, "%s", "unknown");
2138 		break;
2139 	}
2140 }
2141 
2142 int
2143 nfp_net_firmware_version_get(struct rte_eth_dev *dev,
2144 		char *fw_version,
2145 		size_t fw_size)
2146 {
2147 	struct nfp_net_hw *hw;
2148 	char mip_name[FW_VER_LEN];
2149 	char app_name[FW_VER_LEN];
2150 	char nsp_version[FW_VER_LEN];
2151 	char vnic_version[FW_VER_LEN];
2152 
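	/* Following the ethdev convention, report the required buffer size when the given one is too small. */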
2153 	if (fw_size < FW_VER_LEN)
2154 		return FW_VER_LEN;
2155 
2156 	hw = nfp_net_get_hw(dev);
2157 
2158 	if (rte_eth_dev_is_repr(dev)) {
2159 		snprintf(vnic_version, FW_VER_LEN, "%d.%d.%d.%d",
2160 			hw->ver.extend, hw->ver.class,
2161 			hw->ver.major, hw->ver.minor);
2162 	} else {
2163 		snprintf(vnic_version, FW_VER_LEN, "*");
2164 	}
2165 
2166 	nfp_net_get_nsp_info(hw, nsp_version);
2167 	nfp_net_get_mip_name(hw, mip_name);
2168 	nfp_net_get_app_name(hw, app_name);
2169 
2170 	snprintf(fw_version, FW_VER_LEN, "%s %s %s %s",
2171 			vnic_version, nsp_version, mip_name, app_name);
2172 
2173 	return 0;
2174 }
2175 
2176 bool
2177 nfp_net_is_valid_nfd_version(struct nfp_net_fw_ver version)
2178 {
2179 	uint8_t nfd_version = version.extend;
2180 
2181 	if (nfd_version == NFP_NET_CFG_VERSION_DP_NFD3)
2182 		return true;
2183 
2184 	if (nfd_version == NFP_NET_CFG_VERSION_DP_NFDK) {
2185 		if (version.major < 5) {
2186 			PMD_INIT_LOG(ERR, "NFDK must use ABI 5 or newer, found: %d",
2187 					version.major);
2188 			return false;
2189 		}
2190 
2191 		return true;
2192 	}
2193 
2194 	return false;
2195 }
2196 
2197 /* Disable rx and tx functions to allow for reconfiguring. */
2198 int
2199 nfp_net_stop(struct rte_eth_dev *dev)
2200 {
2201 	struct nfp_cpp *cpp;
2202 	struct nfp_net_hw *hw;
2203 
2204 	hw = nfp_net_get_hw(dev);
2205 
2206 	nfp_net_disable_queues(dev);
2207 
2208 	/* Clear queues */
2209 	nfp_net_stop_tx_queue(dev);
2210 	nfp_net_stop_rx_queue(dev);
2211 
2212 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
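	/* Secondary processes take the CPP handle from the per-process private data. */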
2213 		cpp = hw->cpp;
2214 	else
2215 		cpp = ((struct nfp_pf_dev *)(dev->process_private))->cpp;
2216 
2217 	nfp_eth_set_configured(cpp, hw->nfp_idx, 0);
2218 
2219 	return 0;
2220 }
2221 
2222 static enum rte_eth_fc_mode
2223 nfp_net_get_pause_mode(struct nfp_eth_table_port *eth_port)
2224 {
2225 	enum rte_eth_fc_mode mode;
2226 
2227 	if (eth_port->rx_pause_enabled) {
2228 		if (eth_port->tx_pause_enabled)
2229 			mode = RTE_ETH_FC_FULL;
2230 		else
2231 			mode = RTE_ETH_FC_RX_PAUSE;
2232 	} else {
2233 		if (eth_port->tx_pause_enabled)
2234 			mode = RTE_ETH_FC_TX_PAUSE;
2235 		else
2236 			mode = RTE_ETH_FC_NONE;
2237 	}
2238 
2239 	return mode;
2240 }
2241 
2242 int
2243 nfp_net_flow_ctrl_get(struct rte_eth_dev *dev,
2244 		struct rte_eth_fc_conf *fc_conf)
2245 {
2246 	struct nfp_net_hw *net_hw;
2247 	struct nfp_eth_table *nfp_eth_table;
2248 	struct nfp_eth_table_port *eth_port;
2249 
2250 	net_hw = nfp_net_get_hw(dev);
2251 	if (net_hw->pf_dev == NULL)
2252 		return -EINVAL;
2253 
2254 	nfp_eth_table = net_hw->pf_dev->nfp_eth_table;
2255 	eth_port = &nfp_eth_table->ports[net_hw->idx];
2256 
2257 	/* Currently only the RX/TX pause switches are supported. */
2258 	fc_conf->mode = nfp_net_get_pause_mode(eth_port);
2259 
2260 	return 0;
2261 }
2262 
2263 static int
2264 nfp_net_pause_frame_set(struct nfp_net_hw *net_hw,
2265 		struct nfp_eth_table_port *eth_port,
2266 		enum rte_eth_fc_mode mode)
2267 {
2268 	int err;
2269 	bool flag;
2270 	struct nfp_nsp *nsp;
2271 
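	/* Apply the pause settings through an NSP config transaction: start, set TX/RX pause, then commit. */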
2272 	nsp = nfp_eth_config_start(net_hw->cpp, eth_port->index);
2273 	if (nsp == NULL) {
2274 		PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle.");
2275 		return -EIO;
2276 	}
2277 
2278 	flag = (mode & RTE_ETH_FC_TX_PAUSE) != 0;
2279 	err = nfp_eth_set_tx_pause(nsp, flag);
2280 	if (err != 0) {
2281 		PMD_DRV_LOG(ERR, "Failed to configure TX pause frame.");
2282 		nfp_eth_config_cleanup_end(nsp);
2283 		return err;
2284 	}
2285 
2286 	flag = (mode & RTE_ETH_FC_RX_PAUSE) != 0;
2287 	err = nfp_eth_set_rx_pause(nsp, flag);
2288 	if (err != 0) {
2289 		PMD_DRV_LOG(ERR, "Failed to configure RX pause frame.");
2290 		nfp_eth_config_cleanup_end(nsp);
2291 		return err;
2292 	}
2293 
2294 	err = nfp_eth_config_commit_end(nsp);
2295 	if (err != 0) {
2296 		PMD_DRV_LOG(ERR, "Failed to configure pause frame.");
2297 		return err;
2298 	}
2299 
2300 	return 0;
2301 }
2302 
2303 int
2304 nfp_net_flow_ctrl_set(struct rte_eth_dev *dev,
2305 		struct rte_eth_fc_conf *fc_conf)
2306 {
2307 	int ret;
2308 	struct nfp_net_hw *net_hw;
2309 	enum rte_eth_fc_mode set_mode;
2310 	enum rte_eth_fc_mode original_mode;
2311 	struct nfp_eth_table *nfp_eth_table;
2312 	struct nfp_eth_table_port *eth_port;
2313 
2314 	net_hw = nfp_net_get_hw(dev);
2315 	if (net_hw->pf_dev == NULL)
2316 		return -EINVAL;
2317 
2318 	nfp_eth_table = net_hw->pf_dev->nfp_eth_table;
2319 	eth_port = &nfp_eth_table->ports[net_hw->idx];
2320 
2321 	original_mode = nfp_net_get_pause_mode(eth_port);
2322 	set_mode = fc_conf->mode;
2323 
2324 	if (set_mode == original_mode)
2325 		return 0;
2326 
2327 	ret = nfp_net_pause_frame_set(net_hw, eth_port, set_mode);
2328 	if (ret != 0)
2329 		return ret;
2330 
2331 	/* Update eth_table after modifying RX/TX pause frame mode. */
2332 	eth_port->tx_pause_enabled = (set_mode & RTE_ETH_FC_TX_PAUSE) != 0;
2333 	eth_port->rx_pause_enabled = (set_mode & RTE_ETH_FC_RX_PAUSE) != 0;
2334 
2335 	return 0;
2336 }
2337 
2338 int
2339 nfp_net_fec_get_capability(struct rte_eth_dev *dev,
2340 		struct rte_eth_fec_capa *speed_fec_capa,
2341 		__rte_unused unsigned int num)
2342 {
2343 	uint16_t speed;
2344 	struct nfp_net_hw *hw;
2345 	uint32_t supported_fec;
2346 	struct nfp_eth_table *nfp_eth_table;
2347 	struct nfp_eth_table_port *eth_port;
2348 
2349 	hw = nfp_net_get_hw(dev);
2350 	if (hw->pf_dev == NULL)
2351 		return -EINVAL;
2352 
2353 	nfp_eth_table = hw->pf_dev->nfp_eth_table;
2354 	eth_port = &nfp_eth_table->ports[hw->idx];
2355 
2356 	speed = eth_port->speed;
2357 	supported_fec = nfp_eth_supported_fec_modes(eth_port);
2358 	if (speed == 0 || supported_fec == 0) {
2359 		PMD_DRV_LOG(ERR, "The speed or the supported FEC modes are invalid.");
2360 		return -EINVAL;
2361 	}
2362 
2363 	if (speed_fec_capa == NULL)
2364 		return NFP_FEC_CAPA_ENTRY_NUM;
2365 
2366 	speed_fec_capa->speed = speed;
2367 
2368 	if ((supported_fec & NFP_FEC_AUTO) != 0)
2369 		speed_fec_capa->capa |= RTE_ETH_FEC_MODE_CAPA_MASK(AUTO);
2370 	if ((supported_fec & NFP_FEC_BASER) != 0)
2371 		speed_fec_capa->capa |= RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
2372 	if ((supported_fec & NFP_FEC_REED_SOLOMON) != 0)
2373 		speed_fec_capa->capa |= RTE_ETH_FEC_MODE_CAPA_MASK(RS);
2374 	if ((supported_fec & NFP_FEC_DISABLED) != 0)
2375 		speed_fec_capa->capa |= RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
2376 
2377 	return NFP_FEC_CAPA_ENTRY_NUM;
2378 }
2379 
2380 static uint32_t
2381 nfp_net_fec_nfp_to_rte(enum nfp_eth_fec fec)
2382 {
2383 	switch (fec) {
2384 	case NFP_FEC_AUTO_BIT:
2385 		return RTE_ETH_FEC_MODE_CAPA_MASK(AUTO);
2386 	case NFP_FEC_BASER_BIT:
2387 		return RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
2388 	case NFP_FEC_REED_SOLOMON_BIT:
2389 		return RTE_ETH_FEC_MODE_CAPA_MASK(RS);
2390 	case NFP_FEC_DISABLED_BIT:
2391 		return RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
2392 	default:
2393 		PMD_DRV_LOG(ERR, "FEC mode is invalid.");
2394 		return 0;
2395 	}
2396 }
2397 
2398 int
2399 nfp_net_fec_get(struct rte_eth_dev *dev,
2400 		uint32_t *fec_capa)
2401 {
2402 	struct nfp_net_hw *hw;
2403 	struct nfp_eth_table *nfp_eth_table;
2404 	struct nfp_eth_table_port *eth_port;
2405 
2406 	hw = nfp_net_get_hw(dev);
2407 	if (hw->pf_dev == NULL)
2408 		return -EINVAL;
2409 
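	/*
	 * When the link is down, refresh the local copy of this port's entry so
	 * the reported FEC mode reflects the latest NSP configuration.
	 */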
2410 	if (dev->data->dev_link.link_status == RTE_ETH_LINK_DOWN) {
2411 		nfp_eth_table = nfp_eth_read_ports(hw->cpp);
		if (nfp_eth_table == NULL) {
			PMD_DRV_LOG(ERR, "Failed to read the port table.");
			return -EIO;
		}

2412 		hw->pf_dev->nfp_eth_table->ports[hw->idx] = nfp_eth_table->ports[hw->idx];
2413 		free(nfp_eth_table);
2414 	}
2415 
2416 	nfp_eth_table = hw->pf_dev->nfp_eth_table;
2417 	eth_port = &nfp_eth_table->ports[hw->idx];
2418 
2419 	if (!nfp_eth_can_support_fec(eth_port)) {
2420 		PMD_DRV_LOG(ERR, "The NFP port does not support FEC.");
2421 		return -ENOTSUP;
2422 	}
2423 
2424 	/*
2425 	 * If the link is down, the configured FEC mode is returned (AUTO when
2426 	 * AUTO is enabled). If the link is up, the currently active FEC mode
2427 	 * is returned.
2428 	 */
2429 	if (dev->data->dev_link.link_status == RTE_ETH_LINK_DOWN)
2430 		*fec_capa = nfp_net_fec_nfp_to_rte(eth_port->fec);
2431 	else
2432 		*fec_capa = nfp_net_fec_nfp_to_rte(eth_port->act_fec);
2433 
2434 	if (*fec_capa == 0)
2435 		return -EINVAL;
2436 
2437 	return 0;
2438 }
2439 
2440 static enum nfp_eth_fec
2441 nfp_net_fec_rte_to_nfp(uint32_t fec)
2442 {
2443 	switch (fec) {
2444 	case RTE_BIT32(RTE_ETH_FEC_AUTO):
2445 		return NFP_FEC_AUTO_BIT;
2446 	case RTE_BIT32(RTE_ETH_FEC_NOFEC):
2447 		return NFP_FEC_DISABLED_BIT;
2448 	case RTE_BIT32(RTE_ETH_FEC_RS):
2449 		return NFP_FEC_REED_SOLOMON_BIT;
2450 	case RTE_BIT32(RTE_ETH_FEC_BASER):
2451 		return NFP_FEC_BASER_BIT;
2452 	default:
2453 		return NFP_FEC_INVALID_BIT;
2454 	}
2455 }
2456 
2457 int
2458 nfp_net_fec_set(struct rte_eth_dev *dev,
2459 		uint32_t fec_capa)
2460 {
2461 	enum nfp_eth_fec fec;
2462 	struct nfp_net_hw *hw;
2463 	uint32_t supported_fec;
2464 	struct nfp_eth_table *nfp_eth_table;
2465 	struct nfp_eth_table_port *eth_port;
2466 
2467 	hw = nfp_net_get_hw(dev);
2468 	if (hw->pf_dev == NULL)
2469 		return -EINVAL;
2470 
2471 	nfp_eth_table = hw->pf_dev->nfp_eth_table;
2472 	eth_port = &nfp_eth_table->ports[hw->idx];
2473 
2474 	supported_fec = nfp_eth_supported_fec_modes(eth_port);
2475 	if (supported_fec == 0) {
2476 		PMD_DRV_LOG(ERR, "The NFP port does not support FEC.");
2477 		return -ENOTSUP;
2478 	}
2479 
2480 	fec = nfp_net_fec_rte_to_nfp(fec_capa);
2481 	if (fec == NFP_FEC_INVALID_BIT) {
2482 		PMD_DRV_LOG(ERR, "The requested FEC mode is invalid.");
2483 		return -EINVAL;
2484 	}
2485 
2486 	if ((RTE_BIT32(fec) & supported_fec) == 0) {
2487 		PMD_DRV_LOG(ERR, "The requested FEC mode is not supported.");
2488 		return -EIO;
2489 	}
2490 
2491 	return nfp_eth_set_fec(hw->cpp, eth_port->index, fec);
2492 }
2493