xref: /dpdk/drivers/net/nfp/nfp_ethdev.c (revision 1ec9a3afeb51465b23a2d4734bddd0c39581406b)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2014-2021 Netronome Systems, Inc.
 * All rights reserved.
 *
 * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
 */

#include <unistd.h>

#include <eal_firmware.h>
#include <rte_alarm.h>
#include <rte_kvargs.h>

#include "flower/nfp_flower.h"
#include "nfd3/nfp_nfd3.h"
#include "nfdk/nfp_nfdk.h"
#include "nfpcore/nfp_cpp.h"
#include "nfpcore/nfp_elf.h"
#include "nfpcore/nfp_hwinfo.h"
#include "nfpcore/nfp_rtsym.h"
#include "nfpcore/nfp_nsp.h"
#include "nfpcore/nfp6000_pcie.h"
#include "nfpcore/nfp_resource.h"
#include "nfpcore/nfp_sync.h"

#include "nfp_cpp_bridge.h"
#include "nfp_ipsec.h"
#include "nfp_logs.h"
#include "nfp_net_flow.h"

/* 64-bit per app capabilities */
#define NFP_NET_APP_CAP_SP_INDIFF       RTE_BIT64(0) /* Indifferent to port speed */

#define NFP_PF_DRIVER_NAME net_nfp_pf
#define NFP_PF_FORCE_RELOAD_FW   "force_reload_fw"

struct nfp_net_init {
	/** Sequential physical port number, only valid for CoreNIC firmware */
	uint8_t idx;

	/** Internal port number as seen from NFP */
	uint8_t nfp_idx;

	struct nfp_net_hw_priv *hw_priv;
};

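/*
 * Sketch of the expected devargs usage (assumed invocation, not taken
 * from this file): the key/value pair is passed with the PCI device on
 * the EAL allow list, e.g.
 *     dpdk-testpmd -a 0000:01:00.0,force_reload_fw=1 -- -i
 * The handler below parses the integer value of such a pair.
 */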
static int
nfp_devarg_handle_int(const char *key,
		const char *value,
		void *extra_args)
{
	char *end_ptr;
	uint64_t *num = extra_args;

	if (value == NULL)
		return -EPERM;

	*num = strtoul(value, &end_ptr, 10);
	if (*num == ULONG_MAX) {
		PMD_DRV_LOG(ERR, "%s: '%s' is not a valid param", key, value);
		return -ERANGE;
	} else if (value == end_ptr) {
		return -EPERM;
	}

	return 0;
}

static void
nfp_devarg_parse_force_reload_fw(struct rte_kvargs *kvlist,
		bool *force_reload_fw)
{
	int ret;
	uint64_t value;

	if (rte_kvargs_count(kvlist, NFP_PF_FORCE_RELOAD_FW) != 1)
		return;

	ret = rte_kvargs_process(kvlist, NFP_PF_FORCE_RELOAD_FW, &nfp_devarg_handle_int, &value);
	if (ret != 0)
		return;

	if (value == 1)
		*force_reload_fw = true;
	else if (value == 0)
		*force_reload_fw = false;
	else
		PMD_DRV_LOG(ERR, "The value is invalid, expected format is %s=0/1",
				NFP_PF_FORCE_RELOAD_FW);
}

static void
nfp_devargs_parse(struct nfp_devargs *nfp_devargs_param,
		const struct rte_devargs *devargs)
{
	struct rte_kvargs *kvlist;

	if (devargs == NULL)
		return;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		return;

	nfp_devarg_parse_force_reload_fw(kvlist, &nfp_devargs_param->force_reload_fw);

	rte_kvargs_free(kvlist);
}

static void
nfp_net_pf_read_mac(struct nfp_app_fw_nic *app_fw_nic,
		uint16_t port,
		struct nfp_net_hw_priv *hw_priv)
{
	struct nfp_net_hw *hw;
	struct nfp_eth_table *nfp_eth_table;

	/* Grab a pointer to the correct physical port */
	hw = app_fw_nic->ports[port];

	nfp_eth_table = hw_priv->pf_dev->nfp_eth_table;

	rte_ether_addr_copy(&nfp_eth_table->ports[port].mac_addr, &hw->super.mac_addr);
}

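/*
 * Map a single RTE_ETH_LINK_SPEED_* bitmap bit to its numeric
 * RTE_ETH_SPEED_NUM_* value. The switch matches exact bitmap values,
 * so a combination of several speed bits (or an unknown bit) falls
 * through to RTE_ETH_SPEED_NUM_NONE.
 */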
static uint32_t
nfp_net_speed_bitmap2speed(uint32_t speeds_bitmap)
{
	switch (speeds_bitmap) {
	case RTE_ETH_LINK_SPEED_10M_HD:
		return RTE_ETH_SPEED_NUM_10M;
	case RTE_ETH_LINK_SPEED_10M:
		return RTE_ETH_SPEED_NUM_10M;
	case RTE_ETH_LINK_SPEED_100M_HD:
		return RTE_ETH_SPEED_NUM_100M;
	case RTE_ETH_LINK_SPEED_100M:
		return RTE_ETH_SPEED_NUM_100M;
	case RTE_ETH_LINK_SPEED_1G:
		return RTE_ETH_SPEED_NUM_1G;
	case RTE_ETH_LINK_SPEED_2_5G:
		return RTE_ETH_SPEED_NUM_2_5G;
	case RTE_ETH_LINK_SPEED_5G:
		return RTE_ETH_SPEED_NUM_5G;
	case RTE_ETH_LINK_SPEED_10G:
		return RTE_ETH_SPEED_NUM_10G;
	case RTE_ETH_LINK_SPEED_20G:
		return RTE_ETH_SPEED_NUM_20G;
	case RTE_ETH_LINK_SPEED_25G:
		return RTE_ETH_SPEED_NUM_25G;
	case RTE_ETH_LINK_SPEED_40G:
		return RTE_ETH_SPEED_NUM_40G;
	case RTE_ETH_LINK_SPEED_50G:
		return RTE_ETH_SPEED_NUM_50G;
	case RTE_ETH_LINK_SPEED_56G:
		return RTE_ETH_SPEED_NUM_56G;
	case RTE_ETH_LINK_SPEED_100G:
		return RTE_ETH_SPEED_NUM_100G;
	case RTE_ETH_LINK_SPEED_200G:
		return RTE_ETH_SPEED_NUM_200G;
	case RTE_ETH_LINK_SPEED_400G:
		return RTE_ETH_SPEED_NUM_400G;
	default:
		return RTE_ETH_SPEED_NUM_NONE;
	}
}

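/*
 * The NFP4000 cannot run port 0 at 25Gbps while port 1 runs at 10Gbps
 * (or the reverse); reject such a speed combination before handing it
 * to the NSP.
 */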
static int
nfp_net_nfp4000_speed_configure_check(uint16_t port_id,
		uint32_t configure_speed,
		struct nfp_eth_table *nfp_eth_table)
{
	switch (port_id) {
	case 0:
		if (configure_speed == RTE_ETH_SPEED_NUM_25G &&
				nfp_eth_table->ports[1].speed == RTE_ETH_SPEED_NUM_10G) {
			PMD_DRV_LOG(ERR, "The speed configuration is not supported for NFP4000.");
			return -ENOTSUP;
		}
		break;
	case 1:
		if (configure_speed == RTE_ETH_SPEED_NUM_10G &&
				nfp_eth_table->ports[0].speed == RTE_ETH_SPEED_NUM_25G) {
			PMD_DRV_LOG(ERR, "The speed configuration is not supported for NFP4000.");
			return -ENOTSUP;
		}
		break;
	default:
		PMD_DRV_LOG(ERR, "The port id is invalid.");
		return -EINVAL;
	}

	return 0;
}

static int
nfp_net_speed_configure(struct rte_eth_dev *dev)
{
	int ret;
	uint32_t speed_capa;
	struct nfp_nsp *nsp;
	uint32_t link_speeds;
	uint32_t configure_speed;
	struct nfp_eth_table_port *eth_port;
	struct nfp_eth_table *nfp_eth_table;
	struct nfp_net_hw *net_hw = dev->data->dev_private;
	struct nfp_net_hw_priv *hw_priv = dev->process_private;

	nfp_eth_table = hw_priv->pf_dev->nfp_eth_table;
	eth_port = &nfp_eth_table->ports[net_hw->idx];

	speed_capa = hw_priv->pf_dev->speed_capa;
	if (speed_capa == 0) {
		PMD_DRV_LOG(ERR, "Speed capability is invalid.");
		return -EINVAL;
	}

	link_speeds = dev->data->dev_conf.link_speeds;
	configure_speed = nfp_net_speed_bitmap2speed(speed_capa & link_speeds);
	if (configure_speed == RTE_ETH_SPEED_NUM_NONE &&
			link_speeds != RTE_ETH_LINK_SPEED_AUTONEG) {
		PMD_DRV_LOG(ERR, "Configured speed is invalid.");
		return -EINVAL;
	}

	/* NFP4000 does not allow port 0 at 25Gbps and port 1 at 10Gbps at the same time. */
	if (net_hw->device_id == PCI_DEVICE_ID_NFP4000_PF_NIC) {
		ret = nfp_net_nfp4000_speed_configure_check(net_hw->idx,
				configure_speed, nfp_eth_table);
		if (ret != 0) {
			PMD_DRV_LOG(ERR, "Failed to configure speed for NFP4000.");
			return ret;
		}
	}

	nsp = nfp_eth_config_start(hw_priv->pf_dev->cpp, eth_port->index);
	if (nsp == NULL) {
		PMD_DRV_LOG(ERR, "Couldn't get NSP.");
		return -EIO;
	}

	if (link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
		if (eth_port->supp_aneg) {
			ret = nfp_eth_set_aneg(nsp, NFP_ANEG_AUTO);
			if (ret != 0) {
				PMD_DRV_LOG(ERR, "Failed to enable autoneg.");
				goto config_cleanup;
			}
		}
	} else {
		ret = nfp_eth_set_aneg(nsp, NFP_ANEG_DISABLED);
		if (ret != 0) {
			PMD_DRV_LOG(ERR, "Failed to disable autoneg.");
			goto config_cleanup;
		}

		ret = nfp_eth_set_speed(nsp, configure_speed);
		if (ret != 0) {
			PMD_DRV_LOG(ERR, "Failed to set speed.");
			goto config_cleanup;
		}
	}

	return nfp_eth_config_commit_end(nsp);

config_cleanup:
	nfp_eth_config_cleanup_end(nsp);

	return ret;
}

static int
nfp_net_start(struct rte_eth_dev *dev)
{
	int ret;
	uint16_t i;
	struct nfp_hw *hw;
	uint32_t new_ctrl;
	uint32_t update = 0;
	uint32_t cap_extend;
	uint32_t intr_vector;
	uint32_t ctrl_extend = 0;
	struct nfp_net_hw *net_hw;
	struct nfp_pf_dev *pf_dev;
	struct rte_eth_rxmode *rxmode;
	struct nfp_net_hw_priv *hw_priv;
	struct nfp_app_fw_nic *app_fw_nic;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;

	net_hw = dev->data->dev_private;
	hw_priv = dev->process_private;
	pf_dev = hw_priv->pf_dev;
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);
	hw = &net_hw->super;

	/* Disabling queues just in case... */
	nfp_net_disable_queues(dev);

	/* Enabling the required queues in the device */
	nfp_net_enable_queues(dev);

	/* Configure the port speed and the auto-negotiation mode. */
	ret = nfp_net_speed_configure(dev);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to set the speed and auto-negotiation mode.");
		return ret;
	}

	/* Check and configure queue intr-vector mapping */
	if (dev->data->dev_conf.intr_conf.rxq != 0) {
		if (app_fw_nic->multiport) {
			PMD_INIT_LOG(ERR, "PMD rx interrupt is not supported "
					"with NFP multiport PF");
			return -EINVAL;
		}

		if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_UIO) {
			/*
			 * Better not to share LSC with RX interrupts.
			 * Unregistering LSC interrupt handler.
			 */
			rte_intr_callback_unregister(intr_handle,
					nfp_net_dev_interrupt_handler, (void *)dev);

			if (dev->data->nb_rx_queues > 1) {
				PMD_INIT_LOG(ERR, "PMD rx interrupt only "
						"supports 1 queue with UIO");
				return -EIO;
			}
		}

		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector) != 0)
			return -1;

		nfp_configure_rx_interrupt(dev, intr_handle);
		update = NFP_NET_CFG_UPDATE_MSIX;
	}

	/* Checking MTU set */
	if (dev->data->mtu > net_hw->flbufsz) {
		PMD_INIT_LOG(ERR, "MTU (%u) can't be larger than the current NFP_FRAME_SIZE (%u)",
				dev->data->mtu, net_hw->flbufsz);
		return -ERANGE;
	}

	rte_intr_enable(intr_handle);

	new_ctrl = nfp_check_offloads(dev);

	/* Writing configuration parameters in the device */
	nfp_net_params_setup(net_hw);

	rxmode = &dev->data->dev_conf.rxmode;
	if ((rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) != 0) {
		nfp_net_rss_config_default(dev);
		update |= NFP_NET_CFG_UPDATE_RSS;
		new_ctrl |= nfp_net_cfg_ctrl_rss(hw->cap);
	}

	/* Enable device */
	new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;

	update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;

	/* Enable vxlan */
	if ((hw->cap & NFP_NET_CFG_CTRL_VXLAN) != 0) {
		new_ctrl |= NFP_NET_CFG_CTRL_VXLAN;
		update |= NFP_NET_CFG_UPDATE_VXLAN;
	}

	if ((hw->cap & NFP_NET_CFG_CTRL_RINGCFG) != 0)
		new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;

	if ((hw->cap & NFP_NET_CFG_CTRL_TXRWB) != 0)
		new_ctrl |= NFP_NET_CFG_CTRL_TXRWB;

	if (nfp_reconfig(hw, new_ctrl, update) != 0)
		return -EIO;

	hw->ctrl = new_ctrl;

	/* Enable packet type offload by extend ctrl word1. */
	cap_extend = hw->cap_ext;
	if ((cap_extend & NFP_NET_CFG_CTRL_PKT_TYPE) != 0)
		ctrl_extend = NFP_NET_CFG_CTRL_PKT_TYPE;

	if ((cap_extend & NFP_NET_CFG_CTRL_IPSEC) != 0)
		ctrl_extend |= NFP_NET_CFG_CTRL_IPSEC |
				NFP_NET_CFG_CTRL_IPSEC_SM_LOOKUP |
				NFP_NET_CFG_CTRL_IPSEC_LM_LOOKUP;

	/* Enable flow steer by extend ctrl word1. */
	if ((cap_extend & NFP_NET_CFG_CTRL_FLOW_STEER) != 0)
		ctrl_extend |= NFP_NET_CFG_CTRL_FLOW_STEER;

	update = NFP_NET_CFG_UPDATE_GEN;
	if (nfp_ext_reconfig(hw, ctrl_extend, update) != 0)
		return -EIO;

	hw->ctrl_ext = ctrl_extend;

	/*
	 * Allocating rte mbufs for configured rx queues.
	 * This requires queues being enabled before.
	 */
	if (nfp_net_rx_freelist_setup(dev) != 0) {
		ret = -ENOMEM;
		goto error;
	}

	/* Configure the physical port up */
	nfp_eth_set_configured(pf_dev->cpp, net_hw->nfp_idx, 1);

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;

error:
	/*
	 * An error returned by this function should mean the app is
	 * exiting and then the system releasing all the memory
	 * allocated, even memory coming from hugepages.
	 *
	 * The device could be enabled at this point with some queues
	 * ready for getting packets. This is true if the call to
	 * nfp_net_rx_freelist_setup() succeeds for some queues but
	 * fails for subsequent queues.
	 *
	 * This should make the app exit, but it is better to tell the
	 * device first.
	 */
	nfp_net_disable_queues(dev);

	return ret;
}

/* Set the link up. */
static int
nfp_net_set_link_up(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;
	struct nfp_net_hw_priv *hw_priv;

	hw = dev->data->dev_private;
	hw_priv = dev->process_private;

	return nfp_eth_set_configured(hw_priv->pf_dev->cpp, hw->nfp_idx, 1);
}

/* Set the link down. */
static int
nfp_net_set_link_down(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;
	struct nfp_net_hw_priv *hw_priv;

	hw = dev->data->dev_private;
	hw_priv = dev->process_private;

	return nfp_eth_set_configured(hw_priv->pf_dev->cpp, hw->nfp_idx, 0);
}

static uint8_t
nfp_function_id_get(const struct nfp_pf_dev *pf_dev,
		uint8_t phy_port)
{
	if (pf_dev->multi_pf.enabled)
		return pf_dev->multi_pf.function_id;

	return phy_port;
}

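/*
 * Multi-PF keepalive heartbeat: each PF periodically writes a changing
 * value (the TSC here) into its own slot of the shared beat area, so
 * the firmware and the other PFs can tell it is still alive. A slot
 * whose value keeps advancing marks an active PF; see
 * nfp_fw_skip_load() for the reader side.
 */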
static void
nfp_net_beat_timer(void *arg)
{
	uint64_t cur_sec;
	struct nfp_multi_pf *multi_pf = arg;

	cur_sec = rte_rdtsc();
	nn_writeq(cur_sec, multi_pf->beat_addr + NFP_BEAT_OFFSET(multi_pf->function_id));

	/* Beat once per second. */
	if (rte_eal_alarm_set(1000 * 1000, nfp_net_beat_timer,
			(void *)multi_pf) < 0) {
		PMD_DRV_LOG(ERR, "Error setting alarm");
	}
}

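/*
 * Locate the keepalive area through the NFP resource table, then map
 * it so the per-PF beat slots can be read and written directly. The
 * resource handle itself is only needed to learn the CPP id, address
 * and size, so it is released again right away.
 */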
static int
nfp_net_keepalive_init(struct nfp_cpp *cpp,
		struct nfp_multi_pf *multi_pf)
{
	uint8_t *base;
	uint64_t addr;
	uint32_t size;
	uint32_t cpp_id;
	struct nfp_resource *res;

	res = nfp_resource_acquire(cpp, NFP_RESOURCE_KEEPALIVE);
	if (res == NULL)
		return -EIO;

	cpp_id = nfp_resource_cpp_id(res);
	addr = nfp_resource_address(res);
	size = nfp_resource_size(res);

	nfp_resource_release(res);

	/* Allocate a fixed area for keepalive. */
	base = nfp_cpp_map_area(cpp, cpp_id, addr, size, &multi_pf->beat_area);
	if (base == NULL) {
		PMD_DRV_LOG(ERR, "Failed to map area for keepalive.");
		return -EIO;
	}

	multi_pf->beat_addr = base;

	return 0;
}

static void
nfp_net_keepalive_uninit(struct nfp_multi_pf *multi_pf)
{
	nfp_cpp_area_release_free(multi_pf->beat_area);
}

static int
nfp_net_keepalive_start(struct nfp_multi_pf *multi_pf)
{
	if (rte_eal_alarm_set(1000 * 1000, nfp_net_beat_timer,
			(void *)multi_pf) < 0) {
		PMD_DRV_LOG(ERR, "Error setting alarm");
		return -EIO;
	}

	return 0;
}

static void
nfp_net_keepalive_clear(uint8_t *beat_addr,
		uint8_t function_id)
{
	nn_writeq(0, beat_addr + NFP_BEAT_OFFSET(function_id));
}

static void
nfp_net_keepalive_clear_others(const struct nfp_dev_info *dev_info,
		struct nfp_multi_pf *multi_pf)
{
	uint8_t port_num;

	for (port_num = 0; port_num < dev_info->pf_num_per_unit; port_num++) {
		if (port_num == multi_pf->function_id)
			continue;

		nfp_net_keepalive_clear(multi_pf->beat_addr, port_num);
	}
}

static void
nfp_net_keepalive_stop(struct nfp_multi_pf *multi_pf)
{
	/* Cancel keepalive for multiple PF setup */
	rte_eal_alarm_cancel(nfp_net_beat_timer, (void *)multi_pf);
}

static int
nfp_net_uninit(struct rte_eth_dev *eth_dev)
{
	struct nfp_net_hw *net_hw;
	struct nfp_net_hw_priv *hw_priv;

	net_hw = eth_dev->data->dev_private;
	hw_priv = eth_dev->process_private;

	if ((net_hw->super.cap_ext & NFP_NET_CFG_CTRL_FLOW_STEER) != 0)
		nfp_net_flow_priv_uninit(hw_priv->pf_dev, net_hw->idx);

	rte_free(net_hw->eth_xstats_base);
	if ((net_hw->super.cap & NFP_NET_CFG_CTRL_TXRWB) != 0)
		nfp_net_txrwb_free(eth_dev);
	nfp_ipsec_uninit(eth_dev);

	return 0;
}

static void
nfp_cleanup_port_app_fw_nic(struct nfp_pf_dev *pf_dev,
		uint8_t id,
		struct rte_eth_dev *eth_dev)
{
	struct nfp_app_fw_nic *app_fw_nic;

	app_fw_nic = pf_dev->app_fw_priv;
	if (app_fw_nic->ports[id] != NULL) {
		nfp_net_uninit(eth_dev);
		app_fw_nic->ports[id] = NULL;
	}
}

static void
nfp_uninit_app_fw_nic(struct nfp_pf_dev *pf_dev)
{
	nfp_cpp_area_release_free(pf_dev->ctrl_area);
	rte_free(pf_dev->app_fw_priv);
}

void
nfp_pf_uninit(struct nfp_net_hw_priv *hw_priv)
{
	struct nfp_pf_dev *pf_dev = hw_priv->pf_dev;

	nfp_cpp_area_release_free(pf_dev->mac_stats_area);
	nfp_cpp_area_release_free(pf_dev->qc_area);
	free(pf_dev->sym_tbl);
	if (pf_dev->multi_pf.enabled) {
		nfp_net_keepalive_stop(&pf_dev->multi_pf);
		nfp_net_keepalive_clear(pf_dev->multi_pf.beat_addr, pf_dev->multi_pf.function_id);
		nfp_net_keepalive_uninit(&pf_dev->multi_pf);
	}
	free(pf_dev->nfp_eth_table);
	free(pf_dev->hwinfo);
	nfp_cpp_free(pf_dev->cpp);
	nfp_sync_free(pf_dev->sync);
	rte_free(pf_dev);
	rte_free(hw_priv);
}

static int
nfp_pf_secondary_uninit(struct nfp_net_hw_priv *hw_priv)
{
	struct nfp_pf_dev *pf_dev = hw_priv->pf_dev;

	free(pf_dev->sym_tbl);
	nfp_cpp_free(pf_dev->cpp);
	nfp_sync_free(pf_dev->sync);
	rte_free(pf_dev);
	rte_free(hw_priv);

	return 0;
}

/* Reset and stop device. The device can not be restarted. */
static int
nfp_net_close(struct rte_eth_dev *dev)
{
	uint8_t i;
	uint8_t id;
	struct nfp_net_hw *hw;
	struct nfp_pf_dev *pf_dev;
	struct rte_pci_device *pci_dev;
	struct nfp_net_hw_priv *hw_priv;
	struct nfp_app_fw_nic *app_fw_nic;

	hw_priv = dev->process_private;

	/*
	 * In the secondary process, a released eth device can be found by
	 * its name in shared memory.
	 * If the state of the eth device is RTE_ETH_DEV_UNUSED, it means the
	 * eth device has been released.
	 */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		if (dev->state == RTE_ETH_DEV_UNUSED)
			return 0;

		nfp_pf_secondary_uninit(hw_priv);
		return 0;
	}

	hw = dev->data->dev_private;
	pf_dev = hw_priv->pf_dev;
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);

	/*
	 * We assume that the DPDK application is stopping all the
	 * threads/queues before calling the device close function.
	 */
	nfp_net_disable_queues(dev);

	/* Clear queues */
	nfp_net_close_tx_queue(dev);
	nfp_net_close_rx_queue(dev);

	/* Cancel possible impending LSC work here before releasing the port */
	rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler, (void *)dev);

	/* Only free PF resources after all physical ports have been closed */
	/* Mark this port as unused and free device priv resources */
	nn_cfg_writeb(&hw->super, NFP_NET_CFG_LSC, 0xff);

	if (pf_dev->app_fw_id != NFP_APP_FW_CORE_NIC)
		return -EINVAL;

	nfp_cleanup_port_app_fw_nic(pf_dev, hw->idx, dev);

	for (i = 0; i < app_fw_nic->total_phyports; i++) {
		id = nfp_function_id_get(pf_dev, i);

		/* Check to see if ports are still in use */
		if (app_fw_nic->ports[id] != NULL)
			return 0;
	}

	/* Enabled in nfp_net_start() */
	rte_intr_disable(pci_dev->intr_handle);

	/* Registered in nfp_net_init() */
	rte_intr_callback_unregister(pci_dev->intr_handle,
			nfp_net_dev_interrupt_handler, (void *)dev);

	nfp_uninit_app_fw_nic(pf_dev);
	nfp_pf_uninit(hw_priv);

	return 0;
}

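/*
 * Scan the VXLAN port table for the first slot that either already
 * holds this UDP port or is unused (use count of zero). Only when
 * every slot holds another active port does the lookup fail.
 */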
static int
nfp_net_find_vxlan_idx(struct nfp_net_hw *hw,
		uint16_t port,
		uint32_t *idx)
{
	uint32_t i;
	int free_idx = -1;

	for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i++) {
		if (hw->vxlan_ports[i] == port) {
			free_idx = i;
			break;
		}

		if (hw->vxlan_usecnt[i] == 0) {
			free_idx = i;
			break;
		}
	}

	if (free_idx == -1)
		return -EINVAL;

	*idx = free_idx;

	return 0;
}

static int
nfp_udp_tunnel_port_add(struct rte_eth_dev *dev,
		struct rte_eth_udp_tunnel *tunnel_udp)
{
	int ret;
	uint32_t idx;
	uint16_t vxlan_port;
	struct nfp_net_hw *hw;
	enum rte_eth_tunnel_type tnl_type;

	hw = dev->data->dev_private;
	vxlan_port = tunnel_udp->udp_port;
	tnl_type   = tunnel_udp->prot_type;

	if (tnl_type != RTE_ETH_TUNNEL_TYPE_VXLAN) {
		PMD_DRV_LOG(ERR, "Not VXLAN tunnel");
		return -ENOTSUP;
	}

	ret = nfp_net_find_vxlan_idx(hw, vxlan_port, &idx);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Failed to find valid vxlan idx");
		return -EINVAL;
	}

	if (hw->vxlan_usecnt[idx] == 0) {
		ret = nfp_net_set_vxlan_port(hw, idx, vxlan_port);
		if (ret != 0) {
			PMD_DRV_LOG(ERR, "Failed to set vxlan port");
			return -EINVAL;
		}
	}

	hw->vxlan_usecnt[idx]++;

	return 0;
}

static int
nfp_udp_tunnel_port_del(struct rte_eth_dev *dev,
		struct rte_eth_udp_tunnel *tunnel_udp)
{
	int ret;
	uint32_t idx;
	uint16_t vxlan_port;
	struct nfp_net_hw *hw;
	enum rte_eth_tunnel_type tnl_type;

	hw = dev->data->dev_private;
	vxlan_port = tunnel_udp->udp_port;
	tnl_type   = tunnel_udp->prot_type;

	if (tnl_type != RTE_ETH_TUNNEL_TYPE_VXLAN) {
		PMD_DRV_LOG(ERR, "Not VXLAN tunnel");
		return -ENOTSUP;
	}

	ret = nfp_net_find_vxlan_idx(hw, vxlan_port, &idx);
	if (ret != 0 || hw->vxlan_usecnt[idx] == 0) {
		PMD_DRV_LOG(ERR, "Failed to find valid vxlan idx");
		return -EINVAL;
	}

	hw->vxlan_usecnt[idx]--;

	if (hw->vxlan_usecnt[idx] == 0) {
		ret = nfp_net_set_vxlan_port(hw, idx, 0);
		if (ret != 0) {
			PMD_DRV_LOG(ERR, "Failed to set vxlan port");
			return -EINVAL;
		}
	}

	return 0;
}

/* Initialise and register driver with DPDK Application */
static const struct eth_dev_ops nfp_net_eth_dev_ops = {
	.dev_configure          = nfp_net_configure,
	.dev_start              = nfp_net_start,
	.dev_stop               = nfp_net_stop,
	.dev_set_link_up        = nfp_net_set_link_up,
	.dev_set_link_down      = nfp_net_set_link_down,
	.dev_close              = nfp_net_close,
	.promiscuous_enable     = nfp_net_promisc_enable,
	.promiscuous_disable    = nfp_net_promisc_disable,
	.allmulticast_enable    = nfp_net_allmulticast_enable,
	.allmulticast_disable   = nfp_net_allmulticast_disable,
	.link_update            = nfp_net_link_update,
	.stats_get              = nfp_net_stats_get,
	.stats_reset            = nfp_net_stats_reset,
	.xstats_get             = nfp_net_xstats_get,
	.xstats_reset           = nfp_net_xstats_reset,
	.xstats_get_names       = nfp_net_xstats_get_names,
	.xstats_get_by_id       = nfp_net_xstats_get_by_id,
	.xstats_get_names_by_id = nfp_net_xstats_get_names_by_id,
	.dev_infos_get          = nfp_net_infos_get,
	.dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
	.mtu_set                = nfp_net_dev_mtu_set,
	.mac_addr_set           = nfp_net_set_mac_addr,
	.vlan_offload_set       = nfp_net_vlan_offload_set,
	.reta_update            = nfp_net_reta_update,
	.reta_query             = nfp_net_reta_query,
	.rss_hash_update        = nfp_net_rss_hash_update,
	.rss_hash_conf_get      = nfp_net_rss_hash_conf_get,
	.rx_queue_setup         = nfp_net_rx_queue_setup,
	.rx_queue_release       = nfp_net_rx_queue_release,
	.tx_queue_setup         = nfp_net_tx_queue_setup,
	.tx_queue_release       = nfp_net_tx_queue_release,
	.rx_queue_intr_enable   = nfp_rx_queue_intr_enable,
	.rx_queue_intr_disable  = nfp_rx_queue_intr_disable,
	.udp_tunnel_port_add    = nfp_udp_tunnel_port_add,
	.udp_tunnel_port_del    = nfp_udp_tunnel_port_del,
	.fw_version_get         = nfp_net_firmware_version_get,
	.flow_ctrl_get          = nfp_net_flow_ctrl_get,
	.flow_ctrl_set          = nfp_net_flow_ctrl_set,
	.flow_ops_get           = nfp_net_flow_ops_get,
	.fec_get_capability     = nfp_net_fec_get_capability,
	.fec_get                = nfp_net_fec_get,
	.fec_set                = nfp_net_fec_set,
};

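/*
 * Select the Tx datapath from the firmware datapath version: NFD3
 * firmware gets the NFD3 transmit routine, anything else gets the
 * NFDK one. The Rx burst routine and the generic ops table are shared
 * by both datapaths.
 */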
static inline void
nfp_net_ethdev_ops_mount(struct nfp_net_hw *hw,
		struct rte_eth_dev *eth_dev)
{
	if (hw->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3)
		eth_dev->tx_pkt_burst = nfp_net_nfd3_xmit_pkts;
	else
		eth_dev->tx_pkt_burst = nfp_net_nfdk_xmit_pkts;

	eth_dev->dev_ops = &nfp_net_eth_dev_ops;
	eth_dev->rx_queue_count = nfp_net_rx_queue_count;
	eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
}

static int
nfp_net_init(struct rte_eth_dev *eth_dev,
		void *para)
{
	int err;
	uint16_t port;
	uint64_t rx_base;
	uint64_t tx_base;
	struct nfp_hw *hw;
	struct nfp_net_hw *net_hw;
	struct nfp_pf_dev *pf_dev;
	struct nfp_net_init *hw_init;
	struct rte_pci_device *pci_dev;
	struct nfp_net_hw_priv *hw_priv;
	struct nfp_app_fw_nic *app_fw_nic;

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	net_hw = eth_dev->data->dev_private;

	hw_init = para;
	net_hw->idx      = hw_init->idx;
	net_hw->nfp_idx  = hw_init->nfp_idx;
	eth_dev->process_private = hw_init->hw_priv;

	/* Use backpointer here to the PF of this eth_dev */
	hw_priv = eth_dev->process_private;
	pf_dev = hw_priv->pf_dev;

	/* Use backpointer to the CoreNIC app struct */
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);

	/* Add this device to the PF's array of physical ports */
	app_fw_nic->ports[net_hw->idx] = net_hw;

	port = net_hw->idx;
	if (port > 7) {
		PMD_DRV_LOG(ERR, "Invalid port value");
		return -ENODEV;
	}

	hw = &net_hw->super;

	PMD_INIT_LOG(DEBUG, "Working with physical port number: %hu, "
			"NFP internal port number: %d", port, net_hw->nfp_idx);

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	if (pf_dev->multi_pf.enabled)
		hw->ctrl_bar = pf_dev->ctrl_bar;
	else
		hw->ctrl_bar = pf_dev->ctrl_bar + (port * NFP_NET_CFG_BAR_SZ);

	net_hw->mac_stats = pf_dev->mac_stats_bar +
				(net_hw->nfp_idx * NFP_MAC_STATS_SIZE);

	PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar);
	PMD_INIT_LOG(DEBUG, "MAC stats: %p", net_hw->mac_stats);

	err = nfp_net_common_init(pci_dev, net_hw);
	if (err != 0)
		return err;

	err = nfp_net_tlv_caps_parse(eth_dev);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Failed to parse TLV caps");
		return err;
	}

	err = nfp_ipsec_init(eth_dev);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Failed to init IPsec module");
		return err;
	}

	nfp_net_ethdev_ops_mount(net_hw, eth_dev);

	net_hw->eth_xstats_base = rte_malloc("rte_eth_xstat", sizeof(struct rte_eth_xstat) *
			nfp_net_xstats_size(eth_dev), 0);
	if (net_hw->eth_xstats_base == NULL) {
		PMD_INIT_LOG(ERR, "No memory for xstats base values on device %s!",
				pci_dev->device.name);
		err = -ENOMEM;
		goto ipsec_exit;
	}

	/* Work out where in the BAR the queues start. */
	tx_base = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
	rx_base = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);

	net_hw->tx_bar = pf_dev->qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
	net_hw->rx_bar = pf_dev->qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ;

	PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
			hw->ctrl_bar, net_hw->tx_bar, net_hw->rx_bar);

	nfp_net_cfg_queue_setup(net_hw);
	net_hw->mtu = RTE_ETHER_MTU;

	/* VLAN insertion is incompatible with LSOv2 */
	if ((hw->cap & NFP_NET_CFG_CTRL_LSO2) != 0)
		hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN;

	nfp_net_log_device_information(net_hw);

	/* Initializing spinlock for reconfigs */
	rte_spinlock_init(&hw->reconfig_lock);

	/* Allocating memory for mac addr */
	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate space for MAC address");
		err = -ENOMEM;
		goto xstats_free;
	}

	if ((hw->cap & NFP_NET_CFG_CTRL_TXRWB) != 0) {
		err = nfp_net_txrwb_alloc(eth_dev);
		if (err != 0)
			goto xstats_free;
	}

	nfp_net_pf_read_mac(app_fw_nic, port, hw_priv);
	nfp_write_mac(hw, &hw->mac_addr.addr_bytes[0]);

	if (rte_is_valid_assigned_ether_addr(&hw->mac_addr) == 0) {
		PMD_INIT_LOG(INFO, "Using random mac address for port %d", port);
		/* Using a random MAC address */
		rte_eth_random_addr(&hw->mac_addr.addr_bytes[0]);
		nfp_write_mac(hw, &hw->mac_addr.addr_bytes[0]);
	}

	/* Copying mac address to DPDK eth_dev struct */
	rte_ether_addr_copy(&hw->mac_addr, eth_dev->data->mac_addrs);

	if ((hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR) == 0)
		eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR;

	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	PMD_INIT_LOG(INFO, "port %d VendorID=%#x DeviceID=%#x "
			"mac=" RTE_ETHER_ADDR_PRT_FMT,
			eth_dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id,
			RTE_ETHER_ADDR_BYTES(&hw->mac_addr));

	/* Registering LSC interrupt handler */
	rte_intr_callback_register(pci_dev->intr_handle,
			nfp_net_dev_interrupt_handler, (void *)eth_dev);
	/* Telling the firmware about the LSC interrupt entry */
	nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
	/* Unmask the LSC interrupt */
	nfp_net_irq_unmask(eth_dev);
	/* Recording current stats counters values */
	nfp_net_stats_reset(eth_dev);

	if ((hw->cap_ext & NFP_NET_CFG_CTRL_FLOW_STEER) != 0) {
		err = nfp_net_flow_priv_init(pf_dev, port);
		if (err != 0) {
			PMD_INIT_LOG(ERR, "Init net flow priv failed");
			goto txrwb_free;
		}
	}

	return 0;

txrwb_free:
	if ((hw->cap & NFP_NET_CFG_CTRL_TXRWB) != 0)
		nfp_net_txrwb_free(eth_dev);
xstats_free:
	rte_free(net_hw->eth_xstats_base);
ipsec_exit:
	nfp_ipsec_uninit(eth_dev);

	return err;
}

#define DEFAULT_FW_PATH       "/lib/firmware/netronome"

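/*
 * Firmware lookup order used by nfp_fw_get_name(), with hypothetical
 * file names for illustration (the real names depend on the card):
 *   1. serial-00-15-4d-13-51-0c-10-ff.nffw   (device serial + interface)
 *   2. pci-0000:02:00.0.nffw                 (PCI device name)
 *   3. nic_AMDA0099-0001_2x25.nffw           (card type and media)
 * all relative to DEFAULT_FW_PATH.
 */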
static int
nfp_fw_get_name(struct rte_pci_device *dev,
		struct nfp_nsp *nsp,
		char *card,
		char *fw_name,
		size_t fw_size)
{
	char serial[40];
	uint16_t interface;
	uint32_t cpp_serial_len;
	const uint8_t *cpp_serial;
	struct nfp_cpp *cpp = nfp_nsp_cpp(nsp);

	cpp_serial_len = nfp_cpp_serial(cpp, &cpp_serial);
	if (cpp_serial_len != NFP_SERIAL_LEN)
		return -ERANGE;

	interface = nfp_cpp_interface(cpp);

	/* Looking for firmware file in order of priority */

	/* First try to find a firmware image specific for this device */
	snprintf(serial, sizeof(serial),
			"serial-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x",
			cpp_serial[0], cpp_serial[1], cpp_serial[2], cpp_serial[3],
			cpp_serial[4], cpp_serial[5], interface >> 8, interface & 0xff);
	snprintf(fw_name, fw_size, "%s/%s.nffw", DEFAULT_FW_PATH, serial);

	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
	if (access(fw_name, F_OK) == 0)
		return 0;

	/* Then try the PCI name */
	snprintf(fw_name, fw_size, "%s/pci-%s.nffw", DEFAULT_FW_PATH,
			dev->name);

	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
	if (access(fw_name, F_OK) == 0)
		return 0;

	/* Finally try the card type and media */
	snprintf(fw_name, fw_size, "%s/%s", DEFAULT_FW_PATH, card);
	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
	if (access(fw_name, F_OK) == 0)
		return 0;

	return -ENOENT;
}

static int
nfp_fw_upload(struct nfp_nsp *nsp,
		char *fw_name)
{
	int err;
	void *fw_buf;
	size_t fsize;

	err = rte_firmware_read(fw_name, &fw_buf, &fsize);
	if (err != 0) {
		PMD_DRV_LOG(ERR, "Firmware %s not found!", fw_name);
		return -ENOENT;
	}

	PMD_DRV_LOG(INFO, "Firmware file found at %s with size: %zu",
			fw_name, fsize);
	PMD_DRV_LOG(INFO, "Uploading the firmware ...");
	if (nfp_nsp_load_fw(nsp, fw_buf, fsize) < 0) {
		free(fw_buf);
		PMD_DRV_LOG(ERR, "Firmware load failed.");
		return -EIO;
	}

	PMD_DRV_LOG(INFO, "Done");

	free(fw_buf);

	return 0;
}

static void
nfp_fw_unload(struct nfp_cpp *cpp)
{
	struct nfp_nsp *nsp;

	nsp = nfp_nsp_open(cpp);
	if (nsp == NULL)
		return;

	nfp_nsp_device_soft_reset(nsp);
	nfp_nsp_close(nsp);
}

static int
nfp_fw_check_change(struct nfp_cpp *cpp,
		char *fw_name,
		bool *fw_changed)
{
	int ret;
	uint32_t new_version = 0;
	uint32_t old_version = 0;

	ret = nfp_elf_get_fw_version(&new_version, fw_name);
	if (ret != 0)
		return ret;

	nfp_net_get_fw_version(cpp, &old_version);

	if (new_version != old_version) {
		PMD_DRV_LOG(INFO, "FW version has changed, new %u, old %u",
				new_version, old_version);
		*fw_changed = true;
	} else {
		PMD_DRV_LOG(INFO, "FW version has not changed: %u", new_version);
		*fw_changed = false;
	}

	return 0;
}

static int
nfp_fw_reload(struct nfp_nsp *nsp,
		char *fw_name)
{
	int err;

	nfp_nsp_device_soft_reset(nsp);
	err = nfp_fw_upload(nsp, fw_name);
	if (err != 0)
		PMD_DRV_LOG(ERR, "NFP firmware load failed");

	return err;
}

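/*
 * Decide whether loading the firmware can be skipped in a multi-PF
 * setup. Rough sketch of the logic: sample the beat slot of every
 * other PF, then re-sample for a few seconds; a slot that keeps
 * advancing belongs to a live PF, which means the current firmware is
 * still in use. Returns true when the load should be skipped, and
 * cancels a requested force reload when another PF is found alive.
 */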
static bool
nfp_fw_skip_load(const struct nfp_dev_info *dev_info,
		struct nfp_multi_pf *multi_pf,
		bool *reload_fw)
{
	uint8_t i;
	uint64_t tmp_beat;
	uint32_t port_num;
	uint8_t in_use = 0;
	uint64_t beat[dev_info->pf_num_per_unit];
	uint32_t offset[dev_info->pf_num_per_unit];
	uint8_t abnormal = dev_info->pf_num_per_unit;

	sleep(1);
	for (port_num = 0; port_num < dev_info->pf_num_per_unit; port_num++) {
		if (port_num == multi_pf->function_id) {
			abnormal--;
			continue;
		}

		offset[port_num] = NFP_BEAT_OFFSET(port_num);
		beat[port_num] = nn_readq(multi_pf->beat_addr + offset[port_num]);
		if (beat[port_num] == 0)
			abnormal--;
	}

	if (abnormal == 0)
		return true;

	for (i = 0; i < 3; i++) {
		sleep(1);
		for (port_num = 0; port_num < dev_info->pf_num_per_unit; port_num++) {
			if (port_num == multi_pf->function_id)
				continue;

			if (beat[port_num] == 0)
				continue;

			tmp_beat = nn_readq(multi_pf->beat_addr + offset[port_num]);
			if (tmp_beat != beat[port_num]) {
				in_use++;
				abnormal--;
				beat[port_num] = 0;
				if (*reload_fw) {
					*reload_fw = false;
					PMD_DRV_LOG(ERR, "The param %s does not work",
							NFP_PF_FORCE_RELOAD_FW);
				}
			}
		}

		if (abnormal == 0)
			return true;
	}

	if (in_use != 0) {
		PMD_DRV_LOG(WARNING, "Abnormal %u != 0, the NIC has a port which exited abnormally.",
				abnormal);
		return true;
	}

	return false;
}

static int
nfp_fw_reload_for_single_pf(struct nfp_nsp *nsp,
		char *fw_name,
		struct nfp_cpp *cpp,
		bool force_reload_fw)
{
	int ret;
	bool fw_changed = true;

	if (nfp_nsp_fw_loaded(nsp) && !force_reload_fw) {
		ret = nfp_fw_check_change(cpp, fw_name, &fw_changed);
		if (ret != 0)
			return ret;
	}

	if (!fw_changed)
		return 0;

	ret = nfp_fw_reload(nsp, fw_name);
	if (ret != 0)
		return ret;

	return 0;
}

static int
nfp_fw_reload_for_multi_pf(struct nfp_nsp *nsp,
		char *fw_name,
		struct nfp_cpp *cpp,
		const struct nfp_dev_info *dev_info,
		struct nfp_multi_pf *multi_pf,
		bool force_reload_fw)
{
	int err;
	bool fw_changed = true;
	bool skip_load_fw = false;
	bool reload_fw = force_reload_fw;

	err = nfp_net_keepalive_init(cpp, multi_pf);
	if (err != 0) {
		PMD_DRV_LOG(ERR, "NFP keepalive init failed");
		return err;
	}

	err = nfp_net_keepalive_start(multi_pf);
	if (err != 0) {
		PMD_DRV_LOG(ERR, "NFP keepalive start failed");
		goto keepalive_uninit;
	}

	if (nfp_nsp_fw_loaded(nsp) && !reload_fw) {
		err = nfp_fw_check_change(cpp, fw_name, &fw_changed);
		if (err != 0)
			goto keepalive_stop;
	}

	if (!fw_changed || reload_fw)
		skip_load_fw = nfp_fw_skip_load(dev_info, multi_pf, &reload_fw);

	if (skip_load_fw && !reload_fw)
		return 0;

	err = nfp_fw_reload(nsp, fw_name);
	if (err != 0)
		goto keepalive_stop;

	nfp_net_keepalive_clear_others(dev_info, multi_pf);

	return 0;

keepalive_stop:
	nfp_net_keepalive_stop(multi_pf);
keepalive_uninit:
	nfp_net_keepalive_uninit(multi_pf);

	return err;
}

static int
nfp_fw_setup(struct rte_pci_device *dev,
		struct nfp_cpp *cpp,
		struct nfp_eth_table *nfp_eth_table,
		struct nfp_hwinfo *hwinfo,
		const struct nfp_dev_info *dev_info,
		struct nfp_multi_pf *multi_pf,
		bool force_reload_fw)
{
	int err;
	char fw_name[125];
	char card_desc[100];
	struct nfp_nsp *nsp;
	const char *nfp_fw_model;

	nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "nffw.partno");
	if (nfp_fw_model == NULL)
		nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "assembly.partno");

	if (nfp_fw_model != NULL) {
		PMD_DRV_LOG(INFO, "Firmware model found: %s", nfp_fw_model);
	} else {
		PMD_DRV_LOG(ERR, "Firmware model NOT found");
		return -EIO;
	}

	if (nfp_eth_table->count == 0 || nfp_eth_table->count > 8) {
		PMD_DRV_LOG(ERR, "NFP ethernet table reports wrong number of ports: %u",
				nfp_eth_table->count);
		return -EIO;
	}

	PMD_DRV_LOG(INFO, "NFP ethernet port table reports %u ports",
			nfp_eth_table->count);

	PMD_DRV_LOG(INFO, "Port speed: %u", nfp_eth_table->ports[0].speed);

	snprintf(card_desc, sizeof(card_desc), "nic_%s_%dx%d.nffw",
			nfp_fw_model, nfp_eth_table->count,
			nfp_eth_table->ports[0].speed / 1000);

	nsp = nfp_nsp_open(cpp);
	if (nsp == NULL) {
		PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle");
		return -EIO;
	}

	err = nfp_fw_get_name(dev, nsp, card_desc, fw_name, sizeof(fw_name));
	if (err != 0) {
		PMD_DRV_LOG(ERR, "Can't find suitable firmware.");
		nfp_nsp_close(nsp);
		return err;
	}

	if (multi_pf->enabled)
		err = nfp_fw_reload_for_multi_pf(nsp, fw_name, cpp, dev_info, multi_pf,
				force_reload_fw);
	else
		err = nfp_fw_reload_for_single_pf(nsp, fw_name, cpp, force_reload_fw);

	nfp_nsp_close(nsp);
	return err;
}

static inline bool
nfp_check_multi_pf_from_fw(uint32_t total_vnics)
{
	if (total_vnics == 1)
		return true;

	return false;
}

static inline bool
nfp_check_multi_pf_from_nsp(struct rte_pci_device *pci_dev,
		struct nfp_cpp *cpp)
{
	bool flag;
	struct nfp_nsp *nsp;

	nsp = nfp_nsp_open(cpp);
	if (nsp == NULL) {
		PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle");
		return false;
	}

	flag = (nfp_nsp_get_abi_ver_major(nsp) > 0) &&
			(pci_dev->id.device_id == PCI_DEVICE_ID_NFP3800_PF_NIC);

	nfp_nsp_close(nsp);
	return flag;
}

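/*
 * Switch the firmware into multi-PF mode: map this PF's vNIC control
 * BAR, verify the firmware advertises multi-PF support in extended
 * capability word1, then push NFP_NET_CFG_CTRL_MULTI_PF through an
 * extended reconfig.
 */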
static int
nfp_enable_multi_pf(struct nfp_pf_dev *pf_dev)
{
	int err = 0;
	uint64_t tx_base;
	uint8_t *ctrl_bar;
	struct nfp_hw *hw;
	uint32_t cap_extend;
	struct nfp_net_hw net_hw;
	struct nfp_cpp_area *area;
	char name[RTE_ETH_NAME_MAX_LEN];

	memset(&net_hw, 0, sizeof(struct nfp_net_hw));

	/* Map the symbol table */
	snprintf(name, sizeof(name), "_pf%u_net_bar0",
			pf_dev->multi_pf.function_id);
	ctrl_bar = nfp_rtsym_map(pf_dev->sym_tbl, name, NFP_NET_CFG_BAR_SZ,
			&area);
	if (ctrl_bar == NULL) {
		PMD_INIT_LOG(ERR, "Failed to find data vNIC memory symbol");
		return -ENODEV;
	}

	hw = &net_hw.super;
	hw->ctrl_bar = ctrl_bar;

	cap_extend = nn_cfg_readl(hw, NFP_NET_CFG_CAP_WORD1);
	if ((cap_extend & NFP_NET_CFG_CTRL_MULTI_PF) == 0) {
		PMD_INIT_LOG(ERR, "Loaded firmware doesn't support multiple PF");
		err = -EINVAL;
		goto end;
	}

	tx_base = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
	net_hw.tx_bar = pf_dev->qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
	nfp_net_cfg_queue_setup(&net_hw);
	rte_spinlock_init(&hw->reconfig_lock);
	nfp_ext_reconfig(&net_hw.super, NFP_NET_CFG_CTRL_MULTI_PF, NFP_NET_CFG_UPDATE_GEN);
end:
	nfp_cpp_area_release_free(area);
	return err;
}

static int
nfp_init_app_fw_nic(struct nfp_net_hw_priv *hw_priv)
{
	uint8_t i;
	uint8_t id;
	int ret = 0;
	uint32_t total_vnics;
	struct nfp_app_fw_nic *app_fw_nic;
	struct nfp_eth_table *nfp_eth_table;
	char bar_name[RTE_ETH_NAME_MAX_LEN];
	char port_name[RTE_ETH_NAME_MAX_LEN];
	char vnic_name[RTE_ETH_NAME_MAX_LEN];
	struct nfp_pf_dev *pf_dev = hw_priv->pf_dev;
	struct nfp_net_init hw_init = {
		.hw_priv = hw_priv,
	};

	nfp_eth_table = pf_dev->nfp_eth_table;
	PMD_INIT_LOG(INFO, "Total physical ports: %u", nfp_eth_table->count);
	id = nfp_function_id_get(pf_dev, 0);

	/* Allocate memory for the CoreNIC app */
	app_fw_nic = rte_zmalloc("nfp_app_fw_nic", sizeof(*app_fw_nic), 0);
	if (app_fw_nic == NULL)
		return -ENOMEM;

	/* Point the app_fw_priv pointer in the PF to the coreNIC app */
	pf_dev->app_fw_priv = app_fw_nic;

	/* Read the number of vNICs created for the PF */
	snprintf(vnic_name, sizeof(vnic_name), "nfd_cfg_pf%u_num_ports", id);
	total_vnics = nfp_rtsym_read_le(pf_dev->sym_tbl, vnic_name, &ret);
	if (ret != 0 || total_vnics == 0 || total_vnics > 8) {
		PMD_INIT_LOG(ERR, "%s symbol with wrong value", vnic_name);
		ret = -ENODEV;
		goto app_cleanup;
	}

	if (pf_dev->multi_pf.enabled) {
		if (!nfp_check_multi_pf_from_fw(total_vnics)) {
			PMD_INIT_LOG(ERR, "NSP reports multi-PF, but the firmware does not");
			ret = -ENODEV;
			goto app_cleanup;
		}
	} else {
		/*
		 * For coreNIC the number of vNICs exposed should be the same as the
		 * number of physical ports.
		 */
		if (total_vnics != nfp_eth_table->count) {
			PMD_INIT_LOG(ERR, "Total physical ports do not match number of vNICs");
			ret = -ENODEV;
			goto app_cleanup;
		}
	}

	/* Populate coreNIC app properties */
	app_fw_nic->total_phyports = total_vnics;
	if (total_vnics > 1)
		app_fw_nic->multiport = true;

	/* Map the symbol table */
	snprintf(bar_name, sizeof(bar_name), "_pf%u_net_bar0", id);
	pf_dev->ctrl_bar = nfp_rtsym_map(pf_dev->sym_tbl, bar_name,
			app_fw_nic->total_phyports * NFP_NET_CFG_BAR_SZ,
			&pf_dev->ctrl_area);
	if (pf_dev->ctrl_bar == NULL) {
		PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for %s", bar_name);
		ret = -EIO;
		goto app_cleanup;
	}

	PMD_INIT_LOG(DEBUG, "ctrl bar: %p", pf_dev->ctrl_bar);

	/* Loop through all physical ports on PF */
	for (i = 0; i < app_fw_nic->total_phyports; i++) {
		if (pf_dev->multi_pf.enabled)
			snprintf(port_name, sizeof(port_name), "%s",
					pf_dev->pci_dev->device.name);
		else
			snprintf(port_name, sizeof(port_name), "%s_port%u",
					pf_dev->pci_dev->device.name, i);

		id = nfp_function_id_get(pf_dev, i);
		hw_init.idx = id;
		hw_init.nfp_idx = nfp_eth_table->ports[id].index;
		ret = rte_eth_dev_create(&pf_dev->pci_dev->device, port_name,
				sizeof(struct nfp_net_hw), NULL, NULL,
				nfp_net_init, &hw_init);
		if (ret != 0)
			goto port_cleanup;

	} /* End loop, all ports on this PF */

	return 0;

port_cleanup:
	for (i = 0; i < app_fw_nic->total_phyports; i++) {
		struct rte_eth_dev *eth_dev;

		if (pf_dev->multi_pf.enabled)
			snprintf(port_name, sizeof(port_name), "%s",
					pf_dev->pci_dev->device.name);
		else
			snprintf(port_name, sizeof(port_name), "%s_port%u",
					pf_dev->pci_dev->device.name, i);
		eth_dev = rte_eth_dev_get_by_name(port_name);
		if (eth_dev != NULL)
			rte_eth_dev_destroy(eth_dev, nfp_net_uninit);
	}
	nfp_cpp_area_release_free(pf_dev->ctrl_area);
app_cleanup:
	rte_free(app_fw_nic);

	return ret;
}

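/*
 * Derive the sp_indiff ("speed indifferent") flag and publish it to
 * the NSP hwinfo store. For non-flower firmware it is read from the
 * per-PF app capability symbol; flower firmware is always treated as
 * speed indifferent.
 */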
static int
nfp_net_hwinfo_set(uint8_t function_id,
		struct nfp_rtsym_table *sym_tbl,
		struct nfp_cpp *cpp,
		enum nfp_app_fw_id app_fw_id)
{
	int ret = 0;
	uint64_t app_cap;
	struct nfp_nsp *nsp;
	uint8_t sp_indiff = 1;
	char hw_info[RTE_ETH_NAME_MAX_LEN];
	char app_cap_name[RTE_ETH_NAME_MAX_LEN];

	if (app_fw_id != NFP_APP_FW_FLOWER_NIC) {
		/* Read the app capabilities of the firmware loaded */
		snprintf(app_cap_name, sizeof(app_cap_name), "_pf%u_net_app_cap", function_id);
		app_cap = nfp_rtsym_read_le(sym_tbl, app_cap_name, &ret);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Could not read app_fw_cap from firmware.");
			return ret;
		}

		/* Calculate the value of sp_indiff and write to hw_info */
		sp_indiff = app_cap & NFP_NET_APP_CAP_SP_INDIFF;
	}

	snprintf(hw_info, sizeof(hw_info), "sp_indiff=%u", sp_indiff);

	nsp = nfp_nsp_open(cpp);
	if (nsp == NULL) {
		PMD_INIT_LOG(ERR, "Could not get NSP.");
		return -EIO;
	}

	ret = nfp_nsp_hwinfo_set(nsp, hw_info, sizeof(hw_info));
	nfp_nsp_close(nsp);
	if (ret != 0) {
		PMD_INIT_LOG(ERR, "Failed to set parameter to hwinfo.");
		return ret;
	}

	return 0;
}

const uint32_t nfp_eth_media_table[NFP_MEDIA_LINK_MODES_NUMBER] = {
	[NFP_MEDIA_W0_RJ45_10M]     = RTE_ETH_LINK_SPEED_10M,
	[NFP_MEDIA_W0_RJ45_10M_HD]  = RTE_ETH_LINK_SPEED_10M_HD,
	[NFP_MEDIA_W0_RJ45_100M]    = RTE_ETH_LINK_SPEED_100M,
	[NFP_MEDIA_W0_RJ45_100M_HD] = RTE_ETH_LINK_SPEED_100M_HD,
	[NFP_MEDIA_W0_RJ45_1G]      = RTE_ETH_LINK_SPEED_1G,
	[NFP_MEDIA_W0_RJ45_2P5G]    = RTE_ETH_LINK_SPEED_2_5G,
	[NFP_MEDIA_W0_RJ45_5G]      = RTE_ETH_LINK_SPEED_5G,
	[NFP_MEDIA_W0_RJ45_10G]     = RTE_ETH_LINK_SPEED_10G,
	[NFP_MEDIA_1000BASE_CX]     = RTE_ETH_LINK_SPEED_1G,
	[NFP_MEDIA_1000BASE_KX]     = RTE_ETH_LINK_SPEED_1G,
	[NFP_MEDIA_10GBASE_KX4]     = RTE_ETH_LINK_SPEED_10G,
	[NFP_MEDIA_10GBASE_KR]      = RTE_ETH_LINK_SPEED_10G,
	[NFP_MEDIA_10GBASE_CX4]     = RTE_ETH_LINK_SPEED_10G,
	[NFP_MEDIA_10GBASE_CR]      = RTE_ETH_LINK_SPEED_10G,
	[NFP_MEDIA_10GBASE_SR]      = RTE_ETH_LINK_SPEED_10G,
	[NFP_MEDIA_10GBASE_ER]      = RTE_ETH_LINK_SPEED_10G,
	[NFP_MEDIA_25GBASE_KR]      = RTE_ETH_LINK_SPEED_25G,
	[NFP_MEDIA_25GBASE_KR_S]    = RTE_ETH_LINK_SPEED_25G,
	[NFP_MEDIA_25GBASE_CR]      = RTE_ETH_LINK_SPEED_25G,
	[NFP_MEDIA_25GBASE_CR_S]    = RTE_ETH_LINK_SPEED_25G,
	[NFP_MEDIA_25GBASE_SR]      = RTE_ETH_LINK_SPEED_25G,
	[NFP_MEDIA_40GBASE_CR4]     = RTE_ETH_LINK_SPEED_40G,
	[NFP_MEDIA_40GBASE_KR4]     = RTE_ETH_LINK_SPEED_40G,
	[NFP_MEDIA_40GBASE_SR4]     = RTE_ETH_LINK_SPEED_40G,
	[NFP_MEDIA_40GBASE_LR4]     = RTE_ETH_LINK_SPEED_40G,
	[NFP_MEDIA_50GBASE_KR]      = RTE_ETH_LINK_SPEED_50G,
	[NFP_MEDIA_50GBASE_SR]      = RTE_ETH_LINK_SPEED_50G,
	[NFP_MEDIA_50GBASE_CR]      = RTE_ETH_LINK_SPEED_50G,
	[NFP_MEDIA_50GBASE_LR]      = RTE_ETH_LINK_SPEED_50G,
	[NFP_MEDIA_50GBASE_ER]      = RTE_ETH_LINK_SPEED_50G,
	[NFP_MEDIA_50GBASE_FR]      = RTE_ETH_LINK_SPEED_50G,
	[NFP_MEDIA_100GBASE_KR4]    = RTE_ETH_LINK_SPEED_100G,
	[NFP_MEDIA_100GBASE_SR4]    = RTE_ETH_LINK_SPEED_100G,
	[NFP_MEDIA_100GBASE_CR4]    = RTE_ETH_LINK_SPEED_100G,
	[NFP_MEDIA_100GBASE_KP4]    = RTE_ETH_LINK_SPEED_100G,
	[NFP_MEDIA_100GBASE_CR10]   = RTE_ETH_LINK_SPEED_100G,
	[NFP_MEDIA_10GBASE_LR]      = RTE_ETH_LINK_SPEED_10G,
	[NFP_MEDIA_25GBASE_LR]      = RTE_ETH_LINK_SPEED_25G,
	[NFP_MEDIA_25GBASE_ER]      = RTE_ETH_LINK_SPEED_25G
};

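/*
 * Walk the supported-modes bitmaps reported by the NSP bit by bit and
 * accumulate the matching RTE_ETH_LINK_SPEED_* flags from
 * nfp_eth_media_table into the PF's speed capability mask.
 */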
static int
nfp_net_speed_capa_get_real(struct nfp_eth_media_buf *media_buf,
		struct nfp_pf_dev *pf_dev)
{
	uint32_t i;
	uint32_t j;
	uint32_t offset;
	uint32_t speed_capa = 0;
	uint64_t supported_modes;

	for (i = 0; i < RTE_DIM(media_buf->supported_modes); i++) {
		supported_modes = media_buf->supported_modes[i];
		offset = i * UINT64_BIT;
		for (j = 0; j < UINT64_BIT; j++) {
			if (supported_modes == 0)
				break;

			if ((supported_modes & 1) != 0) {
				if ((j + offset) >= NFP_MEDIA_LINK_MODES_NUMBER) {
					PMD_DRV_LOG(ERR, "Invalid offset of media table.");
					return -EINVAL;
				}

				speed_capa |= nfp_eth_media_table[j + offset];
			}

			supported_modes = supported_modes >> 1;
		}
	}

	pf_dev->speed_capa = speed_capa;

	return pf_dev->speed_capa == 0 ? -EINVAL : 0;
}

1715 static int
1716 nfp_net_speed_capa_get(struct nfp_pf_dev *pf_dev,
1717 		uint32_t port_id)
1718 {
1719 	int ret;
1720 	struct nfp_nsp *nsp;
1721 	struct nfp_eth_media_buf media_buf;
1722 
1723 	media_buf.eth_index = pf_dev->nfp_eth_table->ports[port_id].eth_index;
1724 	pf_dev->speed_capa = 0;
1725 
1726 	nsp = nfp_nsp_open(pf_dev->cpp);
1727 	if (nsp == NULL) {
1728 		PMD_DRV_LOG(ERR, "Couldn't get NSP.");
1729 		return -EIO;
1730 	}
1731 
1732 	ret = nfp_nsp_read_media(nsp, &media_buf, sizeof(media_buf));
1733 	nfp_nsp_close(nsp);
1734 	if (ret != 0) {
1735 		PMD_DRV_LOG(ERR, "Failed to read media.");
1736 		return ret;
1737 	}
1738 
1739 	ret = nfp_net_speed_capa_get_real(&media_buf, pf_dev);
1740 	if (ret < 0) {
1741 		PMD_DRV_LOG(ERR, "Speed capability is invalid.");
1742 		return ret;
1743 	}
1744 
1745 	return 0;
1746 }
1747 
1748 static int
1749 nfp_pf_init(struct rte_pci_device *pci_dev)
1750 {
1751 	void *sync;
1752 	uint32_t i;
1753 	uint32_t id;
1754 	int ret = 0;
1755 	uint64_t addr;
1756 	uint32_t index;
1757 	uint32_t cpp_id;
1758 	uint8_t function_id;
1759 	struct nfp_cpp *cpp;
1760 	struct nfp_pf_dev *pf_dev;
1761 	struct nfp_hwinfo *hwinfo;
1762 	enum nfp_app_fw_id app_fw_id;
1763 	char name[RTE_ETH_NAME_MAX_LEN];
1764 	struct nfp_rtsym_table *sym_tbl;
1765 	struct nfp_net_hw_priv *hw_priv;
1766 	char app_name[RTE_ETH_NAME_MAX_LEN];
1767 	struct nfp_eth_table *nfp_eth_table;
1768 	const struct nfp_dev_info *dev_info;
1769 
1770 	if (pci_dev == NULL)
1771 		return -ENODEV;
1772 
1773 	if (pci_dev->mem_resource[0].addr == NULL) {
1774 		PMD_INIT_LOG(ERR, "The address of BAR0 is NULL.");
1775 		return -ENODEV;
1776 	}
1777 
1778 	dev_info = nfp_dev_info_get(pci_dev->id.device_id);
1779 	if (dev_info == NULL) {
1780 		PMD_INIT_LOG(ERR, "Not supported device ID");
1781 		return -ENODEV;
1782 	}
1783 
1784 	hw_priv = rte_zmalloc(NULL, sizeof(*hw_priv), 0);
1785 	if (hw_priv == NULL) {
1786 		PMD_INIT_LOG(ERR, "Can not alloc memory for hw priv data");
1787 		return -ENOMEM;
1788 	}
1789 
1790 	/* Allocate memory for the PF "device" */
1791 	function_id = (pci_dev->addr.function) & 0x07;
1792 	snprintf(name, sizeof(name), "nfp_pf%u", function_id);
1793 	pf_dev = rte_zmalloc(name, sizeof(*pf_dev), 0);
1794 	if (pf_dev == NULL) {
1795 		PMD_INIT_LOG(ERR, "Can't allocate memory for the PF device");
1796 		ret = -ENOMEM;
1797 		goto hw_priv_free;
1798 	}
1799 
1800 	sync = nfp_sync_alloc();
1801 	if (sync == NULL) {
1802 		PMD_INIT_LOG(ERR, "Failed to alloc sync zone.");
1803 		ret = -ENOMEM;
1804 		goto pf_cleanup;
1805 	}
1806 
1807 	/*
1808 	 * When device bound to UIO, the device could be used, by mistake,
1809 	 * by two DPDK apps, and the UIO driver does not avoid it. This
1810 	 * could lead to a serious problem when configuring the NFP CPP
1811 	 * interface. Here we avoid this telling to the CPP init code to
1812 	 * use a lock file if UIO is being used.
1813 	 */
1814 	if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO)
1815 		cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, false);
1816 	else
1817 		cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, true);
1818 
1819 	if (cpp == NULL) {
1820 		PMD_INIT_LOG(ERR, "A CPP handle can not be obtained");
1821 		ret = -EIO;
1822 		goto sync_free;
1823 	}
1824 
1825 	hwinfo = nfp_hwinfo_read(cpp);
1826 	if (hwinfo == NULL) {
1827 		PMD_INIT_LOG(ERR, "Error reading hwinfo table");
1828 		ret = -EIO;
1829 		goto cpp_cleanup;
1830 	}
1831 
1832 	/* Read the number of physical ports from hardware */
1833 	nfp_eth_table = nfp_eth_read_ports(cpp);
1834 	if (nfp_eth_table == NULL) {
1835 		PMD_INIT_LOG(ERR, "Error reading NFP ethernet table");
1836 		ret = -EIO;
1837 		goto hwinfo_cleanup;
1838 	}
1839 
1840 	pf_dev->multi_pf.enabled = nfp_check_multi_pf_from_nsp(pci_dev, cpp);
1841 	pf_dev->multi_pf.function_id = function_id;
1842 
1843 	/* Force the physical port down to clear the possible DMA error */
1844 	for (i = 0; i < nfp_eth_table->count; i++) {
1845 		id = nfp_function_id_get(pf_dev, i);
1846 		index = nfp_eth_table->ports[id].index;
1847 		nfp_eth_set_configured(cpp, index, 0);
1848 	}
1849 
1850 	nfp_devargs_parse(&pf_dev->devargs, pci_dev->device.devargs);
1851 
1852 	if (nfp_fw_setup(pci_dev, cpp, nfp_eth_table, hwinfo,
1853 			dev_info, &pf_dev->multi_pf, pf_dev->devargs.force_reload_fw) != 0) {
1854 		PMD_INIT_LOG(ERR, "Error when uploading firmware");
1855 		ret = -EIO;
1856 		goto eth_table_cleanup;
1857 	}
1858 
1859 	/* Now the symbol table should be there */
1860 	sym_tbl = nfp_rtsym_table_read(cpp);
1861 	if (sym_tbl == NULL) {
1862 		PMD_INIT_LOG(ERR, "Something is wrong with the firmware symbol table");
1863 		ret = -EIO;
1864 		goto fw_cleanup;
1865 	}
1866 
1867 	/* Read the app ID of the firmware loaded */
1868 	snprintf(app_name, sizeof(app_name), "_pf%u_net_app_id", function_id);
1869 	app_fw_id = nfp_rtsym_read_le(sym_tbl, app_name, &ret);
1870 	if (ret != 0) {
1871 		PMD_INIT_LOG(ERR, "Couldn't read %s from firmware", app_name);
1872 		ret = -EIO;
1873 		goto sym_tbl_cleanup;
1874 	}
1875 
1876 	/* Write sp_indiff to hw_info */
1877 	ret = nfp_net_hwinfo_set(function_id, sym_tbl, cpp, app_fw_id);
1878 	if (ret != 0) {
1879 		PMD_INIT_LOG(ERR, "Failed to set hwinfo.");
1880 		ret = -EIO;
1881 		goto sym_tbl_cleanup;
1882 	}
1883 
1884 	/* Populate the newly created PF device */
1885 	pf_dev->app_fw_id = app_fw_id;
1886 	pf_dev->cpp = cpp;
1887 	pf_dev->hwinfo = hwinfo;
1888 	pf_dev->sym_tbl = sym_tbl;
1889 	pf_dev->pci_dev = pci_dev;
1890 	pf_dev->nfp_eth_table = nfp_eth_table;
1891 	pf_dev->sync = sync;
1892 
1893 	/* Get the speed capability */
1894 	for (i = 0; i < nfp_eth_table->count; i++) {
1895 		id = nfp_function_id_get(pf_dev, i);
1896 		ret = nfp_net_speed_capa_get(pf_dev, id);
1897 		if (ret != 0) {
1898 			PMD_INIT_LOG(ERR, "Failed to get speed capability.");
1899 			ret = -EIO;
1900 			goto sym_tbl_cleanup;
1901 		}
1902 	}
1903 
1904 	/* Configure access to tx/rx vNIC BARs */
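	/* The QC offset of queue 0 marks the start of the queue controller area. */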
1905 	addr = nfp_qcp_queue_offset(dev_info, 0);
1906 	cpp_id = NFP_CPP_ISLAND_ID(0, NFP_CPP_ACTION_RW, 0, 0);
1907 
1908 	pf_dev->qc_bar = nfp_cpp_map_area(pf_dev->cpp, cpp_id,
1909 			addr, dev_info->qc_area_sz, &pf_dev->qc_area);
1910 	if (pf_dev->qc_bar == NULL) {
1911 		PMD_INIT_LOG(ERR, "nfp_cpp_map_area fails for net.qc");
1912 		ret = -EIO;
1913 		goto sym_tbl_cleanup;
1914 	}
1915 
1916 	PMD_INIT_LOG(DEBUG, "qc_bar address: %p", pf_dev->qc_bar);
1917 
1918 	pf_dev->mac_stats_bar = nfp_rtsym_map(sym_tbl, "_mac_stats",
1919 			NFP_MAC_STATS_SIZE * nfp_eth_table->max_index,
1920 			&pf_dev->mac_stats_area);
1921 	if (pf_dev->mac_stats_bar == NULL) {
1922 		PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for _mac_stats");
		ret = -EIO;
1923 		goto hwqueues_cleanup;
1924 	}
1925 
1926 	hw_priv->pf_dev = pf_dev;
1927 	hw_priv->dev_info = dev_info;
1928 
1929 	/*
1930 	 * PF initialization has been done at this point. Call app specific
1931 	 * init code now.
1932 	 */
1933 	switch (pf_dev->app_fw_id) {
1934 	case NFP_APP_FW_CORE_NIC:
1935 		if (pf_dev->multi_pf.enabled) {
1936 			ret = nfp_enable_multi_pf(pf_dev);
1937 			if (ret != 0)
1938 				goto mac_stats_cleanup;
1939 		}
1940 
1941 		PMD_INIT_LOG(INFO, "Initializing coreNIC");
1942 		ret = nfp_init_app_fw_nic(hw_priv);
1943 		if (ret != 0) {
1944 			PMD_INIT_LOG(ERR, "Could not initialize coreNIC!");
1945 			goto mac_stats_cleanup;
1946 		}
1947 		break;
1948 	case NFP_APP_FW_FLOWER_NIC:
1949 		PMD_INIT_LOG(INFO, "Initializing Flower");
1950 		ret = nfp_init_app_fw_flower(hw_priv);
1951 		if (ret != 0) {
1952 			PMD_INIT_LOG(ERR, "Could not initialize Flower!");
1953 			goto mac_stats_cleanup;
1954 		}
1955 		break;
1956 	default:
1957 		PMD_INIT_LOG(ERR, "Unsupported firmware loaded");
1958 		ret = -EINVAL;
1959 		goto mac_stats_cleanup;
1960 	}
1961 
1962 	/* Register the CPP bridge service for use by the primary process */
1963 	ret = nfp_enable_cpp_service(pf_dev);
1964 	if (ret != 0)
1965 		PMD_INIT_LOG(INFO, "Failed to enable the CPP service.");
1966 
1967 	return 0;
1968 
1969 mac_stats_cleanup:
1970 	nfp_cpp_area_release_free(pf_dev->mac_stats_area);
1971 hwqueues_cleanup:
1972 	nfp_cpp_area_release_free(pf_dev->qc_area);
1973 sym_tbl_cleanup:
1974 	free(sym_tbl);
1975 fw_cleanup:
1976 	nfp_fw_unload(cpp);
1977 	nfp_net_keepalive_stop(&pf_dev->multi_pf);
1978 	nfp_net_keepalive_clear(pf_dev->multi_pf.beat_addr, pf_dev->multi_pf.function_id);
1979 	nfp_net_keepalive_uninit(&pf_dev->multi_pf);
1980 eth_table_cleanup:
1981 	free(nfp_eth_table);
1982 hwinfo_cleanup:
1983 	free(hwinfo);
1984 cpp_cleanup:
1985 	nfp_cpp_free(cpp);
1986 sync_free:
1987 	nfp_sync_free(sync);
1988 pf_cleanup:
1989 	rte_free(pf_dev);
1990 hw_priv_free:
1991 	rte_free(hw_priv);
1992 
1993 	return ret;
1994 }
1995 
1996 static int
1997 nfp_secondary_net_init(struct rte_eth_dev *eth_dev,
1998 		void *para)
1999 {
2000 	struct nfp_net_hw *net_hw;
2001 
2002 	net_hw = eth_dev->data->dev_private;
2003 	nfp_net_ethdev_ops_mount(net_hw, eth_dev);
2004 
2005 	eth_dev->process_private = para;
2006 
2007 	return 0;
2008 }
2009 
2010 static int
2011 nfp_secondary_init_app_fw_nic(struct nfp_net_hw_priv *hw_priv)
2012 {
2013 	uint32_t i;
2014 	int err = 0;
2015 	int ret = 0;
2016 	uint8_t function_id;
2017 	uint32_t total_vnics;
2018 	char pf_name[RTE_ETH_NAME_MAX_LEN];
2019 	struct nfp_pf_dev *pf_dev = hw_priv->pf_dev;
2020 
2021 	/* Read the number of vNICs created for the PF */
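	/* E.g. the symbol "nfd_cfg_pf0_num_ports" for PCI function 0. */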
2022 	function_id = (pf_dev->pci_dev->addr.function) & 0x07;
2023 	snprintf(pf_name, sizeof(pf_name), "nfd_cfg_pf%u_num_ports", function_id);
2024 	total_vnics = nfp_rtsym_read_le(pf_dev->sym_tbl, pf_name, &err);
2025 	if (err != 0 || total_vnics == 0 || total_vnics > 8) {
2026 		PMD_INIT_LOG(ERR, "The %s symbol has an invalid value", pf_name);
2027 		return -ENODEV;
2028 	}
2029 
2030 	for (i = 0; i < total_vnics; i++) {
2031 		char port_name[RTE_ETH_NAME_MAX_LEN];
2032 
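		/*
		 * With multi-PF firmware each PF exposes a single port, so the
		 * PCI device name is used as-is; otherwise a per-port suffix
		 * is appended.
		 */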
2033 		if (nfp_check_multi_pf_from_fw(total_vnics))
2034 			snprintf(port_name, sizeof(port_name), "%s",
2035 					pf_dev->pci_dev->device.name);
2036 		else
2037 			snprintf(port_name, sizeof(port_name), "%s_port%u",
2038 					pf_dev->pci_dev->device.name, i);
2039 
2040 		PMD_INIT_LOG(DEBUG, "Secondary attaching to port %s", port_name);
2041 		ret = rte_eth_dev_create(&pf_dev->pci_dev->device, port_name, 0,
2042 				NULL, NULL, nfp_secondary_net_init, hw_priv);
2043 		if (ret != 0) {
2044 			PMD_INIT_LOG(ERR, "Secondary process failed to attach to port %s", port_name);
2045 			ret = -ENODEV;
2046 			break;
2047 		}
2048 	}
2049 
2050 	return ret;
2051 }
2052 
2053 /*
2054  * When attaching to the NFP4000/6000 PF from a secondary process,
2055  * there is no need to initialise the PF again; only minimal work is
2056  * required here.
2057  */
2058 static int
2059 nfp_pf_secondary_init(struct rte_pci_device *pci_dev)
2060 {
2061 	void *sync;
2062 	int ret = 0;
2063 	struct nfp_cpp *cpp;
2064 	uint8_t function_id;
2065 	struct nfp_pf_dev *pf_dev;
2066 	enum nfp_app_fw_id app_fw_id;
2067 	char name[RTE_ETH_NAME_MAX_LEN];
2068 	struct nfp_rtsym_table *sym_tbl;
2069 	struct nfp_net_hw_priv *hw_priv;
2070 	const struct nfp_dev_info *dev_info;
2071 	char app_name[RTE_ETH_NAME_MAX_LEN];
2072 
2073 	if (pci_dev == NULL)
2074 		return -ENODEV;
2075 
2076 	if (pci_dev->mem_resource[0].addr == NULL) {
2077 		PMD_INIT_LOG(ERR, "The address of BAR0 is NULL.");
2078 		return -ENODEV;
2079 	}
2080 
2081 	dev_info = nfp_dev_info_get(pci_dev->id.device_id);
2082 	if (dev_info == NULL) {
2083 		PMD_INIT_LOG(ERR, "Unsupported device ID");
2084 		return -ENODEV;
2085 	}
2086 
2087 	hw_priv = rte_zmalloc(NULL, sizeof(*hw_priv), 0);
2088 	if (hw_priv == NULL) {
2089 		PMD_INIT_LOG(ERR, "Cannot allocate memory for hw priv data");
2090 		return -ENOMEM;
2091 	}
2092 
2093 	/* Allocate memory for the PF "device" */
2094 	snprintf(name, sizeof(name), "nfp_pf%d", 0);
2095 	pf_dev = rte_zmalloc(name, sizeof(*pf_dev), 0);
2096 	if (pf_dev == NULL) {
2097 		PMD_INIT_LOG(ERR, "Can't allocate memory for the PF device");
2098 		ret = -ENOMEM;
2099 		goto hw_priv_free;
2100 	}
2101 
2102 	sync = nfp_sync_alloc();
2103 	if (sync == NULL) {
2104 		PMD_INIT_LOG(ERR, "Failed to alloc sync zone.");
2105 		ret = -ENOMEM;
2106 		goto pf_cleanup;
2107 	}
2108 
2109 	/*
2110 	 * When the device is bound to UIO, it could mistakenly be used by
2111 	 * two DPDK apps at once, and the UIO driver does not prevent it.
2112 	 * This could lead to a serious problem when configuring the NFP
2113 	 * CPP interface. Avoid this by telling the CPP init code to use
2114 	 * a lock file if UIO is being used.
2115 	 */
2116 	if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO)
2117 		cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, false);
2118 	else
2119 		cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, true);
2120 
2121 	if (cpp == NULL) {
2122 		PMD_INIT_LOG(ERR, "A CPP handle could not be obtained");
2123 		ret = -EIO;
2124 		goto sync_free;
2125 	}
2126 
2127 	/*
2128 	 * We don't have access to the PF created in the primary process
2129 	 * here, so we have to read the number of ports from the firmware.
2130 	 */
2131 	sym_tbl = nfp_rtsym_table_read(cpp);
2132 	if (sym_tbl == NULL) {
2133 		PMD_INIT_LOG(ERR, "Something is wrong with the firmware symbol table");
2134 		ret = -EIO;
2135 		goto cpp_cleanup;
2136 	}
2137 
2138 	/* Read the app ID of the firmware loaded */
2139 	function_id = pci_dev->addr.function & 0x7;
2140 	snprintf(app_name, sizeof(app_name), "_pf%u_net_app_id", function_id);
2141 	app_fw_id = nfp_rtsym_read_le(sym_tbl, app_name, &ret);
2142 	if (ret != 0) {
2143 		PMD_INIT_LOG(ERR, "Couldn't read %s from firmware", app_name);
2144 		ret = -EIO;
2145 		goto sym_tbl_cleanup;
2146 	}
2147 
2148 	/* Populate the newly created PF device */
2149 	pf_dev->app_fw_id = app_fw_id;
2150 	pf_dev->cpp = cpp;
2151 	pf_dev->sym_tbl = sym_tbl;
2152 	pf_dev->pci_dev = pci_dev;
2153 	pf_dev->sync = sync;
2154 
2155 	hw_priv->pf_dev = pf_dev;
2156 	hw_priv->dev_info = dev_info;
2157 
2158 	/* Call app specific init code now */
2159 	switch (app_fw_id) {
2160 	case NFP_APP_FW_CORE_NIC:
2161 		PMD_INIT_LOG(INFO, "Initializing coreNIC");
2162 		ret = nfp_secondary_init_app_fw_nic(hw_priv);
2163 		if (ret != 0) {
2164 			PMD_INIT_LOG(ERR, "Could not initialize coreNIC!");
2165 			goto sym_tbl_cleanup;
2166 		}
2167 		break;
2168 	case NFP_APP_FW_FLOWER_NIC:
2169 		PMD_INIT_LOG(INFO, "Initializing Flower");
2170 		ret = nfp_secondary_init_app_fw_flower(hw_priv);
2171 		if (ret != 0) {
2172 			PMD_INIT_LOG(ERR, "Could not initialize Flower!");
2173 			goto sym_tbl_cleanup;
2174 		}
2175 		break;
2176 	default:
2177 		PMD_INIT_LOG(ERR, "Unsupported firmware loaded");
2178 		ret = -EINVAL;
2179 		goto sym_tbl_cleanup;
2180 	}
2181 
2182 	return 0;
2183 
2184 sym_tbl_cleanup:
2185 	free(sym_tbl);
2186 cpp_cleanup:
2187 	nfp_cpp_free(cpp);
2188 sync_free:
2189 	nfp_sync_free(sync);
2190 pf_cleanup:
2191 	rte_free(pf_dev);
2192 hw_priv_free:
2193 	rte_free(hw_priv);
2194 
2195 	return ret;
2196 }
2197 
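/*
 * Probe entry point: the primary process performs the full PF bring-up,
 * while secondary processes only attach to the already initialised ports.
 */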
2198 static int
2199 nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2200 		struct rte_pci_device *dev)
2201 {
2202 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
2203 		return nfp_pf_init(dev);
2204 	else
2205 		return nfp_pf_secondary_init(dev);
2206 }
2207 
2208 static const struct rte_pci_id pci_id_nfp_pf_net_map[] = {
2209 	{
2210 		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
2211 				PCI_DEVICE_ID_NFP3800_PF_NIC)
2212 	},
2213 	{
2214 		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
2215 				PCI_DEVICE_ID_NFP4000_PF_NIC)
2216 	},
2217 	{
2218 		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
2219 				PCI_DEVICE_ID_NFP6000_PF_NIC)
2220 	},
2221 	{
2222 		RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE,
2223 				PCI_DEVICE_ID_NFP3800_PF_NIC)
2224 	},
2225 	{
2226 		RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE,
2227 				PCI_DEVICE_ID_NFP4000_PF_NIC)
2228 	},
2229 	{
2230 		RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE,
2231 				PCI_DEVICE_ID_NFP6000_PF_NIC)
2232 	},
2233 	{
2234 		.vendor_id = 0,
2235 	},
2236 };
2237 
2238 static int
2239 nfp_pci_uninit(struct rte_eth_dev *eth_dev)
2240 {
2241 	uint16_t port_id;
2242 	struct rte_pci_device *pci_dev;
2243 
2244 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2245 
2246 	/* Free up all physical ports under PF */
2247 	RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device)
2248 		rte_eth_dev_close(port_id);
2249 	/*
2250 	 * Ports can be closed and freed but hotplugging is not
2251 	 * currently supported.
2252 	 */
2253 	return -ENOTSUP;
2254 }
2255 
2256 static int
2257 eth_nfp_pci_remove(struct rte_pci_device *pci_dev)
2258 {
2259 	return rte_eth_dev_pci_generic_remove(pci_dev, nfp_pci_uninit);
2260 }
2261 
2262 static struct rte_pci_driver rte_nfp_net_pf_pmd = {
2263 	.id_table = pci_id_nfp_pf_net_map,
2264 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
2265 	.probe = nfp_pf_pci_probe,
2266 	.remove = eth_nfp_pci_remove,
2267 };
2268 
2269 RTE_PMD_REGISTER_PCI(NFP_PF_DRIVER_NAME, rte_nfp_net_pf_pmd);
2270 RTE_PMD_REGISTER_PCI_TABLE(NFP_PF_DRIVER_NAME, pci_id_nfp_pf_net_map);
2271 RTE_PMD_REGISTER_KMOD_DEP(NFP_PF_DRIVER_NAME, "* igb_uio | uio_pci_generic | vfio");
2272 RTE_PMD_REGISTER_PARAM_STRING(NFP_PF_DRIVER_NAME, NFP_PF_FORCE_RELOAD_FW "=<0|1>");
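/*
 * Example devarg usage, with <pci_addr> standing in for the device's
 * PCI address:
 *     dpdk-testpmd -a <pci_addr>,force_reload_fw=1 -- -i
 */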
2273