xref: /dpdk/drivers/net/nfp/nfp_ethdev.c (revision e4f0e2158b8e210065e91f45fd83aee118cbbd96)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2014-2021 Netronome Systems, Inc.
3  * All rights reserved.
4  *
5  * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
6  */
7 
8 #include <eal_firmware.h>
9 #include <rte_alarm.h>
10 
11 #include "flower/nfp_flower.h"
12 #include "nfd3/nfp_nfd3.h"
13 #include "nfdk/nfp_nfdk.h"
14 #include "nfpcore/nfp_cpp.h"
15 #include "nfpcore/nfp_hwinfo.h"
16 #include "nfpcore/nfp_rtsym.h"
17 #include "nfpcore/nfp_nsp.h"
18 #include "nfpcore/nfp6000_pcie.h"
19 
20 #include "nfp_cpp_bridge.h"
21 #include "nfp_ipsec.h"
22 #include "nfp_logs.h"
23 
24 static int
25 nfp_net_pf_read_mac(struct nfp_app_fw_nic *app_fw_nic,
26 		uint16_t port)
27 {
28 	struct nfp_net_hw *hw;
29 	struct nfp_eth_table *nfp_eth_table;
30 
31 	/* Grab a pointer to the correct physical port */
32 	hw = app_fw_nic->ports[port];
33 
34 	nfp_eth_table = nfp_eth_read_ports(app_fw_nic->pf_dev->cpp);
35 
36 	rte_ether_addr_copy(&nfp_eth_table->ports[port].mac_addr, &hw->super.mac_addr);
37 
38 	free(nfp_eth_table);
39 
40 	return 0;
41 }
42 
/*
 * The .dev_start callback: enable queues, wire up Rx interrupts if
 * requested, program the control word (RSS/VXLAN/offloads), allocate
 * Rx mbufs and finally bring the physical port up.
 *
 * Returns 0 on success or a negative errno; on freelist failure the
 * queues are disabled again before returning.
 */
static int
nfp_net_start(struct rte_eth_dev *dev)
{
	int ret;
	uint16_t i;
	struct nfp_hw *hw;
	uint32_t new_ctrl;
	uint32_t update = 0;
	uint32_t cap_extend;
	uint32_t intr_vector;
	uint32_t ctrl_extend = 0;
	struct nfp_net_hw *net_hw;
	struct nfp_pf_dev *pf_dev;
	struct rte_eth_conf *dev_conf;
	struct rte_eth_rxmode *rxmode;
	struct nfp_app_fw_nic *app_fw_nic;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;

	net_hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);
	hw = &net_hw->super;

	/* Disabling queues just in case... */
	nfp_net_disable_queues(dev);

	/* Enabling the required queues in the device */
	nfp_net_enable_queues(dev);

	/* Check and configure queue intr-vector mapping */
	if (dev->data->dev_conf.intr_conf.rxq != 0) {
		/* Rx interrupts cannot be shared across a multiport PF */
		if (app_fw_nic->multiport) {
			PMD_INIT_LOG(ERR, "PMD rx interrupt is not supported "
					"with NFP multiport PF");
				return -EINVAL;
		}

		if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_UIO) {
			/*
			 * Better not to share LSC with RX interrupts.
			 * Unregistering LSC interrupt handler.
			 */
			rte_intr_callback_unregister(intr_handle,
					nfp_net_dev_interrupt_handler, (void *)dev);

			/* UIO exposes a single interrupt, so only one Rx queue fits */
			if (dev->data->nb_rx_queues > 1) {
				PMD_INIT_LOG(ERR, "PMD rx interrupt only "
						"supports 1 queue with UIO");
				return -EIO;
			}
		}

		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector) != 0)
			return -1;

		nfp_configure_rx_interrupt(dev, intr_handle);
		update = NFP_NET_CFG_UPDATE_MSIX;
	}

	/* Checking MTU set */
	if (dev->data->mtu > net_hw->flbufsz) {
		PMD_INIT_LOG(ERR, "MTU (%u) can't be larger than the current NFP_FRAME_SIZE (%u)",
				dev->data->mtu, net_hw->flbufsz);
		return -ERANGE;
	}

	rte_intr_enable(intr_handle);

	/* Base control word derived from the configured offloads */
	new_ctrl = nfp_check_offloads(dev);

	/* Writing configuration parameters in the device */
	nfp_net_params_setup(net_hw);

	dev_conf = &dev->data->dev_conf;
	rxmode = &dev_conf->rxmode;

	if ((rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) != 0) {
		nfp_net_rss_config_default(dev);
		update |= NFP_NET_CFG_UPDATE_RSS;
		new_ctrl |= nfp_net_cfg_ctrl_rss(hw->cap);
	}

	/* Enable device */
	new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;

	update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;

	/* Enable vxlan */
	if ((hw->cap & NFP_NET_CFG_CTRL_VXLAN) != 0) {
		new_ctrl |= NFP_NET_CFG_CTRL_VXLAN;
		update |= NFP_NET_CFG_UPDATE_VXLAN;
	}

	if ((hw->cap & NFP_NET_CFG_CTRL_RINGCFG) != 0)
		new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;

	/* Push the control word; the firmware applies it atomically */
	if (nfp_reconfig(hw, new_ctrl, update) != 0)
		return -EIO;

	/* Enable packet type offload by extend ctrl word1. */
	cap_extend = hw->cap_ext;
	if ((cap_extend & NFP_NET_CFG_CTRL_PKT_TYPE) != 0)
		ctrl_extend = NFP_NET_CFG_CTRL_PKT_TYPE;

	if ((cap_extend & NFP_NET_CFG_CTRL_IPSEC) != 0)
		ctrl_extend |= NFP_NET_CFG_CTRL_IPSEC_SM_LOOKUP
				| NFP_NET_CFG_CTRL_IPSEC_LM_LOOKUP;

	update = NFP_NET_CFG_UPDATE_GEN;
	if (nfp_ext_reconfig(hw, ctrl_extend, update) != 0)
		return -EIO;

	/* Cache the extended control word only after the device accepted it */
	hw->ctrl_ext = ctrl_extend;

	/*
	 * Allocating rte mbufs for configured rx queues.
	 * This requires queues being enabled before.
	 */
	if (nfp_net_rx_freelist_setup(dev) != 0) {
		ret = -ENOMEM;
		goto error;
	}

	/* Secondary processes carry their own CPP handle in process_private */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		/* Configure the physical port up */
		nfp_eth_set_configured(net_hw->cpp, net_hw->nfp_idx, 1);
	else
		nfp_eth_set_configured(dev->process_private, net_hw->nfp_idx, 1);

	hw->ctrl = new_ctrl;

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;

error:
	/*
	 * An error returned by this function should mean the app
	 * exiting and then the system releasing all the memory
	 * allocated even memory coming from hugepages.
	 *
	 * The device could be enabled at this point with some queues
	 * ready for getting packets. This is true if the call to
	 * nfp_net_rx_freelist_setup() succeeds for some queues but
	 * fails for subsequent queues.
	 *
	 * This should make the app exiting but better if we tell the
	 * device first.
	 */
	nfp_net_disable_queues(dev);

	return ret;
}
201 
202 /* Stop device: disable rx and tx functions to allow for reconfiguring. */
203 static int
204 nfp_net_stop(struct rte_eth_dev *dev)
205 {
206 	struct nfp_net_hw *hw;
207 
208 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
209 
210 	nfp_net_disable_queues(dev);
211 
212 	/* Clear queues */
213 	nfp_net_stop_tx_queue(dev);
214 	nfp_net_stop_rx_queue(dev);
215 
216 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
217 		/* Configure the physical port down */
218 		nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 0);
219 	else
220 		nfp_eth_set_configured(dev->process_private, hw->nfp_idx, 0);
221 
222 	return 0;
223 }
224 
225 /* Set the link up. */
226 static int
227 nfp_net_set_link_up(struct rte_eth_dev *dev)
228 {
229 	struct nfp_net_hw *hw;
230 
231 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
232 
233 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
234 		/* Configure the physical port down */
235 		return nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 1);
236 	else
237 		return nfp_eth_set_configured(dev->process_private, hw->nfp_idx, 1);
238 }
239 
240 /* Set the link down. */
241 static int
242 nfp_net_set_link_down(struct rte_eth_dev *dev)
243 {
244 	struct nfp_net_hw *hw;
245 
246 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
247 
248 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
249 		/* Configure the physical port down */
250 		return nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 0);
251 	else
252 		return nfp_eth_set_configured(dev->process_private, hw->nfp_idx, 0);
253 }
254 
/* Reset and stop device. The device can not be restarted. */
static int
nfp_net_close(struct rte_eth_dev *dev)
{
	uint8_t i;
	struct nfp_net_hw *hw;
	struct nfp_pf_dev *pf_dev;
	struct rte_pci_device *pci_dev;
	struct nfp_app_fw_nic *app_fw_nic;

	/* Only the primary process owns the PF resources being freed below */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);

	/*
	 * We assume that the DPDK application is stopping all the
	 * threads/queues before calling the device close function.
	 */
	nfp_net_disable_queues(dev);

	/* Clear queues */
	nfp_net_close_tx_queue(dev);
	nfp_net_close_rx_queue(dev);

	/* Clear ipsec */
	nfp_ipsec_uninit(dev);

	/* Cancel possible impending LSC work here before releasing the port */
	rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler, (void *)dev);

	/* Only free PF resources after all physical ports have been closed */
	/* Mark this port as unused and free device priv resources */
	nn_cfg_writeb(&hw->super, NFP_NET_CFG_LSC, 0xff);
	app_fw_nic->ports[hw->idx] = NULL;

	for (i = 0; i < app_fw_nic->total_phyports; i++) {
		/* Check to see if ports are still in use */
		if (app_fw_nic->ports[i] != NULL)
			return 0;
	}

	/* Now it is safe to free all PF resources */
	PMD_INIT_LOG(INFO, "Freeing PF resources");
	nfp_cpp_area_free(pf_dev->ctrl_area);
	nfp_cpp_area_free(pf_dev->qc_area);
	free(pf_dev->hwinfo);
	free(pf_dev->sym_tbl);
	nfp_cpp_free(pf_dev->cpp);
	rte_free(app_fw_nic);
	rte_free(pf_dev);

	/*
	 * NOTE(review): interrupts are disabled/unregistered only after the
	 * PF resources above are freed — confirm no LSC interrupt can fire
	 * in between and touch the freed state.
	 */
	rte_intr_disable(pci_dev->intr_handle);

	/* Unregister callback func from eal lib */
	rte_intr_callback_unregister(pci_dev->intr_handle,
			nfp_net_dev_interrupt_handler, (void *)dev);

	return 0;
}
318 
319 static int
320 nfp_net_find_vxlan_idx(struct nfp_net_hw *hw,
321 		uint16_t port,
322 		uint32_t *idx)
323 {
324 	uint32_t i;
325 	int free_idx = -1;
326 
327 	for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i++) {
328 		if (hw->vxlan_ports[i] == port) {
329 			free_idx = i;
330 			break;
331 		}
332 
333 		if (hw->vxlan_usecnt[i] == 0) {
334 			free_idx = i;
335 			break;
336 		}
337 	}
338 
339 	if (free_idx == -1)
340 		return -EINVAL;
341 
342 	*idx = free_idx;
343 
344 	return 0;
345 }
346 
347 static int
348 nfp_udp_tunnel_port_add(struct rte_eth_dev *dev,
349 		struct rte_eth_udp_tunnel *tunnel_udp)
350 {
351 	int ret;
352 	uint32_t idx;
353 	uint16_t vxlan_port;
354 	struct nfp_net_hw *hw;
355 	enum rte_eth_tunnel_type tnl_type;
356 
357 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
358 	vxlan_port = tunnel_udp->udp_port;
359 	tnl_type   = tunnel_udp->prot_type;
360 
361 	if (tnl_type != RTE_ETH_TUNNEL_TYPE_VXLAN) {
362 		PMD_DRV_LOG(ERR, "Not VXLAN tunnel");
363 		return -ENOTSUP;
364 	}
365 
366 	ret = nfp_net_find_vxlan_idx(hw, vxlan_port, &idx);
367 	if (ret != 0) {
368 		PMD_DRV_LOG(ERR, "Failed find valid vxlan idx");
369 		return -EINVAL;
370 	}
371 
372 	if (hw->vxlan_usecnt[idx] == 0) {
373 		ret = nfp_net_set_vxlan_port(hw, idx, vxlan_port);
374 		if (ret != 0) {
375 			PMD_DRV_LOG(ERR, "Failed set vxlan port");
376 			return -EINVAL;
377 		}
378 	}
379 
380 	hw->vxlan_usecnt[idx]++;
381 
382 	return 0;
383 }
384 
385 static int
386 nfp_udp_tunnel_port_del(struct rte_eth_dev *dev,
387 		struct rte_eth_udp_tunnel *tunnel_udp)
388 {
389 	int ret;
390 	uint32_t idx;
391 	uint16_t vxlan_port;
392 	struct nfp_net_hw *hw;
393 	enum rte_eth_tunnel_type tnl_type;
394 
395 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
396 	vxlan_port = tunnel_udp->udp_port;
397 	tnl_type   = tunnel_udp->prot_type;
398 
399 	if (tnl_type != RTE_ETH_TUNNEL_TYPE_VXLAN) {
400 		PMD_DRV_LOG(ERR, "Not VXLAN tunnel");
401 		return -ENOTSUP;
402 	}
403 
404 	ret = nfp_net_find_vxlan_idx(hw, vxlan_port, &idx);
405 	if (ret != 0 || hw->vxlan_usecnt[idx] == 0) {
406 		PMD_DRV_LOG(ERR, "Failed find valid vxlan idx");
407 		return -EINVAL;
408 	}
409 
410 	hw->vxlan_usecnt[idx]--;
411 
412 	if (hw->vxlan_usecnt[idx] == 0) {
413 		ret = nfp_net_set_vxlan_port(hw, idx, 0);
414 		if (ret != 0) {
415 			PMD_DRV_LOG(ERR, "Failed set vxlan port");
416 			return -EINVAL;
417 		}
418 	}
419 
420 	return 0;
421 }
422 
/* Initialise and register driver with DPDK Application */
static const struct eth_dev_ops nfp_net_eth_dev_ops = {
	/* Device lifecycle and link control */
	.dev_configure          = nfp_net_configure,
	.dev_start              = nfp_net_start,
	.dev_stop               = nfp_net_stop,
	.dev_set_link_up        = nfp_net_set_link_up,
	.dev_set_link_down      = nfp_net_set_link_down,
	.dev_close              = nfp_net_close,
	.promiscuous_enable     = nfp_net_promisc_enable,
	.promiscuous_disable    = nfp_net_promisc_disable,
	.link_update            = nfp_net_link_update,
	/* Statistics */
	.stats_get              = nfp_net_stats_get,
	.stats_reset            = nfp_net_stats_reset,
	.xstats_get             = nfp_net_xstats_get,
	.xstats_reset           = nfp_net_xstats_reset,
	.xstats_get_names       = nfp_net_xstats_get_names,
	.xstats_get_by_id       = nfp_net_xstats_get_by_id,
	.xstats_get_names_by_id = nfp_net_xstats_get_names_by_id,
	/* Device info, MAC and VLAN */
	.dev_infos_get          = nfp_net_infos_get,
	.dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
	.mtu_set                = nfp_net_dev_mtu_set,
	.mac_addr_set           = nfp_net_set_mac_addr,
	.vlan_offload_set       = nfp_net_vlan_offload_set,
	/* RSS */
	.reta_update            = nfp_net_reta_update,
	.reta_query             = nfp_net_reta_query,
	.rss_hash_update        = nfp_net_rss_hash_update,
	.rss_hash_conf_get      = nfp_net_rss_hash_conf_get,
	/* Queue setup and interrupts */
	.rx_queue_setup         = nfp_net_rx_queue_setup,
	.rx_queue_release       = nfp_net_rx_queue_release,
	.tx_queue_setup         = nfp_net_tx_queue_setup,
	.tx_queue_release       = nfp_net_tx_queue_release,
	.rx_queue_intr_enable   = nfp_rx_queue_intr_enable,
	.rx_queue_intr_disable  = nfp_rx_queue_intr_disable,
	/* Tunnel offload and firmware info */
	.udp_tunnel_port_add    = nfp_udp_tunnel_port_add,
	.udp_tunnel_port_del    = nfp_udp_tunnel_port_del,
	.fw_version_get         = nfp_net_firmware_version_get,
};
460 
461 static inline void
462 nfp_net_ethdev_ops_mount(struct nfp_net_hw *hw,
463 		struct rte_eth_dev *eth_dev)
464 {
465 	if (hw->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3)
466 		eth_dev->tx_pkt_burst = nfp_net_nfd3_xmit_pkts;
467 	else
468 		eth_dev->tx_pkt_burst = nfp_net_nfdk_xmit_pkts;
469 
470 	eth_dev->dev_ops = &nfp_net_eth_dev_ops;
471 	eth_dev->rx_queue_count = nfp_net_rx_queue_count;
472 	eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
473 }
474 
475 static int
476 nfp_net_init(struct rte_eth_dev *eth_dev)
477 {
478 	int err;
479 	uint16_t port;
480 	uint64_t rx_base;
481 	uint64_t tx_base;
482 	struct nfp_hw *hw;
483 	struct nfp_net_hw *net_hw;
484 	struct nfp_pf_dev *pf_dev;
485 	struct rte_pci_device *pci_dev;
486 	struct nfp_app_fw_nic *app_fw_nic;
487 
488 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
489 
490 	/* Use backpointer here to the PF of this eth_dev */
491 	pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(eth_dev->data->dev_private);
492 
493 	/* Use backpointer to the CoreNIC app struct */
494 	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);
495 
496 	port = ((struct nfp_net_hw *)eth_dev->data->dev_private)->idx;
497 	if (port > 7) {
498 		PMD_DRV_LOG(ERR, "Port value is wrong");
499 		return -ENODEV;
500 	}
501 
502 	/*
503 	 * Use PF array of physical ports to get pointer to
504 	 * this specific port.
505 	 */
506 	net_hw = app_fw_nic->ports[port];
507 	hw = &net_hw->super;
508 
509 	PMD_INIT_LOG(DEBUG, "Working with physical port number: %hu, "
510 			"NFP internal port number: %d", port, net_hw->nfp_idx);
511 
512 	rte_eth_copy_pci_info(eth_dev, pci_dev);
513 
514 	hw->ctrl_bar = pci_dev->mem_resource[0].addr;
515 	if (hw->ctrl_bar == NULL) {
516 		PMD_DRV_LOG(ERR, "hw->ctrl_bar is NULL. BAR0 not configured");
517 		return -ENODEV;
518 	}
519 
520 	if (port == 0) {
521 		uint32_t min_size;
522 
523 		hw->ctrl_bar = pf_dev->ctrl_bar;
524 		min_size = NFP_MAC_STATS_SIZE * net_hw->pf_dev->nfp_eth_table->max_index;
525 		net_hw->mac_stats_bar = nfp_rtsym_map(net_hw->pf_dev->sym_tbl, "_mac_stats",
526 				min_size, &net_hw->mac_stats_area);
527 		if (net_hw->mac_stats_bar == NULL) {
528 			PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for _mac_stats_bar");
529 			return -EIO;
530 		}
531 
532 		net_hw->mac_stats = net_hw->mac_stats_bar;
533 	} else {
534 		if (pf_dev->ctrl_bar == NULL)
535 			return -ENODEV;
536 
537 		/* Use port offset in pf ctrl_bar for this ports control bar */
538 		hw->ctrl_bar = pf_dev->ctrl_bar + (port * NFP_NET_CFG_BAR_SZ);
539 		net_hw->mac_stats = app_fw_nic->ports[0]->mac_stats_bar +
540 				(net_hw->nfp_idx * NFP_MAC_STATS_SIZE);
541 	}
542 
543 	PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar);
544 	PMD_INIT_LOG(DEBUG, "MAC stats: %p", net_hw->mac_stats);
545 
546 	err = nfp_net_common_init(pci_dev, net_hw);
547 	if (err != 0)
548 		return err;
549 
550 	err = nfp_net_tlv_caps_parse(eth_dev);
551 	if (err != 0) {
552 		PMD_INIT_LOG(ERR, "Failed to parser TLV caps");
553 		return err;
554 	}
555 
556 	err = nfp_ipsec_init(eth_dev);
557 	if (err != 0) {
558 		PMD_INIT_LOG(ERR, "Failed to init IPsec module");
559 		return err;
560 	}
561 
562 	nfp_net_ethdev_ops_mount(net_hw, eth_dev);
563 
564 	net_hw->eth_xstats_base = rte_malloc("rte_eth_xstat", sizeof(struct rte_eth_xstat) *
565 			nfp_net_xstats_size(eth_dev), 0);
566 	if (net_hw->eth_xstats_base == NULL) {
567 		PMD_INIT_LOG(ERR, "no memory for xstats base values on device %s!",
568 				pci_dev->device.name);
569 		return -ENOMEM;
570 	}
571 
572 	/* Work out where in the BAR the queues start. */
573 	tx_base = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
574 	rx_base = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
575 
576 	net_hw->tx_bar = pf_dev->qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
577 	net_hw->rx_bar = pf_dev->qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ;
578 	eth_dev->data->dev_private = net_hw;
579 
580 	PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
581 			hw->ctrl_bar, net_hw->tx_bar, net_hw->rx_bar);
582 
583 	nfp_net_cfg_queue_setup(net_hw);
584 	net_hw->mtu = RTE_ETHER_MTU;
585 
586 	/* VLAN insertion is incompatible with LSOv2 */
587 	if ((hw->cap & NFP_NET_CFG_CTRL_LSO2) != 0)
588 		hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN;
589 
590 	nfp_net_log_device_information(net_hw);
591 
592 	/* Initializing spinlock for reconfigs */
593 	rte_spinlock_init(&hw->reconfig_lock);
594 
595 	/* Allocating memory for mac addr */
596 	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", RTE_ETHER_ADDR_LEN, 0);
597 	if (eth_dev->data->mac_addrs == NULL) {
598 		PMD_INIT_LOG(ERR, "Failed to space for MAC address");
599 		return -ENOMEM;
600 	}
601 
602 	nfp_net_pf_read_mac(app_fw_nic, port);
603 	nfp_write_mac(hw, &hw->mac_addr.addr_bytes[0]);
604 
605 	if (rte_is_valid_assigned_ether_addr(&hw->mac_addr) == 0) {
606 		PMD_INIT_LOG(INFO, "Using random mac address for port %d", port);
607 		/* Using random mac addresses for VFs */
608 		rte_eth_random_addr(&hw->mac_addr.addr_bytes[0]);
609 		nfp_write_mac(hw, &hw->mac_addr.addr_bytes[0]);
610 	}
611 
612 	/* Copying mac address to DPDK eth_dev struct */
613 	rte_ether_addr_copy(&hw->mac_addr, eth_dev->data->mac_addrs);
614 
615 	if ((hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR) == 0)
616 		eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR;
617 
618 	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
619 
620 	PMD_INIT_LOG(INFO, "port %d VendorID=%#x DeviceID=%#x "
621 			"mac=" RTE_ETHER_ADDR_PRT_FMT,
622 			eth_dev->data->port_id, pci_dev->id.vendor_id,
623 			pci_dev->id.device_id,
624 			RTE_ETHER_ADDR_BYTES(&hw->mac_addr));
625 
626 	/* Registering LSC interrupt handler */
627 	rte_intr_callback_register(pci_dev->intr_handle,
628 			nfp_net_dev_interrupt_handler, (void *)eth_dev);
629 	/* Telling the firmware about the LSC interrupt entry */
630 	nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
631 	/* Recording current stats counters values */
632 	nfp_net_stats_reset(eth_dev);
633 
634 	return 0;
635 }
636 
637 #define DEFAULT_FW_PATH       "/lib/firmware/netronome"
638 
639 static int
640 nfp_fw_upload(struct rte_pci_device *dev,
641 		struct nfp_nsp *nsp,
642 		char *card)
643 {
644 	void *fw_buf;
645 	size_t fsize;
646 	char serial[40];
647 	char fw_name[125];
648 	uint16_t interface;
649 	uint32_t cpp_serial_len;
650 	const uint8_t *cpp_serial;
651 	struct nfp_cpp *cpp = nfp_nsp_cpp(nsp);
652 
653 	cpp_serial_len = nfp_cpp_serial(cpp, &cpp_serial);
654 	if (cpp_serial_len != NFP_SERIAL_LEN)
655 		return -ERANGE;
656 
657 	interface = nfp_cpp_interface(cpp);
658 
659 	/* Looking for firmware file in order of priority */
660 
661 	/* First try to find a firmware image specific for this device */
662 	snprintf(serial, sizeof(serial),
663 			"serial-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x",
664 			cpp_serial[0], cpp_serial[1], cpp_serial[2], cpp_serial[3],
665 			cpp_serial[4], cpp_serial[5], interface >> 8, interface & 0xff);
666 	snprintf(fw_name, sizeof(fw_name), "%s/%s.nffw", DEFAULT_FW_PATH, serial);
667 
668 	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
669 	if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0)
670 		goto load_fw;
671 
672 	/* Then try the PCI name */
673 	snprintf(fw_name, sizeof(fw_name), "%s/pci-%s.nffw", DEFAULT_FW_PATH,
674 			dev->name);
675 
676 	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
677 	if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0)
678 		goto load_fw;
679 
680 	/* Finally try the card type and media */
681 	snprintf(fw_name, sizeof(fw_name), "%s/%s", DEFAULT_FW_PATH, card);
682 	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
683 	if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0)
684 		goto load_fw;
685 
686 	PMD_DRV_LOG(ERR, "Can't find suitable firmware.");
687 	return -ENOENT;
688 
689 load_fw:
690 	PMD_DRV_LOG(INFO, "Firmware file found at %s with size: %zu",
691 			fw_name, fsize);
692 	PMD_DRV_LOG(INFO, "Uploading the firmware ...");
693 	nfp_nsp_load_fw(nsp, fw_buf, fsize);
694 	PMD_DRV_LOG(INFO, "Done");
695 
696 	free(fw_buf);
697 
698 	return 0;
699 }
700 
701 static int
702 nfp_fw_setup(struct rte_pci_device *dev,
703 		struct nfp_cpp *cpp,
704 		struct nfp_eth_table *nfp_eth_table,
705 		struct nfp_hwinfo *hwinfo)
706 {
707 	int err;
708 	char card_desc[100];
709 	struct nfp_nsp *nsp;
710 	const char *nfp_fw_model;
711 
712 	nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "nffw.partno");
713 	if (nfp_fw_model == NULL)
714 		nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "assembly.partno");
715 
716 	if (nfp_fw_model != NULL) {
717 		PMD_DRV_LOG(INFO, "firmware model found: %s", nfp_fw_model);
718 	} else {
719 		PMD_DRV_LOG(ERR, "firmware model NOT found");
720 		return -EIO;
721 	}
722 
723 	if (nfp_eth_table->count == 0 || nfp_eth_table->count > 8) {
724 		PMD_DRV_LOG(ERR, "NFP ethernet table reports wrong ports: %u",
725 				nfp_eth_table->count);
726 		return -EIO;
727 	}
728 
729 	PMD_DRV_LOG(INFO, "NFP ethernet port table reports %u ports",
730 			nfp_eth_table->count);
731 
732 	PMD_DRV_LOG(INFO, "Port speed: %u", nfp_eth_table->ports[0].speed);
733 
734 	snprintf(card_desc, sizeof(card_desc), "nic_%s_%dx%d.nffw",
735 			nfp_fw_model, nfp_eth_table->count,
736 			nfp_eth_table->ports[0].speed / 1000);
737 
738 	nsp = nfp_nsp_open(cpp);
739 	if (nsp == NULL) {
740 		PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle");
741 		return -EIO;
742 	}
743 
744 	nfp_nsp_device_soft_reset(nsp);
745 	err = nfp_fw_upload(dev, nsp, card_desc);
746 
747 	nfp_nsp_close(nsp);
748 	return err;
749 }
750 
/*
 * CoreNIC app init: allocate the app struct, map the PF control BAR and
 * create one ethdev per physical port. On failure all ports created so
 * far are torn down again.
 *
 * @param pf_dev    PF device with cpp/sym_tbl/nfp_eth_table populated
 * @param dev_info  Static per-chip parameters
 *
 * @return 0 on success, negative errno on failure.
 */
static int
nfp_init_app_fw_nic(struct nfp_pf_dev *pf_dev,
		const struct nfp_dev_info *dev_info)
{
	uint8_t i;
	int ret = 0;
	uint32_t total_vnics;
	struct nfp_net_hw *hw;
	unsigned int numa_node;
	struct rte_eth_dev *eth_dev;
	struct nfp_app_fw_nic *app_fw_nic;
	struct nfp_eth_table *nfp_eth_table;
	char port_name[RTE_ETH_NAME_MAX_LEN];

	nfp_eth_table = pf_dev->nfp_eth_table;
	PMD_INIT_LOG(INFO, "Total physical ports: %d", nfp_eth_table->count);

	/* Allocate memory for the CoreNIC app */
	app_fw_nic = rte_zmalloc("nfp_app_fw_nic", sizeof(*app_fw_nic), 0);
	if (app_fw_nic == NULL)
		return -ENOMEM;

	/* Point the app_fw_priv pointer in the PF to the coreNIC app */
	pf_dev->app_fw_priv = app_fw_nic;

	/* Read the number of vNIC's created for the PF */
	total_vnics = nfp_rtsym_read_le(pf_dev->sym_tbl, "nfd_cfg_pf0_num_ports", &ret);
	if (ret != 0 || total_vnics == 0 || total_vnics > 8) {
		PMD_INIT_LOG(ERR, "nfd_cfg_pf0_num_ports symbol with wrong value");
		ret = -ENODEV;
		goto app_cleanup;
	}

	/*
	 * For coreNIC the number of vNICs exposed should be the same as the
	 * number of physical ports.
	 */
	if (total_vnics != nfp_eth_table->count) {
		PMD_INIT_LOG(ERR, "Total physical ports do not match number of vNICs");
		ret = -ENODEV;
		goto app_cleanup;
	}

	/* Populate coreNIC app properties */
	app_fw_nic->total_phyports = total_vnics;
	app_fw_nic->pf_dev = pf_dev;
	if (total_vnics > 1)
		app_fw_nic->multiport = true;

	/* Map the symbol table */
	pf_dev->ctrl_bar = nfp_rtsym_map(pf_dev->sym_tbl, "_pf0_net_bar0",
			app_fw_nic->total_phyports * NFP_NET_CFG_BAR_SZ,
			&pf_dev->ctrl_area);
	if (pf_dev->ctrl_bar == NULL) {
		PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for _pf0_net_ctrl_bar");
		ret = -EIO;
		goto app_cleanup;
	}

	PMD_INIT_LOG(DEBUG, "ctrl bar: %p", pf_dev->ctrl_bar);

	/* Loop through all physical ports on PF */
	numa_node = rte_socket_id();
	for (i = 0; i < app_fw_nic->total_phyports; i++) {
		snprintf(port_name, sizeof(port_name), "%s_port%d",
				pf_dev->pci_dev->device.name, i);

		/* Allocate a eth_dev for this phyport */
		eth_dev = rte_eth_dev_allocate(port_name);
		if (eth_dev == NULL) {
			ret = -ENODEV;
			goto port_cleanup;
		}

		/* Allocate memory for this phyport */
		eth_dev->data->dev_private = rte_zmalloc_socket(port_name,
				sizeof(struct nfp_net_hw),
				RTE_CACHE_LINE_SIZE, numa_node);
		if (eth_dev->data->dev_private == NULL) {
			ret = -ENOMEM;
			rte_eth_dev_release_port(eth_dev);
			goto port_cleanup;
		}

		hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

		/* Add this device to the PF's array of physical ports */
		app_fw_nic->ports[i] = hw;

		/* Backpointers nfp_net_init() relies on */
		hw->dev_info = dev_info;
		hw->pf_dev = pf_dev;
		hw->cpp = pf_dev->cpp;
		hw->eth_dev = eth_dev;
		hw->idx = i;
		hw->nfp_idx = nfp_eth_table->ports[i].index;

		eth_dev->device = &pf_dev->pci_dev->device;

		/*
		 * Ctrl/tx/rx BAR mappings and remaining init happens in
		 * @nfp_net_init()
		 */
		ret = nfp_net_init(eth_dev);
		if (ret != 0) {
			ret = -ENODEV;
			goto port_cleanup;
		}

		rte_eth_dev_probing_finish(eth_dev);

	} /* End loop, all ports on this PF */

	return 0;

port_cleanup:
	/* Tear down every port created so far, including the failing one */
	for (i = 0; i < app_fw_nic->total_phyports; i++) {
		if (app_fw_nic->ports[i] != NULL &&
				app_fw_nic->ports[i]->eth_dev != NULL) {
			struct rte_eth_dev *tmp_dev;
			tmp_dev = app_fw_nic->ports[i]->eth_dev;
			nfp_ipsec_uninit(tmp_dev);
			rte_eth_dev_release_port(tmp_dev);
			app_fw_nic->ports[i] = NULL;
		}
	}
	nfp_cpp_area_free(pf_dev->ctrl_area);
app_cleanup:
	rte_free(app_fw_nic);

	return ret;
}
882 
/*
 * Primary-process PF probe: open the CPP interface, read hwinfo and the
 * ethernet table, upload firmware, map the queue controller BAR and then
 * dispatch to the app-specific init (CoreNIC or Flower).
 *
 * On failure every resource acquired so far is released through the
 * chained cleanup labels at the bottom.
 *
 * @return 0 on success, negative errno on failure.
 */
static int
nfp_pf_init(struct rte_pci_device *pci_dev)
{
	int ret = 0;
	uint64_t addr;
	uint32_t cpp_id;
	struct nfp_cpp *cpp;
	struct nfp_pf_dev *pf_dev;
	struct nfp_hwinfo *hwinfo;
	enum nfp_app_fw_id app_fw_id;
	char name[RTE_ETH_NAME_MAX_LEN];
	struct nfp_rtsym_table *sym_tbl;
	struct nfp_eth_table *nfp_eth_table;
	const struct nfp_dev_info *dev_info;

	if (pci_dev == NULL)
		return -ENODEV;

	dev_info = nfp_dev_info_get(pci_dev->id.device_id);
	if (dev_info == NULL) {
		PMD_INIT_LOG(ERR, "Not supported device ID");
		return -ENODEV;
	}

	/*
	 * When device bound to UIO, the device could be used, by mistake,
	 * by two DPDK apps, and the UIO driver does not avoid it. This
	 * could lead to a serious problem when configuring the NFP CPP
	 * interface. Here we avoid this telling to the CPP init code to
	 * use a lock file if UIO is being used.
	 */
	if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO)
		cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, false);
	else
		cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, true);

	if (cpp == NULL) {
		PMD_INIT_LOG(ERR, "A CPP handle can not be obtained");
		return -EIO;
	}

	hwinfo = nfp_hwinfo_read(cpp);
	if (hwinfo == NULL) {
		PMD_INIT_LOG(ERR, "Error reading hwinfo table");
		ret = -EIO;
		goto cpp_cleanup;
	}

	/* Read the number of physical ports from hardware */
	nfp_eth_table = nfp_eth_read_ports(cpp);
	if (nfp_eth_table == NULL) {
		PMD_INIT_LOG(ERR, "Error reading NFP ethernet table");
		ret = -EIO;
		goto hwinfo_cleanup;
	}

	if (nfp_fw_setup(pci_dev, cpp, nfp_eth_table, hwinfo) != 0) {
		PMD_INIT_LOG(ERR, "Error when uploading firmware");
		ret = -EIO;
		goto eth_table_cleanup;
	}

	/* Now the symbol table should be there */
	sym_tbl = nfp_rtsym_table_read(cpp);
	if (sym_tbl == NULL) {
		PMD_INIT_LOG(ERR, "Something is wrong with the firmware symbol table");
		ret = -EIO;
		goto eth_table_cleanup;
	}

	/* Read the app ID of the firmware loaded */
	app_fw_id = nfp_rtsym_read_le(sym_tbl, "_pf0_net_app_id", &ret);
	if (ret != 0) {
		PMD_INIT_LOG(ERR, "Couldn't read app_fw_id from fw");
		ret = -EIO;
		goto sym_tbl_cleanup;
	}

	/* Allocate memory for the PF "device" */
	snprintf(name, sizeof(name), "nfp_pf%d", 0);
	pf_dev = rte_zmalloc(name, sizeof(*pf_dev), 0);
	if (pf_dev == NULL) {
		ret = -ENOMEM;
		goto sym_tbl_cleanup;
	}

	/* Populate the newly created PF device */
	pf_dev->app_fw_id = app_fw_id;
	pf_dev->cpp = cpp;
	pf_dev->hwinfo = hwinfo;
	pf_dev->sym_tbl = sym_tbl;
	pf_dev->pci_dev = pci_dev;
	pf_dev->nfp_eth_table = nfp_eth_table;

	/* Configure access to tx/rx vNIC BARs */
	addr = nfp_qcp_queue_offset(dev_info, 0);
	cpp_id = NFP_CPP_ISLAND_ID(0, NFP_CPP_ACTION_RW, 0, 0);

	pf_dev->qc_bar = nfp_cpp_map_area(pf_dev->cpp, cpp_id,
			addr, dev_info->qc_area_sz, &pf_dev->qc_area);
	if (pf_dev->qc_bar == NULL) {
		PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for net.qc");
		ret = -EIO;
		goto pf_cleanup;
	}

	PMD_INIT_LOG(DEBUG, "qc_bar address: %p", pf_dev->qc_bar);

	/*
	 * PF initialization has been done at this point. Call app specific
	 * init code now.
	 */
	switch (pf_dev->app_fw_id) {
	case NFP_APP_FW_CORE_NIC:
		PMD_INIT_LOG(INFO, "Initializing coreNIC");
		ret = nfp_init_app_fw_nic(pf_dev, dev_info);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Could not initialize coreNIC!");
			goto hwqueues_cleanup;
		}
		break;
	case NFP_APP_FW_FLOWER_NIC:
		PMD_INIT_LOG(INFO, "Initializing Flower");
		ret = nfp_init_app_fw_flower(pf_dev, dev_info);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Could not initialize Flower!");
			goto hwqueues_cleanup;
		}
		break;
	default:
		PMD_INIT_LOG(ERR, "Unsupported Firmware loaded");
		ret = -EINVAL;
		goto hwqueues_cleanup;
	}

	/*
	 * Register the CPP bridge service here for primary use.
	 * Failure is deliberately non-fatal: the PMD works without the
	 * bridge, so only log it.
	 */
	ret = nfp_enable_cpp_service(pf_dev);
	if (ret != 0)
		PMD_INIT_LOG(INFO, "Enable cpp service failed.");

	return 0;

hwqueues_cleanup:
	nfp_cpp_area_free(pf_dev->qc_area);
pf_cleanup:
	rte_free(pf_dev);
sym_tbl_cleanup:
	free(sym_tbl);
eth_table_cleanup:
	free(nfp_eth_table);
hwinfo_cleanup:
	free(hwinfo);
cpp_cleanup:
	nfp_cpp_free(cpp);

	return ret;
}
1040 
1041 static int
1042 nfp_secondary_init_app_fw_nic(struct rte_pci_device *pci_dev,
1043 		struct nfp_rtsym_table *sym_tbl,
1044 		struct nfp_cpp *cpp)
1045 {
1046 	uint32_t i;
1047 	int err = 0;
1048 	int ret = 0;
1049 	uint32_t total_vnics;
1050 	struct nfp_net_hw *hw;
1051 
1052 	/* Read the number of vNIC's created for the PF */
1053 	total_vnics = nfp_rtsym_read_le(sym_tbl, "nfd_cfg_pf0_num_ports", &err);
1054 	if (err != 0 || total_vnics == 0 || total_vnics > 8) {
1055 		PMD_INIT_LOG(ERR, "nfd_cfg_pf0_num_ports symbol with wrong value");
1056 		return -ENODEV;
1057 	}
1058 
1059 	for (i = 0; i < total_vnics; i++) {
1060 		struct rte_eth_dev *eth_dev;
1061 		char port_name[RTE_ETH_NAME_MAX_LEN];
1062 		snprintf(port_name, sizeof(port_name), "%s_port%u",
1063 				pci_dev->device.name, i);
1064 
1065 		PMD_INIT_LOG(DEBUG, "Secondary attaching to port %s", port_name);
1066 		eth_dev = rte_eth_dev_attach_secondary(port_name);
1067 		if (eth_dev == NULL) {
1068 			PMD_INIT_LOG(ERR, "Secondary process attach to port %s failed", port_name);
1069 			ret = -ENODEV;
1070 			break;
1071 		}
1072 
1073 		eth_dev->process_private = cpp;
1074 		hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1075 		nfp_net_ethdev_ops_mount(hw, eth_dev);
1076 
1077 		rte_eth_dev_probing_finish(eth_dev);
1078 	}
1079 
1080 	return ret;
1081 }
1082 
1083 /*
1084  * When attaching to the NFP4000/6000 PF on a secondary process there
1085  * is no need to initialise the PF again. Only minimal work is required
1086  * here.
1087  */
1088 static int
1089 nfp_pf_secondary_init(struct rte_pci_device *pci_dev)
1090 {
1091 	int ret = 0;
1092 	struct nfp_cpp *cpp;
1093 	enum nfp_app_fw_id app_fw_id;
1094 	struct nfp_rtsym_table *sym_tbl;
1095 	const struct nfp_dev_info *dev_info;
1096 
1097 	if (pci_dev == NULL)
1098 		return -ENODEV;
1099 
1100 	dev_info = nfp_dev_info_get(pci_dev->id.device_id);
1101 	if (dev_info == NULL) {
1102 		PMD_INIT_LOG(ERR, "Not supported device ID");
1103 		return -ENODEV;
1104 	}
1105 
1106 	/*
1107 	 * When device bound to UIO, the device could be used, by mistake,
1108 	 * by two DPDK apps, and the UIO driver does not avoid it. This
1109 	 * could lead to a serious problem when configuring the NFP CPP
1110 	 * interface. Here we avoid this telling to the CPP init code to
1111 	 * use a lock file if UIO is being used.
1112 	 */
1113 	if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO)
1114 		cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, false);
1115 	else
1116 		cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, true);
1117 
1118 	if (cpp == NULL) {
1119 		PMD_INIT_LOG(ERR, "A CPP handle can not be obtained");
1120 		return -EIO;
1121 	}
1122 
1123 	/*
1124 	 * We don't have access to the PF created in the primary process
1125 	 * here so we have to read the number of ports from firmware.
1126 	 */
1127 	sym_tbl = nfp_rtsym_table_read(cpp);
1128 	if (sym_tbl == NULL) {
1129 		PMD_INIT_LOG(ERR, "Something is wrong with the firmware symbol table");
1130 		return -EIO;
1131 	}
1132 
1133 	/* Read the app ID of the firmware loaded */
1134 	app_fw_id = nfp_rtsym_read_le(sym_tbl, "_pf0_net_app_id", &ret);
1135 	if (ret != 0) {
1136 		PMD_INIT_LOG(ERR, "Couldn't read app_fw_id from fw");
1137 		goto sym_tbl_cleanup;
1138 	}
1139 
1140 	switch (app_fw_id) {
1141 	case NFP_APP_FW_CORE_NIC:
1142 		PMD_INIT_LOG(INFO, "Initializing coreNIC");
1143 		ret = nfp_secondary_init_app_fw_nic(pci_dev, sym_tbl, cpp);
1144 		if (ret != 0) {
1145 			PMD_INIT_LOG(ERR, "Could not initialize coreNIC!");
1146 			goto sym_tbl_cleanup;
1147 		}
1148 		break;
1149 	case NFP_APP_FW_FLOWER_NIC:
1150 		PMD_INIT_LOG(INFO, "Initializing Flower");
1151 		ret = nfp_secondary_init_app_fw_flower(cpp);
1152 		if (ret != 0) {
1153 			PMD_INIT_LOG(ERR, "Could not initialize Flower!");
1154 			goto sym_tbl_cleanup;
1155 		}
1156 		break;
1157 	default:
1158 		PMD_INIT_LOG(ERR, "Unsupported Firmware loaded");
1159 		ret = -EINVAL;
1160 		goto sym_tbl_cleanup;
1161 	}
1162 
1163 sym_tbl_cleanup:
1164 	free(sym_tbl);
1165 
1166 	return ret;
1167 }
1168 
1169 static int
1170 nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1171 		struct rte_pci_device *dev)
1172 {
1173 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
1174 		return nfp_pf_init(dev);
1175 	else
1176 		return nfp_pf_secondary_init(dev);
1177 }
1178 
1179 static const struct rte_pci_id pci_id_nfp_pf_net_map[] = {
1180 	{
1181 		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
1182 				PCI_DEVICE_ID_NFP3800_PF_NIC)
1183 	},
1184 	{
1185 		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
1186 				PCI_DEVICE_ID_NFP4000_PF_NIC)
1187 	},
1188 	{
1189 		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
1190 				PCI_DEVICE_ID_NFP6000_PF_NIC)
1191 	},
1192 	{
1193 		RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE,
1194 				PCI_DEVICE_ID_NFP3800_PF_NIC)
1195 	},
1196 	{
1197 		RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE,
1198 				PCI_DEVICE_ID_NFP4000_PF_NIC)
1199 	},
1200 	{
1201 		RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE,
1202 				PCI_DEVICE_ID_NFP6000_PF_NIC)
1203 	},
1204 	{
1205 		.vendor_id = 0,
1206 	},
1207 };
1208 
1209 static int
1210 nfp_pci_uninit(struct rte_eth_dev *eth_dev)
1211 {
1212 	uint16_t port_id;
1213 	struct rte_pci_device *pci_dev;
1214 
1215 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1216 
1217 	/* Free up all physical ports under PF */
1218 	RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device)
1219 		rte_eth_dev_close(port_id);
1220 	/*
1221 	 * Ports can be closed and freed but hotplugging is not
1222 	 * currently supported.
1223 	 */
1224 	return -ENOTSUP;
1225 }
1226 
1227 static int
1228 eth_nfp_pci_remove(struct rte_pci_device *pci_dev)
1229 {
1230 	return rte_eth_dev_pci_generic_remove(pci_dev, nfp_pci_uninit);
1231 }
1232 
/* PCI driver descriptor for the NFP PF PMD. */
static struct rte_pci_driver rte_nfp_net_pf_pmd = {
	.id_table = pci_id_nfp_pf_net_map,
	/* BARs must be mapped by EAL; driver reports link state change interrupts. */
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = nfp_pf_pci_probe,
	.remove = eth_nfp_pci_remove,
};
1239 
/* Register the driver, its PCI ID table and the kernel modules it can bind to. */
RTE_PMD_REGISTER_PCI(net_nfp_pf, rte_nfp_net_pf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_nfp_pf, pci_id_nfp_pf_net_map);
RTE_PMD_REGISTER_KMOD_DEP(net_nfp_pf, "* igb_uio | uio_pci_generic | vfio");
1243