/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2014-2021 Netronome Systems, Inc.
 * All rights reserved.
 *
 * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
 */

/*
 * vim:shiftwidth=8:noexpandtab
 *
 * @file dpdk/pmd/nfp_ethdev.c
 *
 * Netronome vNIC DPDK Poll-Mode Driver: Main entry point
 */

#include <rte_common.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <dev_driver.h>
#include <rte_ether.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_mempool.h>
#include <rte_service_component.h>
#include <rte_alarm.h>
#include "eal_firmware.h"

#include "nfpcore/nfp_cpp.h"
#include "nfpcore/nfp_nffw.h"
#include "nfpcore/nfp_hwinfo.h"
#include "nfpcore/nfp_mip.h"
#include "nfpcore/nfp_rtsym.h"
#include "nfpcore/nfp_nsp.h"

#include "nfp_common.h"
#include "nfp_ctrl.h"
#include "nfp_rxtx.h"
#include "nfp_logs.h"
#include "nfp_cpp_bridge.h"

#include "flower/nfp_flower.h"

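/*
 * Read the MAC address of one physical port from the NSP ethernet table
 * and store it in that port's private data (hw->mac_addr).
 */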
static int
nfp_net_pf_read_mac(struct nfp_app_fw_nic *app_fw_nic, int port)
{
	struct nfp_eth_table *nfp_eth_table;
	struct nfp_net_hw *hw = NULL;

	/* Grab a pointer to the correct physical port */
	hw = app_fw_nic->ports[port];

	nfp_eth_table = nfp_eth_read_ports(app_fw_nic->pf_dev->cpp);
	if (nfp_eth_table == NULL) {
		PMD_INIT_LOG(ERR, "Error reading NFP ethernet table");
		return -EIO;
	}

	nfp_eth_copy_mac((uint8_t *)&hw->mac_addr,
			 (uint8_t *)&nfp_eth_table->ports[port].mac_addr);

	free(nfp_eth_table);
	return 0;
}

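/*
 * Start the device: enable the configured Rx/Tx queues, set up Rx
 * interrupts when requested, apply RSS/VXLAN/offload configuration,
 * reconfigure the firmware, allocate Rx buffers and bring the physical
 * port up.
 */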
static int
nfp_net_start(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint32_t new_ctrl, update = 0;
	struct nfp_net_hw *hw;
	struct nfp_pf_dev *pf_dev;
	struct nfp_app_fw_nic *app_fw_nic;
	struct rte_eth_conf *dev_conf;
	struct rte_eth_rxmode *rxmode;
	uint32_t intr_vector;
	int ret;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);

	PMD_INIT_LOG(DEBUG, "Start");

	/* Disabling queues just in case... */
	nfp_net_disable_queues(dev);

	/* Enabling the required queues in the device */
	nfp_net_enable_queues(dev);

	/* Check and configure queue intr-vector mapping */
	if (dev->data->dev_conf.intr_conf.rxq != 0) {
		if (app_fw_nic->multiport) {
			PMD_INIT_LOG(ERR, "PMD rx interrupt is not supported "
					  "with NFP multiport PF");
			return -EINVAL;
		}
		if (rte_intr_type_get(intr_handle) ==
						RTE_INTR_HANDLE_UIO) {
			/*
			 * Better not to share LSC with RX interrupts.
			 * Unregistering the LSC interrupt handler.
			 */
			rte_intr_callback_unregister(pci_dev->intr_handle,
				nfp_net_dev_interrupt_handler, (void *)dev);

			if (dev->data->nb_rx_queues > 1) {
				PMD_INIT_LOG(ERR, "PMD rx interrupt only "
					     "supports 1 queue with UIO");
				return -EIO;
			}
		}
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;

		nfp_configure_rx_interrupt(dev, intr_handle);
		update = NFP_NET_CFG_UPDATE_MSIX;
	}

	/* Checking the configured MTU */
	if (dev->data->mtu > hw->flbufsz) {
		PMD_INIT_LOG(ERR, "MTU (%u) can't be larger than the current NFP_FRAME_SIZE (%u)",
				dev->data->mtu, hw->flbufsz);
		return -ERANGE;
	}

	rte_intr_enable(intr_handle);

	new_ctrl = nfp_check_offloads(dev);

	/* Writing configuration parameters in the device */
	nfp_net_params_setup(hw);

	dev_conf = &dev->data->dev_conf;
	rxmode = &dev_conf->rxmode;

	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) {
		nfp_net_rss_config_default(dev);
		update |= NFP_NET_CFG_UPDATE_RSS;
		if (hw->cap & NFP_NET_CFG_CTRL_RSS2)
			new_ctrl |= NFP_NET_CFG_CTRL_RSS2;
		else
			new_ctrl |= NFP_NET_CFG_CTRL_RSS;
	}

	/* Enable device */
	new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;

	update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;

	/* Enable VXLAN offload */
	if (hw->cap & NFP_NET_CFG_CTRL_VXLAN) {
		new_ctrl |= NFP_NET_CFG_CTRL_VXLAN;
		update |= NFP_NET_CFG_UPDATE_VXLAN;
	}

	if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
		new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;

	nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
	if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
		return -EIO;

	/*
	 * Allocating rte mbufs for the configured rx queues.
	 * This requires the queues to be enabled beforehand.
	 */
	if (nfp_net_rx_freelist_setup(dev) < 0) {
		ret = -ENOMEM;
		goto error;
	}

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		/* Configure the physical port up */
		nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 1);
	else
		nfp_eth_set_configured(dev->process_private,
				       hw->nfp_idx, 1);

	hw->ctrl = new_ctrl;

	return 0;

error:
	/*
	 * An error returned by this function should cause the app to
	 * exit, at which point the system releases all the memory it
	 * allocated, even memory coming from hugepages.
	 *
	 * The device could be enabled at this point with some queues
	 * ready for getting packets. This is true if the call to
	 * nfp_net_rx_freelist_setup() succeeds for some queues but
	 * fails for subsequent queues.
	 *
	 * The app should exit in that case, but it is better to tell
	 * the device first.
	 */
	nfp_net_disable_queues(dev);

	return ret;
}

/* Stop device: disable rx and tx functions to allow for reconfiguring. */
static int
nfp_net_stop(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;

	PMD_INIT_LOG(DEBUG, "Stop");

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	nfp_net_disable_queues(dev);

	/* Clear queues */
	nfp_net_stop_tx_queue(dev);

	nfp_net_stop_rx_queue(dev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		/* Configure the physical port down */
		nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 0);
	else
		nfp_eth_set_configured(dev->process_private,
				       hw->nfp_idx, 0);

	return 0;
}

/* Set the link up. */
static int
nfp_net_set_link_up(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;

	PMD_DRV_LOG(DEBUG, "Set link up");

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		/* Configure the physical port up */
		return nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 1);
	else
		return nfp_eth_set_configured(dev->process_private,
					      hw->nfp_idx, 1);
}

/* Set the link down. */
static int
nfp_net_set_link_down(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;

	PMD_DRV_LOG(DEBUG, "Set link down");

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		/* Configure the physical port down */
		return nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 0);
	else
		return nfp_eth_set_configured(dev->process_private,
					      hw->nfp_idx, 0);
}

/* Reset and stop device. The device can not be restarted. */
static int
nfp_net_close(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;
	struct rte_pci_device *pci_dev;
	struct nfp_pf_dev *pf_dev;
	struct nfp_app_fw_nic *app_fw_nic;
	int i;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	PMD_INIT_LOG(DEBUG, "Close");

	pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);

	/*
	 * We assume that the DPDK application is stopping all the
	 * threads/queues before calling the device close function.
	 */

	nfp_net_disable_queues(dev);

	/* Clear queues */
	nfp_net_close_tx_queue(dev);

	nfp_net_close_rx_queue(dev);

	/* Cancel possible impending LSC work here before releasing the port */
	rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler,
			     (void *)dev);

	/* Only free PF resources after all physical ports have been closed */
	/* Mark this port as unused and free device priv resources */
	nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff);
	app_fw_nic->ports[hw->idx] = NULL;
	rte_eth_dev_release_port(dev);

	for (i = 0; i < app_fw_nic->total_phyports; i++) {
		/* Check to see if ports are still in use */
		if (app_fw_nic->ports[i])
			return 0;
	}

	/* Now it is safe to free all PF resources */
	PMD_INIT_LOG(INFO, "Freeing PF resources");
	nfp_cpp_area_free(pf_dev->ctrl_area);
	nfp_cpp_area_free(pf_dev->hwqueues_area);
	free(pf_dev->hwinfo);
	free(pf_dev->sym_tbl);
	nfp_cpp_free(pf_dev->cpp);
	rte_free(app_fw_nic);
	rte_free(pf_dev);

	rte_intr_disable(pci_dev->intr_handle);

	/* Unregister callback func from eal lib */
	rte_intr_callback_unregister(pci_dev->intr_handle,
			nfp_net_dev_interrupt_handler, (void *)dev);

	/*
	 * The ixgbe PMD disables the PCIe master on the
	 * device. The i40e does not...
	 */

	return 0;
}

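/*
 * Look up the VXLAN port table slot for a UDP port: return the index of
 * the first entry that either already holds this port or is unused,
 * or -EINVAL when no such entry exists.
 */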
static int
nfp_net_find_vxlan_idx(struct nfp_net_hw *hw,
		uint16_t port,
		uint32_t *idx)
{
	uint32_t i;
	int free_idx = -1;

	for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i++) {
		if (hw->vxlan_ports[i] == port) {
			free_idx = i;
			break;
		}

		if (hw->vxlan_usecnt[i] == 0) {
			free_idx = i;
			break;
		}
	}

	if (free_idx == -1)
		return -EINVAL;

	*idx = free_idx;

	return 0;
}

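/*
 * udp_tunnel_port_add callback: register a VXLAN UDP port with the
 * firmware. Only VXLAN tunnels are supported; a per-slot reference count
 * avoids reprogramming ports that are already offloaded.
 */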
static int
nfp_udp_tunnel_port_add(struct rte_eth_dev *dev,
		struct rte_eth_udp_tunnel *tunnel_udp)
{
	int ret;
	uint32_t idx;
	uint16_t vxlan_port;
	struct nfp_net_hw *hw;
	enum rte_eth_tunnel_type tnl_type;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	vxlan_port = tunnel_udp->udp_port;
	tnl_type   = tunnel_udp->prot_type;

	if (tnl_type != RTE_ETH_TUNNEL_TYPE_VXLAN) {
		PMD_DRV_LOG(ERR, "Not VXLAN tunnel");
		return -ENOTSUP;
	}

	ret = nfp_net_find_vxlan_idx(hw, vxlan_port, &idx);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Failed to find a valid vxlan idx");
		return -EINVAL;
	}

	if (hw->vxlan_usecnt[idx] == 0) {
		ret = nfp_net_set_vxlan_port(hw, idx, vxlan_port);
		if (ret != 0) {
			PMD_DRV_LOG(ERR, "Failed to set vxlan port");
			return -EINVAL;
		}
	}

	hw->vxlan_usecnt[idx]++;

	return 0;
}

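/*
 * udp_tunnel_port_del callback: drop a reference on a VXLAN UDP port
 * and clear the firmware table entry once the last user is gone.
 */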
static int
nfp_udp_tunnel_port_del(struct rte_eth_dev *dev,
		struct rte_eth_udp_tunnel *tunnel_udp)
{
	int ret;
	uint32_t idx;
	uint16_t vxlan_port;
	struct nfp_net_hw *hw;
	enum rte_eth_tunnel_type tnl_type;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	vxlan_port = tunnel_udp->udp_port;
	tnl_type   = tunnel_udp->prot_type;

	if (tnl_type != RTE_ETH_TUNNEL_TYPE_VXLAN) {
		PMD_DRV_LOG(ERR, "Not VXLAN tunnel");
		return -ENOTSUP;
	}

	ret = nfp_net_find_vxlan_idx(hw, vxlan_port, &idx);
	if (ret != 0 || hw->vxlan_usecnt[idx] == 0) {
		PMD_DRV_LOG(ERR, "Failed to find a valid vxlan idx");
		return -EINVAL;
	}

	hw->vxlan_usecnt[idx]--;

	if (hw->vxlan_usecnt[idx] == 0) {
		ret = nfp_net_set_vxlan_port(hw, idx, 0);
		if (ret != 0) {
			PMD_DRV_LOG(ERR, "Failed to set vxlan port");
			return -EINVAL;
		}
	}

	return 0;
}

440 static const struct eth_dev_ops nfp_net_eth_dev_ops = {
441 	.dev_configure		= nfp_net_configure,
442 	.dev_start		= nfp_net_start,
443 	.dev_stop		= nfp_net_stop,
444 	.dev_set_link_up	= nfp_net_set_link_up,
445 	.dev_set_link_down	= nfp_net_set_link_down,
446 	.dev_close		= nfp_net_close,
447 	.promiscuous_enable	= nfp_net_promisc_enable,
448 	.promiscuous_disable	= nfp_net_promisc_disable,
449 	.link_update		= nfp_net_link_update,
450 	.stats_get		= nfp_net_stats_get,
451 	.stats_reset		= nfp_net_stats_reset,
452 	.dev_infos_get		= nfp_net_infos_get,
453 	.dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
454 	.mtu_set		= nfp_net_dev_mtu_set,
455 	.mac_addr_set		= nfp_net_set_mac_addr,
456 	.vlan_offload_set	= nfp_net_vlan_offload_set,
457 	.reta_update		= nfp_net_reta_update,
458 	.reta_query		= nfp_net_reta_query,
459 	.rss_hash_update	= nfp_net_rss_hash_update,
460 	.rss_hash_conf_get	= nfp_net_rss_hash_conf_get,
461 	.rx_queue_setup		= nfp_net_rx_queue_setup,
462 	.rx_queue_release	= nfp_net_rx_queue_release,
463 	.tx_queue_setup		= nfp_net_tx_queue_setup,
464 	.tx_queue_release	= nfp_net_tx_queue_release,
465 	.rx_queue_intr_enable   = nfp_rx_queue_intr_enable,
466 	.rx_queue_intr_disable  = nfp_rx_queue_intr_disable,
467 	.udp_tunnel_port_add    = nfp_udp_tunnel_port_add,
468 	.udp_tunnel_port_del    = nfp_udp_tunnel_port_del,
469 };
470 
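/*
 * Hook up the ethdev ops and the Rx/Tx burst functions that match the
 * datapath (NFD3 or NFDk) advertised by the firmware version word.
 */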
static inline int
nfp_net_ethdev_ops_mount(struct nfp_net_hw *hw, struct rte_eth_dev *eth_dev)
{
	switch (NFD_CFG_CLASS_VER_of(hw->ver)) {
	case NFP_NET_CFG_VERSION_DP_NFD3:
		eth_dev->tx_pkt_burst = &nfp_net_nfd3_xmit_pkts;
		break;
	case NFP_NET_CFG_VERSION_DP_NFDK:
		if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 5) {
			PMD_DRV_LOG(ERR, "NFDK must use ABI 5 or newer, found: %d",
				NFD_CFG_MAJOR_VERSION_of(hw->ver));
			return -EINVAL;
		}
		eth_dev->tx_pkt_burst = &nfp_net_nfdk_xmit_pkts;
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported firmware data path version.");
		return -EINVAL;
	}

	eth_dev->dev_ops = &nfp_net_eth_dev_ops;
	eth_dev->rx_queue_count = nfp_net_rx_queue_count;
	eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;

	return 0;
}

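/*
 * Per-port initialization: map the control/Tx/Rx BARs for this physical
 * port, read the device capabilities, set up the MAC address and
 * register the LSC interrupt handler.
 */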
static int
nfp_net_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct nfp_pf_dev *pf_dev;
	struct nfp_app_fw_nic *app_fw_nic;
	struct nfp_net_hw *hw;
	struct rte_ether_addr *tmp_ether_addr;
	uint64_t rx_bar_off = 0;
	uint64_t tx_bar_off = 0;
	uint32_t start_q;
	int stride = 4;
	int port = 0;

	PMD_INIT_FUNC_TRACE();

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	/* Use backpointer here to the PF of this eth_dev */
	pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(eth_dev->data->dev_private);

	/* Use backpointer to the CoreNIC app struct */
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);

	/* NFP can not handle DMA addresses requiring more than 40 bits */
	if (rte_mem_check_dma_mask(40)) {
		RTE_LOG(ERR, PMD,
			"device %s can not be used: restricted dma mask to 40 bits!\n",
			pci_dev->device.name);
		return -ENODEV;
	}

	port = ((struct nfp_net_hw *)eth_dev->data->dev_private)->idx;
	if (port < 0 || port > 7) {
		PMD_DRV_LOG(ERR, "Port value is wrong");
		return -ENODEV;
	}

	/*
	 * Use the PF array of physical ports to get a pointer to
	 * this specific port.
	 */
	hw = app_fw_nic->ports[port];

	PMD_INIT_LOG(DEBUG, "Working with physical port number: %d, "
			"NFP internal port number: %d", port, hw->nfp_idx);

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;

	PMD_INIT_LOG(DEBUG, "nfp_net: device (%u:%u) %u:%u:%u:%u",
		     pci_dev->id.vendor_id, pci_dev->id.device_id,
		     pci_dev->addr.domain, pci_dev->addr.bus,
		     pci_dev->addr.devid, pci_dev->addr.function);

	hw->ctrl_bar = (uint8_t *)pci_dev->mem_resource[0].addr;
	if (hw->ctrl_bar == NULL) {
		PMD_DRV_LOG(ERR,
			"hw->ctrl_bar is NULL. BAR0 not configured");
		return -ENODEV;
	}

	if (port == 0) {
		hw->ctrl_bar = pf_dev->ctrl_bar;
	} else {
		if (pf_dev->ctrl_bar == NULL)
			return -ENODEV;
		/* Use port offset in pf ctrl_bar for this port's control bar */
		hw->ctrl_bar = pf_dev->ctrl_bar + (port * NFP_PF_CSR_SLICE_SIZE);
	}

	PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar);

	hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION);

	if (nfp_net_ethdev_ops_mount(hw, eth_dev))
		return -EINVAL;

	hw->max_rx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_RXRINGS);
	hw->max_tx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_TXRINGS);

	/* Work out where in the BAR the queues start. */
	switch (pci_dev->id.device_id) {
	case PCI_DEVICE_ID_NFP3800_PF_NIC:
	case PCI_DEVICE_ID_NFP4000_PF_NIC:
	case PCI_DEVICE_ID_NFP6000_PF_NIC:
		start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
		tx_bar_off = nfp_pci_queue(pci_dev, start_q);
		start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
		rx_bar_off = nfp_pci_queue(pci_dev, start_q);
		break;
	default:
		PMD_DRV_LOG(ERR, "nfp_net: no device ID matching");
		return -ENODEV;
	}

	PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%" PRIx64 "", tx_bar_off);
	PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%" PRIx64 "", rx_bar_off);

	hw->tx_bar = pf_dev->hw_queues + tx_bar_off;
	hw->rx_bar = pf_dev->hw_queues + rx_bar_off;
	eth_dev->data->dev_private = hw;

	PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
		     hw->ctrl_bar, hw->tx_bar, hw->rx_bar);

	nfp_net_cfg_queue_setup(hw);

	/* Get some of the read-only fields from the config BAR */
	hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
	hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
	hw->mtu = RTE_ETHER_MTU;

	/* VLAN insertion is incompatible with LSOv2 */
	if (hw->cap & NFP_NET_CFG_CTRL_LSO2)
		hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN;

	if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2)
		hw->rx_offset = NFP_NET_RX_OFFSET;
	else
		hw->rx_offset = nn_cfg_readl(hw, NFP_NET_CFG_RX_OFFSET_ADDR);

	hw->ctrl = 0;

	hw->stride_rx = stride;
	hw->stride_tx = stride;

	nfp_net_log_device_information(hw);

	/* Initializing spinlock for reconfigs */
	rte_spinlock_init(&hw->reconfig_lock);

	/* Allocating memory for mac addr */
	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
					       RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate memory for MAC address");
		return -ENOMEM;
	}

	nfp_net_pf_read_mac(app_fw_nic, port);
	nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);

	tmp_ether_addr = (struct rte_ether_addr *)&hw->mac_addr;
	if (!rte_is_valid_assigned_ether_addr(tmp_ether_addr)) {
		PMD_INIT_LOG(INFO, "Using random mac address for port %d", port);
		/* Using a random mac address for the port */
		rte_eth_random_addr(&hw->mac_addr[0]);
		nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
	}

	/* Copying mac address to DPDK eth_dev struct */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac_addr,
			&eth_dev->data->mac_addrs[0]);

	if ((hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR) == 0)
		eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR;

	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x "
		     "mac=" RTE_ETHER_ADDR_PRT_FMT,
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id,
		     hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
		     hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);

	/* Registering LSC interrupt handler */
	rte_intr_callback_register(pci_dev->intr_handle,
			nfp_net_dev_interrupt_handler, (void *)eth_dev);
	/* Telling the firmware about the LSC interrupt entry */
	nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
	/* Recording current stats counters values */
	nfp_net_stats_reset(eth_dev);

	return 0;
}

#define DEFAULT_FW_PATH       "/lib/firmware/netronome"

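/*
 * Find a firmware image for this device, trying (in order of priority) a
 * file named after the card serial number, the PCI name, and finally the
 * card type and media description, then upload it through the NSP.
 */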
static int
nfp_fw_upload(struct rte_pci_device *dev, struct nfp_nsp *nsp, char *card)
{
	struct nfp_cpp *cpp = nsp->cpp;
	void *fw_buf;
	char fw_name[125];
	char serial[40];
	size_t fsize;

	/* Looking for firmware file in order of priority */

	/* First try to find a firmware image specific for this device */
	snprintf(serial, sizeof(serial),
			"serial-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x",
		cpp->serial[0], cpp->serial[1], cpp->serial[2], cpp->serial[3],
		cpp->serial[4], cpp->serial[5], cpp->interface >> 8,
		cpp->interface & 0xff);

	snprintf(fw_name, sizeof(fw_name), "%s/%s.nffw", DEFAULT_FW_PATH,
			serial);

	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
	if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0)
		goto load_fw;

	/* Then try the PCI name */
	snprintf(fw_name, sizeof(fw_name), "%s/pci-%s.nffw", DEFAULT_FW_PATH,
			dev->name);

	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
	if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0)
		goto load_fw;

	/* Finally try the card type and media */
	snprintf(fw_name, sizeof(fw_name), "%s/%s", DEFAULT_FW_PATH, card);
	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
	if (rte_firmware_read(fw_name, &fw_buf, &fsize) < 0) {
		PMD_DRV_LOG(INFO, "Firmware file %s not found.", fw_name);
		return -ENOENT;
	}

load_fw:
	PMD_DRV_LOG(INFO, "Firmware file found at %s with size: %zu",
		fw_name, fsize);
	PMD_DRV_LOG(INFO, "Uploading the firmware ...");
	nfp_nsp_load_fw(nsp, fw_buf, fsize);
	PMD_DRV_LOG(INFO, "Done");

	free(fw_buf);

	return 0;
}

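/*
 * Validate the hwinfo and ethernet tables, build the expected firmware
 * file name from the card model, port count and port speed, then
 * soft-reset the device and upload the firmware via the NSP.
 */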
static int
nfp_fw_setup(struct rte_pci_device *dev,
		struct nfp_cpp *cpp,
		struct nfp_eth_table *nfp_eth_table,
		struct nfp_hwinfo *hwinfo)
{
	struct nfp_nsp *nsp;
	const char *nfp_fw_model;
	char card_desc[100];
	int err = 0;

	nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "nffw.partno");
	if (nfp_fw_model == NULL)
		nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "assembly.partno");

	if (nfp_fw_model) {
		PMD_DRV_LOG(INFO, "firmware model found: %s", nfp_fw_model);
	} else {
		PMD_DRV_LOG(ERR, "firmware model NOT found");
		return -EIO;
	}

	if (nfp_eth_table->count == 0 || nfp_eth_table->count > 8) {
		PMD_DRV_LOG(ERR, "NFP ethernet table reports an invalid port count: %u",
			nfp_eth_table->count);
		return -EIO;
	}

	PMD_DRV_LOG(INFO, "NFP ethernet port table reports %u ports",
			nfp_eth_table->count);

	PMD_DRV_LOG(INFO, "Port speed: %u", nfp_eth_table->ports[0].speed);

	snprintf(card_desc, sizeof(card_desc), "nic_%s_%dx%d.nffw",
			nfp_fw_model, nfp_eth_table->count,
			nfp_eth_table->ports[0].speed / 1000);

	nsp = nfp_nsp_open(cpp);
	if (nsp == NULL) {
		PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle");
		return -EIO;
	}

	nfp_nsp_device_soft_reset(nsp);
	err = nfp_fw_upload(dev, nsp, card_desc);

	nfp_nsp_close(nsp);
	return err;
}

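/*
 * CoreNIC application setup: one vNIC (and hence one eth_dev) is created
 * per physical port. Map the PF control BAR and allocate and initialize
 * an eth_dev for every port reported by the firmware.
 */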
static int
nfp_init_app_fw_nic(struct nfp_pf_dev *pf_dev)
{
	int i;
	int ret;
	int err = 0;
	int total_vnics;
	struct nfp_net_hw *hw;
	unsigned int numa_node;
	struct rte_eth_dev *eth_dev;
	struct nfp_app_fw_nic *app_fw_nic;
	struct nfp_eth_table *nfp_eth_table;
	char port_name[RTE_ETH_NAME_MAX_LEN];

	nfp_eth_table = pf_dev->nfp_eth_table;
	PMD_INIT_LOG(INFO, "Total physical ports: %u", nfp_eth_table->count);

	/* Allocate memory for the CoreNIC app */
	app_fw_nic = rte_zmalloc("nfp_app_fw_nic", sizeof(*app_fw_nic), 0);
	if (app_fw_nic == NULL)
		return -ENOMEM;

	/* Point the app_fw_priv pointer in the PF to the coreNIC app */
	pf_dev->app_fw_priv = app_fw_nic;

	/* Read the number of vNICs created for the PF */
	total_vnics = nfp_rtsym_read_le(pf_dev->sym_tbl, "nfd_cfg_pf0_num_ports", &err);
	if (err != 0 || total_vnics <= 0 || total_vnics > 8) {
		PMD_INIT_LOG(ERR, "nfd_cfg_pf0_num_ports symbol with wrong value");
		ret = -ENODEV;
		goto app_cleanup;
	}

	/*
	 * For coreNIC the number of vNICs exposed should be the same as the
	 * number of physical ports.
	 */
	if (total_vnics != (int)nfp_eth_table->count) {
		PMD_INIT_LOG(ERR, "Total physical ports do not match number of vNICs");
		ret = -ENODEV;
		goto app_cleanup;
	}

	/* Populate coreNIC app properties */
	app_fw_nic->total_phyports = total_vnics;
	app_fw_nic->pf_dev = pf_dev;
	if (total_vnics > 1)
		app_fw_nic->multiport = true;

	/* Map the PF control BAR using the symbol table */
	pf_dev->ctrl_bar = nfp_rtsym_map(pf_dev->sym_tbl, "_pf0_net_bar0",
			app_fw_nic->total_phyports * 32768, &pf_dev->ctrl_area);
	if (pf_dev->ctrl_bar == NULL) {
		PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for _pf0_net_bar0");
		ret = -EIO;
		goto app_cleanup;
	}

	PMD_INIT_LOG(DEBUG, "ctrl bar: %p", pf_dev->ctrl_bar);

	/* Loop through all physical ports on the PF */
	numa_node = rte_socket_id();
	for (i = 0; i < app_fw_nic->total_phyports; i++) {
		snprintf(port_name, sizeof(port_name), "%s_port%d",
			 pf_dev->pci_dev->device.name, i);

		/* Allocate an eth_dev for this phyport */
		eth_dev = rte_eth_dev_allocate(port_name);
		if (eth_dev == NULL) {
			ret = -ENODEV;
			goto port_cleanup;
		}

		/* Allocate memory for this phyport */
		eth_dev->data->dev_private =
			rte_zmalloc_socket(port_name, sizeof(struct nfp_net_hw),
				RTE_CACHE_LINE_SIZE, numa_node);
		if (eth_dev->data->dev_private == NULL) {
			ret = -ENOMEM;
			rte_eth_dev_release_port(eth_dev);
			goto port_cleanup;
		}

		hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

		/* Add this device to the PF's array of physical ports */
		app_fw_nic->ports[i] = hw;

		hw->pf_dev = pf_dev;
		hw->cpp = pf_dev->cpp;
		hw->eth_dev = eth_dev;
		hw->idx = i;
		hw->nfp_idx = nfp_eth_table->ports[i].index;

		eth_dev->device = &pf_dev->pci_dev->device;

		/*
		 * The ctrl/tx/rx BAR mappings and the remaining init happen
		 * in nfp_net_init().
		 */
		ret = nfp_net_init(eth_dev);
		if (ret) {
			ret = -ENODEV;
			goto port_cleanup;
		}

		rte_eth_dev_probing_finish(eth_dev);

	} /* End loop, all ports on this PF */

	return 0;

port_cleanup:
	for (i = 0; i < app_fw_nic->total_phyports; i++) {
		if (app_fw_nic->ports[i] && app_fw_nic->ports[i]->eth_dev) {
			struct rte_eth_dev *tmp_dev;
			tmp_dev = app_fw_nic->ports[i]->eth_dev;
			rte_eth_dev_release_port(tmp_dev);
			app_fw_nic->ports[i] = NULL;
		}
	}
	nfp_cpp_area_free(pf_dev->ctrl_area);
app_cleanup:
	rte_free(app_fw_nic);

	return ret;
}

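/*
 * Primary process PF initialization: acquire a CPP handle, read the
 * hwinfo and ethernet tables, upload firmware, map the queue area and
 * dispatch to the application-specific (coreNIC or flower) init code.
 */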
static int
nfp_pf_init(struct rte_pci_device *pci_dev)
{
	int ret;
	int err = 0;
	uint64_t addr;
	struct nfp_cpp *cpp;
	enum nfp_app_fw_id app_fw_id;
	struct nfp_pf_dev *pf_dev;
	struct nfp_hwinfo *hwinfo;
	char name[RTE_ETH_NAME_MAX_LEN];
	struct nfp_rtsym_table *sym_tbl;
	struct nfp_eth_table *nfp_eth_table;

	if (pci_dev == NULL)
		return -ENODEV;

	/*
	 * When the device is bound to UIO it could, by mistake, be used by
	 * two DPDK apps, and the UIO driver does not prevent it. This could
	 * lead to a serious problem when configuring the NFP CPP interface.
	 * Here we avoid this by telling the CPP init code to use a lock
	 * file if UIO is being used.
	 */
	if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO)
		cpp = nfp_cpp_from_device_name(pci_dev, 0);
	else
		cpp = nfp_cpp_from_device_name(pci_dev, 1);

	if (cpp == NULL) {
		PMD_INIT_LOG(ERR, "A CPP handle can not be obtained");
		return -EIO;
	}

	hwinfo = nfp_hwinfo_read(cpp);
	if (hwinfo == NULL) {
		PMD_INIT_LOG(ERR, "Error reading hwinfo table");
		ret = -EIO;
		goto cpp_cleanup;
	}

	/* Read the number of physical ports from hardware */
	nfp_eth_table = nfp_eth_read_ports(cpp);
	if (nfp_eth_table == NULL) {
		PMD_INIT_LOG(ERR, "Error reading NFP ethernet table");
		ret = -EIO;
		goto hwinfo_cleanup;
	}

	if (nfp_fw_setup(pci_dev, cpp, nfp_eth_table, hwinfo)) {
		PMD_INIT_LOG(ERR, "Error when uploading firmware");
		ret = -EIO;
		goto eth_table_cleanup;
	}

	/* Now the symbol table should be there */
	sym_tbl = nfp_rtsym_table_read(cpp);
	if (sym_tbl == NULL) {
		PMD_INIT_LOG(ERR, "Something is wrong with the firmware"
				" symbol table");
		ret = -EIO;
		goto eth_table_cleanup;
	}

	/* Read the app ID of the firmware loaded */
	app_fw_id = nfp_rtsym_read_le(sym_tbl, "_pf0_net_app_id", &err);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Couldn't read app_fw_id from fw");
		ret = -EIO;
		goto sym_tbl_cleanup;
	}

	/* Allocate memory for the PF "device" */
	snprintf(name, sizeof(name), "nfp_pf%d", 0);
	pf_dev = rte_zmalloc(name, sizeof(*pf_dev), 0);
	if (pf_dev == NULL) {
		ret = -ENOMEM;
		goto sym_tbl_cleanup;
	}

	/* Populate the newly created PF device */
	pf_dev->app_fw_id = app_fw_id;
	pf_dev->cpp = cpp;
	pf_dev->hwinfo = hwinfo;
	pf_dev->sym_tbl = sym_tbl;
	pf_dev->pci_dev = pci_dev;
	pf_dev->nfp_eth_table = nfp_eth_table;

	/* Configure access to the tx/rx vNIC BARs */
	switch (pci_dev->id.device_id) {
	case PCI_DEVICE_ID_NFP3800_PF_NIC:
		addr = NFP_PCIE_QUEUE(NFP_PCIE_QCP_NFP3800_OFFSET,
					0, NFP_PCIE_QUEUE_NFP3800_MASK);
		break;
	case PCI_DEVICE_ID_NFP4000_PF_NIC:
	case PCI_DEVICE_ID_NFP6000_PF_NIC:
		addr = NFP_PCIE_QUEUE(NFP_PCIE_QCP_NFP6000_OFFSET,
					0, NFP_PCIE_QUEUE_NFP6000_MASK);
		break;
	default:
		PMD_INIT_LOG(ERR, "nfp_net: no device ID matching");
		ret = -ENODEV;
		goto pf_cleanup;
	}

	pf_dev->hw_queues = nfp_cpp_map_area(pf_dev->cpp, 0, 0,
			addr, NFP_QCP_QUEUE_AREA_SZ,
			&pf_dev->hwqueues_area);
	if (pf_dev->hw_queues == NULL) {
		PMD_INIT_LOG(ERR, "nfp_cpp_map_area fails for net.qc");
		ret = -EIO;
		goto pf_cleanup;
	}

	PMD_INIT_LOG(DEBUG, "tx/rx bar address: 0x%p", pf_dev->hw_queues);

	/*
	 * PF initialization has been done at this point. Call app specific
	 * init code now.
	 */
	switch (pf_dev->app_fw_id) {
	case NFP_APP_FW_CORE_NIC:
		PMD_INIT_LOG(INFO, "Initializing coreNIC");
		ret = nfp_init_app_fw_nic(pf_dev);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Could not initialize coreNIC!");
			goto hwqueues_cleanup;
		}
		break;
	case NFP_APP_FW_FLOWER_NIC:
		PMD_INIT_LOG(INFO, "Initializing Flower");
		ret = nfp_init_app_fw_flower(pf_dev);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Could not initialize Flower!");
			goto hwqueues_cleanup;
		}
		break;
	default:
		PMD_INIT_LOG(ERR, "Unsupported Firmware loaded");
		ret = -EINVAL;
		goto hwqueues_cleanup;
	}

	/* Register the CPP bridge service here for primary use */
	ret = nfp_enable_cpp_service(pf_dev);
	if (ret != 0)
		PMD_INIT_LOG(INFO, "Enable cpp service failed.");

	return 0;

hwqueues_cleanup:
	nfp_cpp_area_free(pf_dev->hwqueues_area);
pf_cleanup:
	rte_free(pf_dev);
sym_tbl_cleanup:
	free(sym_tbl);
eth_table_cleanup:
	free(nfp_eth_table);
hwinfo_cleanup:
	free(hwinfo);
cpp_cleanup:
	nfp_cpp_free(cpp);

	return ret;
}

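/*
 * Secondary process coreNIC setup: attach to the eth_devs created by the
 * primary process and hook up the datapath functions for each port.
 */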
static int
nfp_secondary_init_app_fw_nic(struct rte_pci_device *pci_dev,
		struct nfp_rtsym_table *sym_tbl,
		struct nfp_cpp *cpp)
{
	int i;
	int err = 0;
	int ret = 0;
	int total_vnics;
	struct nfp_net_hw *hw;

	/* Read the number of vNICs created for the PF */
	total_vnics = nfp_rtsym_read_le(sym_tbl, "nfd_cfg_pf0_num_ports", &err);
	if (err != 0 || total_vnics <= 0 || total_vnics > 8) {
		PMD_INIT_LOG(ERR, "nfd_cfg_pf0_num_ports symbol with wrong value");
		return -ENODEV;
	}

	for (i = 0; i < total_vnics; i++) {
		struct rte_eth_dev *eth_dev;
		char port_name[RTE_ETH_NAME_MAX_LEN];

		snprintf(port_name, sizeof(port_name), "%s_port%d",
				pci_dev->device.name, i);

		PMD_INIT_LOG(DEBUG, "Secondary attaching to port %s", port_name);
		eth_dev = rte_eth_dev_attach_secondary(port_name);
		if (eth_dev == NULL) {
			PMD_INIT_LOG(ERR, "Secondary process attach to port %s failed", port_name);
			ret = -ENODEV;
			break;
		}

		eth_dev->process_private = cpp;
		hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
		if (nfp_net_ethdev_ops_mount(hw, eth_dev))
			return -EINVAL;

		rte_eth_dev_probing_finish(eth_dev);
	}

	return ret;
}

/*
 * When attaching to the NFP3800/4000/6000 PF on a secondary process there
 * is no need to initialise the PF again. Only minimal work is required
 * here.
 */
static int
nfp_pf_secondary_init(struct rte_pci_device *pci_dev)
{
	int err = 0;
	int ret = 0;
	struct nfp_cpp *cpp;
	enum nfp_app_fw_id app_fw_id;
	struct nfp_rtsym_table *sym_tbl;

	if (pci_dev == NULL)
		return -ENODEV;

	/*
	 * When the device is bound to UIO it could, by mistake, be used by
	 * two DPDK apps, and the UIO driver does not prevent it. This could
	 * lead to a serious problem when configuring the NFP CPP interface.
	 * Here we avoid this by telling the CPP init code to use a lock
	 * file if UIO is being used.
	 */
	if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO)
		cpp = nfp_cpp_from_device_name(pci_dev, 0);
	else
		cpp = nfp_cpp_from_device_name(pci_dev, 1);

	if (cpp == NULL) {
		PMD_INIT_LOG(ERR, "A CPP handle can not be obtained");
		return -EIO;
	}

	/*
	 * We don't have access to the PF created in the primary process
	 * here so we have to read the number of ports from firmware.
	 */
	sym_tbl = nfp_rtsym_table_read(cpp);
	if (sym_tbl == NULL) {
		PMD_INIT_LOG(ERR, "Something is wrong with the firmware"
				" symbol table");
		return -EIO;
	}

	/* Read the app ID of the firmware loaded */
	app_fw_id = nfp_rtsym_read_le(sym_tbl, "_pf0_net_app_id", &err);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Couldn't read app_fw_id from fw");
		ret = -EIO;
		goto sym_tbl_cleanup;
	}

	switch (app_fw_id) {
	case NFP_APP_FW_CORE_NIC:
		PMD_INIT_LOG(INFO, "Initializing coreNIC");
		ret = nfp_secondary_init_app_fw_nic(pci_dev, sym_tbl, cpp);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Could not initialize coreNIC!");
			goto sym_tbl_cleanup;
		}
		break;
	case NFP_APP_FW_FLOWER_NIC:
		PMD_INIT_LOG(INFO, "Initializing Flower");
		ret = nfp_secondary_init_app_fw_flower(cpp);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Could not initialize Flower!");
			goto sym_tbl_cleanup;
		}
		break;
	default:
		PMD_INIT_LOG(ERR, "Unsupported Firmware loaded");
		ret = -EINVAL;
		goto sym_tbl_cleanup;
	}

sym_tbl_cleanup:
	free(sym_tbl);

	return ret;
}

static int
nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *dev)
{
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		return nfp_pf_init(dev);
	else
		return nfp_pf_secondary_init(dev);
}

static const struct rte_pci_id pci_id_nfp_pf_net_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
			       PCI_DEVICE_ID_NFP3800_PF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
			       PCI_DEVICE_ID_NFP4000_PF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
			       PCI_DEVICE_ID_NFP6000_PF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE,
			       PCI_DEVICE_ID_NFP3800_PF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE,
			       PCI_DEVICE_ID_NFP4000_PF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE,
			       PCI_DEVICE_ID_NFP6000_PF_NIC)
	},
	{
		.vendor_id = 0,
	},
};

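/*
 * Uninit callback used by the generic PCI remove path: close every
 * physical port belonging to this PF. Hotplug (full device removal) is
 * not supported, hence the -ENOTSUP return.
 */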
static int
nfp_pci_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	uint16_t port_id;

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	/* Free up all physical ports under PF */
	RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device)
		rte_eth_dev_close(port_id);
	/*
	 * Ports can be closed and freed but hotplugging is not
	 * currently supported
	 */
	return -ENOTSUP;
}

static int
eth_nfp_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, nfp_pci_uninit);
}

static struct rte_pci_driver rte_nfp_net_pf_pmd = {
	.id_table = pci_id_nfp_pf_net_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = nfp_pf_pci_probe,
	.remove = eth_nfp_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_nfp_pf, rte_nfp_net_pf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_nfp_pf, pci_id_nfp_pf_net_map);
RTE_PMD_REGISTER_KMOD_DEP(net_nfp_pf, "* igb_uio | uio_pci_generic | vfio");
/*
 * Local variables:
 * c-file-style: "Linux"
 * indent-tabs-mode: t
 * End:
 */