/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2014-2021 Netronome Systems, Inc.
 * All rights reserved.
 *
 * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
 */

/*
 * vim:shiftwidth=8:noexpandtab
 *
 * @file dpdk/pmd/nfp_ethdev.c
 *
 * Netronome vNIC DPDK Poll-Mode Driver: Main entry point
 */

#include <rte_common.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <dev_driver.h>
#include <rte_ether.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_mempool.h>
#include <rte_service_component.h>
#include <rte_alarm.h>
#include "eal_firmware.h"

#include "nfpcore/nfp_cpp.h"
#include "nfpcore/nfp_nffw.h"
#include "nfpcore/nfp_hwinfo.h"
#include "nfpcore/nfp_mip.h"
#include "nfpcore/nfp_rtsym.h"
#include "nfpcore/nfp_nsp.h"

#include "nfp_common.h"
#include "nfp_ctrl.h"
#include "nfp_rxtx.h"
#include "nfp_logs.h"
#include "nfp_cpp_bridge.h"

#include "flower/nfp_flower.h"

static int
nfp_net_pf_read_mac(struct nfp_app_fw_nic *app_fw_nic, int port)
{
	struct nfp_eth_table *nfp_eth_table;
	struct nfp_net_hw *hw = NULL;

	/* Grab a pointer to the correct physical port */
	hw = app_fw_nic->ports[port];

	nfp_eth_table = nfp_eth_read_ports(app_fw_nic->pf_dev->cpp);

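	/*
	 * Copy the MAC for this port out of the table returned by the NSP
	 * and release the table again; nfp_eth_read_ports() allocates it.
	 */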
	nfp_eth_copy_mac((uint8_t *)&hw->mac_addr,
			 (uint8_t *)&nfp_eth_table->ports[port].mac_addr);

	free(nfp_eth_table);
	return 0;
}

static int
nfp_net_start(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint32_t new_ctrl, update = 0;
	struct nfp_net_hw *hw;
	struct nfp_pf_dev *pf_dev;
	struct nfp_app_fw_nic *app_fw_nic;
	struct rte_eth_conf *dev_conf;
	struct rte_eth_rxmode *rxmode;
	uint32_t intr_vector;
	int ret;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);

	PMD_INIT_LOG(DEBUG, "Start");

	/* Disabling queues just in case... */
	nfp_net_disable_queues(dev);

	/* Enabling the required queues in the device */
	nfp_net_enable_queues(dev);

	/* Check and configure queue intr-vector mapping */
	if (dev->data->dev_conf.intr_conf.rxq != 0) {
		if (app_fw_nic->multiport) {
			PMD_INIT_LOG(ERR, "PMD rx interrupt is not supported "
					  "with NFP multiport PF");
			return -EINVAL;
		}
		if (rte_intr_type_get(intr_handle) ==
						RTE_INTR_HANDLE_UIO) {
			/*
			 * Better not to share LSC with RX interrupts.
			 * Unregistering LSC interrupt handler
			 */
			rte_intr_callback_unregister(pci_dev->intr_handle,
				nfp_net_dev_interrupt_handler, (void *)dev);

			if (dev->data->nb_rx_queues > 1) {
				PMD_INIT_LOG(ERR, "PMD rx interrupt only "
					     "supports 1 queue with UIO");
				return -EIO;
			}
		}
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;

		nfp_configure_rx_interrupt(dev, intr_handle);
		update = NFP_NET_CFG_UPDATE_MSIX;
	}

	/* Checking MTU set */
	if (dev->data->mtu > hw->flbufsz) {
		PMD_INIT_LOG(ERR, "MTU (%u) can't be larger than the current NFP_FRAME_SIZE (%u)",
				dev->data->mtu, hw->flbufsz);
		return -ERANGE;
	}

	rte_intr_enable(intr_handle);

	new_ctrl = nfp_check_offloads(dev);

	/* Writing configuration parameters in the device */
	nfp_net_params_setup(hw);

	dev_conf = &dev->data->dev_conf;
	rxmode = &dev_conf->rxmode;

	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) {
		nfp_net_rss_config_default(dev);
		update |= NFP_NET_CFG_UPDATE_RSS;
		new_ctrl |= nfp_net_cfg_ctrl_rss(hw->cap);
	}

	/* Enable device */
	new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;

	update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;

	/* Enable vxlan */
	if (hw->cap & NFP_NET_CFG_CTRL_VXLAN) {
		new_ctrl |= NFP_NET_CFG_CTRL_VXLAN;
		update |= NFP_NET_CFG_UPDATE_VXLAN;
	}

	if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
		new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;

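	/*
	 * Write the new control word to the BAR and kick off a reconfig so
	 * the firmware picks up the ring, RSS and feature changes above.
	 */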
	nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
	if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
		return -EIO;

	/*
	 * Allocating rte mbufs for configured rx queues.
	 * This requires the queues to be enabled beforehand.
	 */
	if (nfp_net_rx_freelist_setup(dev) < 0) {
		ret = -ENOMEM;
		goto error;
	}

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		/* Configure the physical port up */
		nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 1);
	else
		nfp_eth_set_configured(dev->process_private,
				       hw->nfp_idx, 1);

	hw->ctrl = new_ctrl;

	return 0;

error:
	/*
	 * An error returned by this function should mean the app exits and
	 * then the system releases all the memory allocated, even memory
	 * coming from hugepages.
	 *
	 * The device could be enabled at this point with some queues ready
	 * for getting packets. This is true if the call to
	 * nfp_net_rx_freelist_setup() succeeds for some queues but fails
	 * for subsequent queues.
	 *
	 * This should make the app exit, but it is better if we tell the
	 * device first.
	 */
	nfp_net_disable_queues(dev);

	return ret;
}

/* Stop device: disable rx and tx functions to allow for reconfiguring. */
static int
nfp_net_stop(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;

	PMD_INIT_LOG(DEBUG, "Stop");

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	nfp_net_disable_queues(dev);

	/* Clear queues */
	nfp_net_stop_tx_queue(dev);

	nfp_net_stop_rx_queue(dev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		/* Configure the physical port down */
		nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 0);
	else
		nfp_eth_set_configured(dev->process_private,
				       hw->nfp_idx, 0);

	return 0;
}

/* Set the link up. */
static int
nfp_net_set_link_up(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;

	PMD_DRV_LOG(DEBUG, "Set link up");

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		/* Configure the physical port up */
		return nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 1);
	else
		return nfp_eth_set_configured(dev->process_private,
					      hw->nfp_idx, 1);
}

/* Set the link down. */
static int
nfp_net_set_link_down(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;

	PMD_DRV_LOG(DEBUG, "Set link down");

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		/* Configure the physical port down */
		return nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 0);
	else
		return nfp_eth_set_configured(dev->process_private,
					      hw->nfp_idx, 0);
}

/* Reset and stop device. The device can not be restarted. */
static int
nfp_net_close(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;
	struct rte_pci_device *pci_dev;
	struct nfp_pf_dev *pf_dev;
	struct nfp_app_fw_nic *app_fw_nic;
	int i;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	PMD_INIT_LOG(DEBUG, "Close");

	pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);

	/*
	 * We assume that the DPDK application is stopping all the
	 * threads/queues before calling the device close function.
	 */

	nfp_net_disable_queues(dev);

	/* Clear queues */
	nfp_net_close_tx_queue(dev);

	nfp_net_close_rx_queue(dev);

	/* Cancel possible impending LSC work here before releasing the port */
	rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler,
			     (void *)dev);

	/* Only free PF resources after all physical ports have been closed */
	/* Mark this port as unused and free device priv resources */
	nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff);
	app_fw_nic->ports[hw->idx] = NULL;
	rte_eth_dev_release_port(dev);

	for (i = 0; i < app_fw_nic->total_phyports; i++) {
		/* Check to see if ports are still in use */
		if (app_fw_nic->ports[i])
			return 0;
	}

	/* Now it is safe to free all PF resources */
	PMD_INIT_LOG(INFO, "Freeing PF resources");
	nfp_cpp_area_free(pf_dev->ctrl_area);
	nfp_cpp_area_free(pf_dev->hwqueues_area);
	free(pf_dev->hwinfo);
	free(pf_dev->sym_tbl);
	nfp_cpp_free(pf_dev->cpp);
	rte_free(app_fw_nic);
	rte_free(pf_dev);

	rte_intr_disable(pci_dev->intr_handle);

	/* Unregister callback func from eal lib */
	rte_intr_callback_unregister(pci_dev->intr_handle,
			nfp_net_dev_interrupt_handler, (void *)dev);

	/*
	 * The ixgbe PMD disables the pcie master on the
	 * device. The i40e does not...
	 */

	return 0;
}

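/*
 * Find the table entry already holding this VXLAN port, or otherwise the
 * first slot whose use count is zero. Returns -EINVAL if neither exists.
 */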
static int
nfp_net_find_vxlan_idx(struct nfp_net_hw *hw,
		uint16_t port,
		uint32_t *idx)
{
	uint32_t i;
	int free_idx = -1;

	for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i++) {
		if (hw->vxlan_ports[i] == port) {
			free_idx = i;
			break;
		}

		if (hw->vxlan_usecnt[i] == 0) {
			free_idx = i;
			break;
		}
	}

	if (free_idx == -1)
		return -EINVAL;

	*idx = free_idx;

	return 0;
}

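/*
 * Add a VXLAN UDP port: program it into the hardware table on first use
 * and bump the per-slot reference count.
 */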
static int
nfp_udp_tunnel_port_add(struct rte_eth_dev *dev,
		struct rte_eth_udp_tunnel *tunnel_udp)
{
	int ret;
	uint32_t idx;
	uint16_t vxlan_port;
	struct nfp_net_hw *hw;
	enum rte_eth_tunnel_type tnl_type;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	vxlan_port = tunnel_udp->udp_port;
	tnl_type   = tunnel_udp->prot_type;

	if (tnl_type != RTE_ETH_TUNNEL_TYPE_VXLAN) {
		PMD_DRV_LOG(ERR, "Not VXLAN tunnel");
		return -ENOTSUP;
	}

	ret = nfp_net_find_vxlan_idx(hw, vxlan_port, &idx);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Failed to find a valid vxlan idx");
		return -EINVAL;
	}

	if (hw->vxlan_usecnt[idx] == 0) {
		ret = nfp_net_set_vxlan_port(hw, idx, vxlan_port);
		if (ret != 0) {
			PMD_DRV_LOG(ERR, "Failed to set vxlan port");
			return -EINVAL;
		}
	}

	hw->vxlan_usecnt[idx]++;

	return 0;
}

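/*
 * Remove a VXLAN UDP port: drop the per-slot reference count and clear
 * the hardware table entry once the last user is gone.
 */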
static int
nfp_udp_tunnel_port_del(struct rte_eth_dev *dev,
		struct rte_eth_udp_tunnel *tunnel_udp)
{
	int ret;
	uint32_t idx;
	uint16_t vxlan_port;
	struct nfp_net_hw *hw;
	enum rte_eth_tunnel_type tnl_type;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	vxlan_port = tunnel_udp->udp_port;
	tnl_type   = tunnel_udp->prot_type;

	if (tnl_type != RTE_ETH_TUNNEL_TYPE_VXLAN) {
		PMD_DRV_LOG(ERR, "Not VXLAN tunnel");
		return -ENOTSUP;
	}

	ret = nfp_net_find_vxlan_idx(hw, vxlan_port, &idx);
	if (ret != 0 || hw->vxlan_usecnt[idx] == 0) {
		PMD_DRV_LOG(ERR, "Failed to find a valid vxlan idx");
		return -EINVAL;
	}

	hw->vxlan_usecnt[idx]--;

	if (hw->vxlan_usecnt[idx] == 0) {
		ret = nfp_net_set_vxlan_port(hw, idx, 0);
		if (ret != 0) {
			PMD_DRV_LOG(ERR, "Failed to set vxlan port");
			return -EINVAL;
		}
	}

	return 0;
}

/* Initialise and register driver with DPDK Application */
static const struct eth_dev_ops nfp_net_eth_dev_ops = {
	.dev_configure		= nfp_net_configure,
	.dev_start		= nfp_net_start,
	.dev_stop		= nfp_net_stop,
	.dev_set_link_up	= nfp_net_set_link_up,
	.dev_set_link_down	= nfp_net_set_link_down,
	.dev_close		= nfp_net_close,
	.promiscuous_enable	= nfp_net_promisc_enable,
	.promiscuous_disable	= nfp_net_promisc_disable,
	.link_update		= nfp_net_link_update,
	.stats_get		= nfp_net_stats_get,
	.stats_reset		= nfp_net_stats_reset,
	.dev_infos_get		= nfp_net_infos_get,
	.dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
	.mtu_set		= nfp_net_dev_mtu_set,
	.mac_addr_set		= nfp_net_set_mac_addr,
	.vlan_offload_set	= nfp_net_vlan_offload_set,
	.reta_update		= nfp_net_reta_update,
	.reta_query		= nfp_net_reta_query,
	.rss_hash_update	= nfp_net_rss_hash_update,
	.rss_hash_conf_get	= nfp_net_rss_hash_conf_get,
	.rx_queue_setup		= nfp_net_rx_queue_setup,
	.rx_queue_release	= nfp_net_rx_queue_release,
	.tx_queue_setup		= nfp_net_tx_queue_setup,
	.tx_queue_release	= nfp_net_tx_queue_release,
	.rx_queue_intr_enable   = nfp_rx_queue_intr_enable,
	.rx_queue_intr_disable  = nfp_rx_queue_intr_disable,
	.udp_tunnel_port_add    = nfp_udp_tunnel_port_add,
	.udp_tunnel_port_del    = nfp_udp_tunnel_port_del,
};

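/*
 * Choose the Tx burst routine that matches the firmware data-path variant
 * (NFD3 or NFDk) advertised in the version word, then hook up the common
 * eth_dev ops and the Rx routines.
 */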
static inline int
nfp_net_ethdev_ops_mount(struct nfp_net_hw *hw, struct rte_eth_dev *eth_dev)
{
	switch (NFD_CFG_CLASS_VER_of(hw->ver)) {
	case NFP_NET_CFG_VERSION_DP_NFD3:
		eth_dev->tx_pkt_burst = &nfp_net_nfd3_xmit_pkts;
		break;
	case NFP_NET_CFG_VERSION_DP_NFDK:
		if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 5) {
			PMD_DRV_LOG(ERR, "NFDK must use ABI 5 or newer, found: %d",
				NFD_CFG_MAJOR_VERSION_of(hw->ver));
			return -EINVAL;
		}
		eth_dev->tx_pkt_burst = &nfp_net_nfdk_xmit_pkts;
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported firmware data-path version");
		return -EINVAL;
	}

	eth_dev->dev_ops = &nfp_net_eth_dev_ops;
	eth_dev->rx_queue_count = nfp_net_rx_queue_count;
	eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;

	return 0;
}

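/*
 * Per-port vNIC init: set up the control, Tx and Rx BAR mappings for one
 * physical port of the PF, read the read-only capability fields and
 * program a MAC address before registering the LSC interrupt handler.
 */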
static int
nfp_net_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct nfp_pf_dev *pf_dev;
	struct nfp_app_fw_nic *app_fw_nic;
	struct nfp_net_hw *hw;
	struct rte_ether_addr *tmp_ether_addr;
	uint64_t rx_bar_off = 0;
	uint64_t tx_bar_off = 0;
	uint32_t start_q;
	int stride = 4;
	int port = 0;

	PMD_INIT_FUNC_TRACE();

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	/* Use backpointer here to the PF of this eth_dev */
	pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(eth_dev->data->dev_private);

	/* Use backpointer to the CoreNIC app struct */
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);

	port = ((struct nfp_net_hw *)eth_dev->data->dev_private)->idx;
	if (port < 0 || port > 7) {
		PMD_DRV_LOG(ERR, "Port value is wrong");
		return -ENODEV;
	}

	/*
	 * Use PF array of physical ports to get pointer to
	 * this specific port
	 */
	hw = app_fw_nic->ports[port];

	PMD_INIT_LOG(DEBUG, "Working with physical port number: %d, "
			"NFP internal port number: %d", port, hw->nfp_idx);

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;

	PMD_INIT_LOG(DEBUG, "nfp_net: device (%u:%u) %u:%u:%u:%u",
		     pci_dev->id.vendor_id, pci_dev->id.device_id,
		     pci_dev->addr.domain, pci_dev->addr.bus,
		     pci_dev->addr.devid, pci_dev->addr.function);

	hw->ctrl_bar = (uint8_t *)pci_dev->mem_resource[0].addr;
	if (hw->ctrl_bar == NULL) {
		PMD_DRV_LOG(ERR,
			"hw->ctrl_bar is NULL. BAR0 not configured");
		return -ENODEV;
	}

	if (port == 0) {
		hw->ctrl_bar = pf_dev->ctrl_bar;
	} else {
		if (pf_dev->ctrl_bar == NULL)
			return -ENODEV;
		/* Use port offset in pf ctrl_bar for this port's control bar */
		hw->ctrl_bar = pf_dev->ctrl_bar + (port * NFP_PF_CSR_SLICE_SIZE);
	}

	PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar);

	hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION);

	if (nfp_net_check_dma_mask(hw, pci_dev->name) != 0)
		return -ENODEV;

	if (nfp_net_ethdev_ops_mount(hw, eth_dev))
		return -EINVAL;

	hw->max_rx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_RXRINGS);
	hw->max_tx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_TXRINGS);

	/* Work out where in the BAR the queues start. */
	switch (pci_dev->id.device_id) {
	case PCI_DEVICE_ID_NFP3800_PF_NIC:
	case PCI_DEVICE_ID_NFP4000_PF_NIC:
	case PCI_DEVICE_ID_NFP6000_PF_NIC:
		start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
		tx_bar_off = nfp_pci_queue(pci_dev, start_q);
		start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
		rx_bar_off = nfp_pci_queue(pci_dev, start_q);
		break;
	default:
		PMD_DRV_LOG(ERR, "nfp_net: no device ID matching");
		return -ENODEV;
	}

	PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%" PRIx64 "", tx_bar_off);
	PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%" PRIx64 "", rx_bar_off);

	hw->tx_bar = pf_dev->hw_queues + tx_bar_off;
	hw->rx_bar = pf_dev->hw_queues + rx_bar_off;
	eth_dev->data->dev_private = hw;

	PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
		     hw->ctrl_bar, hw->tx_bar, hw->rx_bar);

	nfp_net_cfg_queue_setup(hw);

	/* Get some of the read-only fields from the config BAR */
	hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
	hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
	hw->mtu = RTE_ETHER_MTU;
	hw->flbufsz = DEFAULT_FLBUF_SIZE;

	/* VLAN insertion is incompatible with LSOv2 */
	if (hw->cap & NFP_NET_CFG_CTRL_LSO2)
		hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN;

	nfp_net_init_metadata_format(hw);

	if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2)
		hw->rx_offset = NFP_NET_RX_OFFSET;
	else
		hw->rx_offset = nn_cfg_readl(hw, NFP_NET_CFG_RX_OFFSET_ADDR);

	hw->ctrl = 0;

	hw->stride_rx = stride;
	hw->stride_tx = stride;

	nfp_net_log_device_information(hw);

	/* Initializing spinlock for reconfigs */
	rte_spinlock_init(&hw->reconfig_lock);

	/* Allocating memory for mac addr */
	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
					       RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate space for MAC address");
		return -ENOMEM;
	}

	nfp_net_pf_read_mac(app_fw_nic, port);
	nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);

	tmp_ether_addr = (struct rte_ether_addr *)&hw->mac_addr;
	if (!rte_is_valid_assigned_ether_addr(tmp_ether_addr)) {
		PMD_INIT_LOG(INFO, "Using random mac address for port %d", port);
		/* Using a random mac address for this port */
		rte_eth_random_addr(&hw->mac_addr[0]);
		nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
	}

	/* Copying mac address to DPDK eth_dev struct */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac_addr,
			&eth_dev->data->mac_addrs[0]);

	if ((hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR) == 0)
		eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR;

	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x "
		     "mac=" RTE_ETHER_ADDR_PRT_FMT,
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id,
		     hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
		     hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);

	/* Registering LSC interrupt handler */
	rte_intr_callback_register(pci_dev->intr_handle,
			nfp_net_dev_interrupt_handler, (void *)eth_dev);
	/* Telling the firmware about the LSC interrupt entry */
	nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
	/* Recording current stats counters values */
	nfp_net_stats_reset(eth_dev);

	return 0;
}

#define DEFAULT_FW_PATH       "/lib/firmware/netronome"

static int
nfp_fw_upload(struct rte_pci_device *dev, struct nfp_nsp *nsp, char *card)
{
	struct nfp_cpp *cpp = nsp->cpp;
	void *fw_buf;
	char fw_name[125];
	char serial[40];
	size_t fsize;

	/* Looking for firmware file in order of priority */

	/* First try to find a firmware image specific for this device */
	snprintf(serial, sizeof(serial),
			"serial-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x",
		cpp->serial[0], cpp->serial[1], cpp->serial[2], cpp->serial[3],
		cpp->serial[4], cpp->serial[5], cpp->interface >> 8,
		cpp->interface & 0xff);

	snprintf(fw_name, sizeof(fw_name), "%s/%s.nffw", DEFAULT_FW_PATH,
			serial);

	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
	if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0)
		goto load_fw;

	/* Then try the PCI name */
	snprintf(fw_name, sizeof(fw_name), "%s/pci-%s.nffw", DEFAULT_FW_PATH,
			dev->name);

	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
	if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0)
		goto load_fw;

	/* Finally try the card type and media */
	snprintf(fw_name, sizeof(fw_name), "%s/%s", DEFAULT_FW_PATH, card);
	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
	if (rte_firmware_read(fw_name, &fw_buf, &fsize) < 0) {
		PMD_DRV_LOG(INFO, "Firmware file %s not found.", fw_name);
		return -ENOENT;
	}

load_fw:
	PMD_DRV_LOG(INFO, "Firmware file found at %s with size: %zu",
		fw_name, fsize);
	PMD_DRV_LOG(INFO, "Uploading the firmware ...");
	nfp_nsp_load_fw(nsp, fw_buf, fsize);
	PMD_DRV_LOG(INFO, "Done");

	free(fw_buf);

	return 0;
}

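/*
 * Pick a firmware image name based on the card model, port count and port
 * speed reported by the NSP, soft reset the device and upload the image.
 */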
static int
nfp_fw_setup(struct rte_pci_device *dev,
		struct nfp_cpp *cpp,
		struct nfp_eth_table *nfp_eth_table,
		struct nfp_hwinfo *hwinfo)
{
	struct nfp_nsp *nsp;
	const char *nfp_fw_model;
	char card_desc[100];
	int err = 0;

	nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "nffw.partno");
	if (nfp_fw_model == NULL)
		nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "assembly.partno");

	if (nfp_fw_model) {
		PMD_DRV_LOG(INFO, "firmware model found: %s", nfp_fw_model);
	} else {
		PMD_DRV_LOG(ERR, "firmware model NOT found");
		return -EIO;
	}

	if (nfp_eth_table->count == 0 || nfp_eth_table->count > 8) {
		PMD_DRV_LOG(ERR, "NFP ethernet table reports an invalid port count: %u",
			nfp_eth_table->count);
		return -EIO;
	}

	PMD_DRV_LOG(INFO, "NFP ethernet port table reports %u ports",
			nfp_eth_table->count);

	PMD_DRV_LOG(INFO, "Port speed: %u", nfp_eth_table->ports[0].speed);

	snprintf(card_desc, sizeof(card_desc), "nic_%s_%dx%d.nffw",
			nfp_fw_model, nfp_eth_table->count,
			nfp_eth_table->ports[0].speed / 1000);

	nsp = nfp_nsp_open(cpp);
	if (nsp == NULL) {
		PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle");
		return -EIO;
	}

	nfp_nsp_device_soft_reset(nsp);
	err = nfp_fw_upload(dev, nsp, card_desc);

	nfp_nsp_close(nsp);
	return err;
}

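/*
 * CoreNIC firmware: allocate one eth_dev per physical port of the PF, map
 * the shared control BAR and run the per-port init on every port.
 */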
static int
nfp_init_app_fw_nic(struct nfp_pf_dev *pf_dev)
{
	int i;
	int ret;
	int err = 0;
	int total_vnics;
	struct nfp_net_hw *hw;
	unsigned int numa_node;
	struct rte_eth_dev *eth_dev;
	struct nfp_app_fw_nic *app_fw_nic;
	struct nfp_eth_table *nfp_eth_table;
	char port_name[RTE_ETH_NAME_MAX_LEN];

	nfp_eth_table = pf_dev->nfp_eth_table;
	PMD_INIT_LOG(INFO, "Total physical ports: %d", nfp_eth_table->count);

	/* Allocate memory for the CoreNIC app */
	app_fw_nic = rte_zmalloc("nfp_app_fw_nic", sizeof(*app_fw_nic), 0);
	if (app_fw_nic == NULL)
		return -ENOMEM;

	/* Point the app_fw_priv pointer in the PF to the coreNIC app */
	pf_dev->app_fw_priv = app_fw_nic;

	/* Read the number of vNICs created for the PF */
	total_vnics = nfp_rtsym_read_le(pf_dev->sym_tbl, "nfd_cfg_pf0_num_ports", &err);
	if (err != 0 || total_vnics <= 0 || total_vnics > 8) {
		PMD_INIT_LOG(ERR, "nfd_cfg_pf0_num_ports symbol with wrong value");
		ret = -ENODEV;
		goto app_cleanup;
	}

	/*
	 * For coreNIC the number of vNICs exposed should be the same as the
	 * number of physical ports
	 */
	if (total_vnics != (int)nfp_eth_table->count) {
		PMD_INIT_LOG(ERR, "Total physical ports do not match number of vNICs");
		ret = -ENODEV;
		goto app_cleanup;
	}

	/* Populate coreNIC app properties */
	app_fw_nic->total_phyports = total_vnics;
	app_fw_nic->pf_dev = pf_dev;
	if (total_vnics > 1)
		app_fw_nic->multiport = true;

	/* Map the symbol table */
	pf_dev->ctrl_bar = nfp_rtsym_map(pf_dev->sym_tbl, "_pf0_net_bar0",
			app_fw_nic->total_phyports * 32768, &pf_dev->ctrl_area);
	if (pf_dev->ctrl_bar == NULL) {
		PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for _pf0_net_bar0");
		ret = -EIO;
		goto app_cleanup;
	}

	PMD_INIT_LOG(DEBUG, "ctrl bar: %p", pf_dev->ctrl_bar);

	/* Loop through all physical ports on PF */
	numa_node = rte_socket_id();
	for (i = 0; i < app_fw_nic->total_phyports; i++) {
		snprintf(port_name, sizeof(port_name), "%s_port%d",
			 pf_dev->pci_dev->device.name, i);

		/* Allocate an eth_dev for this phyport */
		eth_dev = rte_eth_dev_allocate(port_name);
		if (eth_dev == NULL) {
			ret = -ENODEV;
			goto port_cleanup;
		}

		/* Allocate memory for this phyport */
		eth_dev->data->dev_private =
			rte_zmalloc_socket(port_name, sizeof(struct nfp_net_hw),
				RTE_CACHE_LINE_SIZE, numa_node);
		if (eth_dev->data->dev_private == NULL) {
			ret = -ENOMEM;
			rte_eth_dev_release_port(eth_dev);
			goto port_cleanup;
		}

		hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

		/* Add this device to the PF's array of physical ports */
		app_fw_nic->ports[i] = hw;

		hw->pf_dev = pf_dev;
		hw->cpp = pf_dev->cpp;
		hw->eth_dev = eth_dev;
		hw->idx = i;
		hw->nfp_idx = nfp_eth_table->ports[i].index;

		eth_dev->device = &pf_dev->pci_dev->device;

		/*
		 * Ctrl/TX/RX BAR mappings and remaining init happens in
		 * nfp_net_init()
		 */
		ret = nfp_net_init(eth_dev);
		if (ret) {
			ret = -ENODEV;
			goto port_cleanup;
		}

		rte_eth_dev_probing_finish(eth_dev);

	} /* End loop, all ports on this PF */

	return 0;

port_cleanup:
	for (i = 0; i < app_fw_nic->total_phyports; i++) {
		if (app_fw_nic->ports[i] && app_fw_nic->ports[i]->eth_dev) {
			struct rte_eth_dev *tmp_dev;
			tmp_dev = app_fw_nic->ports[i]->eth_dev;
			rte_eth_dev_release_port(tmp_dev);
			app_fw_nic->ports[i] = NULL;
		}
	}
	nfp_cpp_area_free(pf_dev->ctrl_area);
app_cleanup:
	rte_free(app_fw_nic);

	return ret;
}

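/*
 * Primary process PF bring-up: acquire a CPP handle, load the firmware,
 * map the queue area and hand over to the app specific init (coreNIC or
 * flower) selected by the app ID the firmware advertises.
 */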
static int
nfp_pf_init(struct rte_pci_device *pci_dev)
{
	int ret;
	int err = 0;
	uint64_t addr;
	struct nfp_cpp *cpp;
	enum nfp_app_fw_id app_fw_id;
	struct nfp_pf_dev *pf_dev;
	struct nfp_hwinfo *hwinfo;
	char name[RTE_ETH_NAME_MAX_LEN];
	struct nfp_rtsym_table *sym_tbl;
	struct nfp_eth_table *nfp_eth_table;

	if (pci_dev == NULL)
		return -ENODEV;

	/*
	 * When the device is bound to UIO it could be used, by mistake, by
	 * two DPDK apps, and the UIO driver does not prevent it. This could
	 * lead to a serious problem when configuring the NFP CPP interface.
	 * Here we avoid this by telling the CPP init code to use a lock
	 * file if UIO is being used.
	 */
	if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO)
		cpp = nfp_cpp_from_device_name(pci_dev, 0);
	else
		cpp = nfp_cpp_from_device_name(pci_dev, 1);

	if (cpp == NULL) {
		PMD_INIT_LOG(ERR, "A CPP handle can not be obtained");
		return -EIO;
	}

	hwinfo = nfp_hwinfo_read(cpp);
	if (hwinfo == NULL) {
		PMD_INIT_LOG(ERR, "Error reading hwinfo table");
		ret = -EIO;
		goto cpp_cleanup;
	}

	/* Read the number of physical ports from hardware */
	nfp_eth_table = nfp_eth_read_ports(cpp);
	if (nfp_eth_table == NULL) {
		PMD_INIT_LOG(ERR, "Error reading NFP ethernet table");
		ret = -EIO;
		goto hwinfo_cleanup;
	}

	if (nfp_fw_setup(pci_dev, cpp, nfp_eth_table, hwinfo)) {
		PMD_INIT_LOG(ERR, "Error when uploading firmware");
		ret = -EIO;
		goto eth_table_cleanup;
	}

	/* Now the symbol table should be there */
	sym_tbl = nfp_rtsym_table_read(cpp);
	if (sym_tbl == NULL) {
		PMD_INIT_LOG(ERR, "Something is wrong with the firmware"
				" symbol table");
		ret = -EIO;
		goto eth_table_cleanup;
	}

	/* Read the app ID of the firmware loaded */
	app_fw_id = nfp_rtsym_read_le(sym_tbl, "_pf0_net_app_id", &err);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Couldn't read app_fw_id from fw");
		ret = -EIO;
		goto sym_tbl_cleanup;
	}

	/* Allocate memory for the PF "device" */
	snprintf(name, sizeof(name), "nfp_pf%d", 0);
	pf_dev = rte_zmalloc(name, sizeof(*pf_dev), 0);
	if (pf_dev == NULL) {
		ret = -ENOMEM;
		goto sym_tbl_cleanup;
	}

	/* Populate the newly created PF device */
	pf_dev->app_fw_id = app_fw_id;
	pf_dev->cpp = cpp;
	pf_dev->hwinfo = hwinfo;
	pf_dev->sym_tbl = sym_tbl;
	pf_dev->pci_dev = pci_dev;
	pf_dev->nfp_eth_table = nfp_eth_table;

	/* Configure access to tx/rx vNIC BARs */
	switch (pci_dev->id.device_id) {
	case PCI_DEVICE_ID_NFP3800_PF_NIC:
		addr = NFP_PCIE_QUEUE(NFP_PCIE_QCP_NFP3800_OFFSET,
					0, NFP_PCIE_QUEUE_NFP3800_MASK);
		break;
	case PCI_DEVICE_ID_NFP4000_PF_NIC:
	case PCI_DEVICE_ID_NFP6000_PF_NIC:
		addr = NFP_PCIE_QUEUE(NFP_PCIE_QCP_NFP6000_OFFSET,
					0, NFP_PCIE_QUEUE_NFP6000_MASK);
		break;
	default:
		PMD_INIT_LOG(ERR, "nfp_net: no device ID matching");
		ret = -ENODEV;
		goto pf_cleanup;
	}

	pf_dev->hw_queues = nfp_cpp_map_area(pf_dev->cpp, 0, 0,
			addr, NFP_QCP_QUEUE_AREA_SZ,
			&pf_dev->hwqueues_area);
	if (pf_dev->hw_queues == NULL) {
		PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for net.qc");
		ret = -EIO;
		goto pf_cleanup;
	}

	PMD_INIT_LOG(DEBUG, "tx/rx bar address: 0x%p", pf_dev->hw_queues);

	/*
	 * PF initialization has been done at this point. Call app specific
	 * init code now
	 */
	switch (pf_dev->app_fw_id) {
	case NFP_APP_FW_CORE_NIC:
		PMD_INIT_LOG(INFO, "Initializing coreNIC");
		ret = nfp_init_app_fw_nic(pf_dev);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Could not initialize coreNIC!");
			goto hwqueues_cleanup;
		}
		break;
	case NFP_APP_FW_FLOWER_NIC:
		PMD_INIT_LOG(INFO, "Initializing Flower");
		ret = nfp_init_app_fw_flower(pf_dev);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Could not initialize Flower!");
			goto hwqueues_cleanup;
		}
		break;
	default:
		PMD_INIT_LOG(ERR, "Unsupported Firmware loaded");
		ret = -EINVAL;
		goto hwqueues_cleanup;
	}

	/* Register the CPP bridge service here for primary use */
	ret = nfp_enable_cpp_service(pf_dev);
	if (ret != 0)
		PMD_INIT_LOG(INFO, "Enabling the CPP service failed");

	return 0;

hwqueues_cleanup:
	nfp_cpp_area_free(pf_dev->hwqueues_area);
pf_cleanup:
	rte_free(pf_dev);
sym_tbl_cleanup:
	free(sym_tbl);
eth_table_cleanup:
	free(nfp_eth_table);
hwinfo_cleanup:
	free(hwinfo);
cpp_cleanup:
	nfp_cpp_free(cpp);

	return ret;
}

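/*
 * Secondary process path for the coreNIC firmware: attach to the ethdev
 * ports the primary process created and mount the datapath function
 * pointers, using the CPP handle as per-process private data.
 */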
static int
nfp_secondary_init_app_fw_nic(struct rte_pci_device *pci_dev,
		struct nfp_rtsym_table *sym_tbl,
		struct nfp_cpp *cpp)
{
	int i;
	int err = 0;
	int ret = 0;
	int total_vnics;
	struct nfp_net_hw *hw;

	/* Read the number of vNICs created for the PF */
	total_vnics = nfp_rtsym_read_le(sym_tbl, "nfd_cfg_pf0_num_ports", &err);
	if (err != 0 || total_vnics <= 0 || total_vnics > 8) {
		PMD_INIT_LOG(ERR, "nfd_cfg_pf0_num_ports symbol with wrong value");
		return -ENODEV;
	}

	for (i = 0; i < total_vnics; i++) {
		struct rte_eth_dev *eth_dev;
		char port_name[RTE_ETH_NAME_MAX_LEN];
		snprintf(port_name, sizeof(port_name), "%s_port%d",
				pci_dev->device.name, i);

		PMD_INIT_LOG(DEBUG, "Secondary attaching to port %s", port_name);
		eth_dev = rte_eth_dev_attach_secondary(port_name);
		if (eth_dev == NULL) {
			PMD_INIT_LOG(ERR, "Secondary process attach to port %s failed", port_name);
			ret = -ENODEV;
			break;
		}

		eth_dev->process_private = cpp;
		hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
		if (nfp_net_ethdev_ops_mount(hw, eth_dev))
			return -EINVAL;

		rte_eth_dev_probing_finish(eth_dev);
	}

	return ret;
}

/*
 * When attaching to the NFP4000/6000 PF on a secondary process there
 * is no need to initialise the PF again. Only minimal work is required
 * here.
 */
static int
nfp_pf_secondary_init(struct rte_pci_device *pci_dev)
{
	int err = 0;
	int ret = 0;
	struct nfp_cpp *cpp;
	enum nfp_app_fw_id app_fw_id;
	struct nfp_rtsym_table *sym_tbl;

	if (pci_dev == NULL)
		return -ENODEV;

	/*
	 * When the device is bound to UIO it could be used, by mistake, by
	 * two DPDK apps, and the UIO driver does not prevent it. This could
	 * lead to a serious problem when configuring the NFP CPP interface.
	 * Here we avoid this by telling the CPP init code to use a lock
	 * file if UIO is being used.
	 */
	if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO)
		cpp = nfp_cpp_from_device_name(pci_dev, 0);
	else
		cpp = nfp_cpp_from_device_name(pci_dev, 1);

	if (cpp == NULL) {
		PMD_INIT_LOG(ERR, "A CPP handle can not be obtained");
		return -EIO;
	}

	/*
	 * We don't have access to the PF created in the primary process
	 * here so we have to read the number of ports from firmware
	 */
	sym_tbl = nfp_rtsym_table_read(cpp);
	if (sym_tbl == NULL) {
		PMD_INIT_LOG(ERR, "Something is wrong with the firmware"
				" symbol table");
		return -EIO;
	}

	/* Read the app ID of the firmware loaded */
	app_fw_id = nfp_rtsym_read_le(sym_tbl, "_pf0_net_app_id", &err);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Couldn't read app_fw_id from fw");
		goto sym_tbl_cleanup;
	}

	switch (app_fw_id) {
	case NFP_APP_FW_CORE_NIC:
		PMD_INIT_LOG(INFO, "Initializing coreNIC");
		ret = nfp_secondary_init_app_fw_nic(pci_dev, sym_tbl, cpp);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Could not initialize coreNIC!");
			goto sym_tbl_cleanup;
		}
		break;
	case NFP_APP_FW_FLOWER_NIC:
		PMD_INIT_LOG(INFO, "Initializing Flower");
		ret = nfp_secondary_init_app_fw_flower(cpp);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Could not initialize Flower!");
			goto sym_tbl_cleanup;
		}
		break;
	default:
		PMD_INIT_LOG(ERR, "Unsupported Firmware loaded");
		ret = -EINVAL;
		goto sym_tbl_cleanup;
	}

sym_tbl_cleanup:
	free(sym_tbl);

	return ret;
}

static int
nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *dev)
{
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		return nfp_pf_init(dev);
	else
		return nfp_pf_secondary_init(dev);
}

static const struct rte_pci_id pci_id_nfp_pf_net_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
			       PCI_DEVICE_ID_NFP3800_PF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
			       PCI_DEVICE_ID_NFP4000_PF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
			       PCI_DEVICE_ID_NFP6000_PF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE,
			       PCI_DEVICE_ID_NFP3800_PF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE,
			       PCI_DEVICE_ID_NFP4000_PF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE,
			       PCI_DEVICE_ID_NFP6000_PF_NIC)
	},
	{
		.vendor_id = 0,
	},
};

static int
nfp_pci_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	uint16_t port_id;

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	/* Free up all physical ports under PF */
	RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device)
		rte_eth_dev_close(port_id);
	/*
	 * Ports can be closed and freed but hotplugging is not
	 * currently supported
	 */
	return -ENOTSUP;
}

static int
eth_nfp_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, nfp_pci_uninit);
}

static struct rte_pci_driver rte_nfp_net_pf_pmd = {
	.id_table = pci_id_nfp_pf_net_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = nfp_pf_pci_probe,
	.remove = eth_nfp_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_nfp_pf, rte_nfp_net_pf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_nfp_pf, pci_id_nfp_pf_net_map);
RTE_PMD_REGISTER_KMOD_DEP(net_nfp_pf, "* igb_uio | uio_pci_generic | vfio");
/*
 * Local variables:
 * c-file-style: "Linux"
 * indent-tabs-mode: t
 * End:
 */