/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2014-2021 Netronome Systems, Inc.
 * All rights reserved.
 *
 * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
 */

/*
 * vim:shiftwidth=8:noexpandtab
 *
 * @file dpdk/pmd/nfp_ethdev.c
 *
 * Netronome vNIC DPDK Poll-Mode Driver: Main entry point
 */

#include <rte_common.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <dev_driver.h>
#include <rte_ether.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_mempool.h>
#include <rte_service_component.h>
#include <rte_alarm.h>
#include "eal_firmware.h"

#include "nfpcore/nfp_cpp.h"
#include "nfpcore/nfp_nffw.h"
#include "nfpcore/nfp_hwinfo.h"
#include "nfpcore/nfp_mip.h"
#include "nfpcore/nfp_rtsym.h"
#include "nfpcore/nfp_nsp.h"

#include "nfp_common.h"
#include "nfp_ctrl.h"
#include "nfp_rxtx.h"
#include "nfp_logs.h"
#include "nfp_cpp_bridge.h"

#include "flower/nfp_flower.h"

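/*
 * Read the MAC address of one physical port from the NSP port table and
 * store it in the port's private data (hw->mac_addr). The table returned
 * by nfp_eth_read_ports() is heap allocated and freed before returning.
 */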
static int
nfp_net_pf_read_mac(struct nfp_app_fw_nic *app_fw_nic, int port)
{
	struct nfp_eth_table *nfp_eth_table;
	struct nfp_net_hw *hw = NULL;

	/* Grab a pointer to the correct physical port */
	hw = app_fw_nic->ports[port];

	nfp_eth_table = nfp_eth_read_ports(app_fw_nic->pf_dev->cpp);

	nfp_eth_copy_mac((uint8_t *)&hw->mac_addr,
			 (uint8_t *)&nfp_eth_table->ports[port].mac_addr);

	free(nfp_eth_table);
	return 0;
}

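/*
 * Device start: enable the configured queues, optionally set up per-queue
 * RX interrupts, program the control word and update mask via a firmware
 * reconfig, populate the RX freelists and finally bring the physical
 * port up.
 */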
static int
nfp_net_start(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint32_t new_ctrl, update = 0;
	struct nfp_net_hw *hw;
	struct nfp_pf_dev *pf_dev;
	struct nfp_app_fw_nic *app_fw_nic;
	struct rte_eth_conf *dev_conf;
	struct rte_eth_rxmode *rxmode;
	uint32_t intr_vector;
	int ret;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);

	PMD_INIT_LOG(DEBUG, "Start");

	/* Disabling queues just in case... */
	nfp_net_disable_queues(dev);

	/* Enabling the required queues in the device */
	nfp_net_enable_queues(dev);

	/* Check and configure queue intr-vector mapping */
	if (dev->data->dev_conf.intr_conf.rxq != 0) {
		if (app_fw_nic->multiport) {
			PMD_INIT_LOG(ERR, "PMD rx interrupt is not supported "
					  "with NFP multiport PF");
			return -EINVAL;
		}
		if (rte_intr_type_get(intr_handle) ==
						RTE_INTR_HANDLE_UIO) {
			/*
			 * Better not to share LSC with RX interrupts.
			 * Unregistering LSC interrupt handler.
			 */
			rte_intr_callback_unregister(pci_dev->intr_handle,
				nfp_net_dev_interrupt_handler, (void *)dev);

			if (dev->data->nb_rx_queues > 1) {
				PMD_INIT_LOG(ERR, "PMD rx interrupt only "
					     "supports 1 queue with UIO");
				return -EIO;
			}
		}
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;

		nfp_configure_rx_interrupt(dev, intr_handle);
		update = NFP_NET_CFG_UPDATE_MSIX;
	}

	/* Checking MTU set */
	if (dev->data->mtu > hw->flbufsz) {
		PMD_INIT_LOG(ERR, "MTU (%u) can't be larger than the current freelist buffer size (%u)",
				dev->data->mtu, hw->flbufsz);
		return -ERANGE;
	}

	rte_intr_enable(intr_handle);

	new_ctrl = nfp_check_offloads(dev);

	/* Writing configuration parameters in the device */
	nfp_net_params_setup(hw);

	dev_conf = &dev->data->dev_conf;
	rxmode = &dev_conf->rxmode;

	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) {
		nfp_net_rss_config_default(dev);
		update |= NFP_NET_CFG_UPDATE_RSS;
		new_ctrl |= nfp_net_cfg_ctrl_rss(hw->cap);
	}

	/* Enable device */
	new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;

	update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;

	/* Enable vxlan */
	if (hw->cap & NFP_NET_CFG_CTRL_VXLAN) {
		new_ctrl |= NFP_NET_CFG_CTRL_VXLAN;
		update |= NFP_NET_CFG_UPDATE_VXLAN;
	}

	if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
		new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;

	nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
	if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
		return -EIO;

	/*
	 * Allocating rte mbufs for configured rx queues.
	 * This requires queues being enabled before.
	 */
	if (nfp_net_rx_freelist_setup(dev) < 0) {
		ret = -ENOMEM;
		goto error;
	}

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		/* Configure the physical port up */
		nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 1);
	else
		nfp_eth_set_configured(dev->process_private,
				       hw->nfp_idx, 1);

	hw->ctrl = new_ctrl;

	return 0;

error:
	/*
	 * An error returned by this function should mean the app exits and
	 * the system then releases all the memory allocated, even the memory
	 * coming from hugepages.
	 *
	 * The device could be enabled at this point with some queues ready
	 * for getting packets. This is true if the call to
	 * nfp_net_rx_freelist_setup() succeeds for some queues but fails
	 * for subsequent queues.
	 *
	 * This should make the app exit, but it is better if we tell the
	 * device first.
	 */
	nfp_net_disable_queues(dev);

	return ret;
}

/* Stop device: disable rx and tx functions to allow for reconfiguring. */
static int
nfp_net_stop(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;

	PMD_INIT_LOG(DEBUG, "Stop");

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	nfp_net_disable_queues(dev);

	/* Clear queues */
	nfp_net_stop_tx_queue(dev);

	nfp_net_stop_rx_queue(dev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		/* Configure the physical port down */
		nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 0);
	else
		nfp_eth_set_configured(dev->process_private,
				       hw->nfp_idx, 0);

	return 0;
}

/* Set the link up. */
static int
nfp_net_set_link_up(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;

	PMD_DRV_LOG(DEBUG, "Set link up");

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		/* Configure the physical port up */
		return nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 1);
	else
		return nfp_eth_set_configured(dev->process_private,
					      hw->nfp_idx, 1);
}

/* Set the link down. */
static int
nfp_net_set_link_down(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;

	PMD_DRV_LOG(DEBUG, "Set link down");

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		/* Configure the physical port down */
		return nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 0);
	else
		return nfp_eth_set_configured(dev->process_private,
					      hw->nfp_idx, 0);
}

/* Reset and stop device. The device can not be restarted. */
static int
nfp_net_close(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;
	struct rte_pci_device *pci_dev;
	struct nfp_pf_dev *pf_dev;
	struct nfp_app_fw_nic *app_fw_nic;
	int i;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	PMD_INIT_LOG(DEBUG, "Close");

	pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);

	/*
	 * We assume that the DPDK application is stopping all the
	 * threads/queues before calling the device close function.
	 */

	nfp_net_disable_queues(dev);

	/* Clear queues */
	nfp_net_close_tx_queue(dev);

	nfp_net_close_rx_queue(dev);

	/* Cancel possible impending LSC work here before releasing the port */
	rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler,
			     (void *)dev);

	/* Only free PF resources after all physical ports have been closed */
	/* Mark this port as unused and free device priv resources */
	nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff);
	app_fw_nic->ports[hw->idx] = NULL;
	rte_eth_dev_release_port(dev);

	for (i = 0; i < app_fw_nic->total_phyports; i++) {
		/* Check to see if ports are still in use */
		if (app_fw_nic->ports[i])
			return 0;
	}

	/* Now it is safe to free all PF resources */
	PMD_INIT_LOG(INFO, "Freeing PF resources");
	nfp_cpp_area_free(pf_dev->ctrl_area);
	nfp_cpp_area_free(pf_dev->hwqueues_area);
	free(pf_dev->hwinfo);
	free(pf_dev->sym_tbl);
	nfp_cpp_free(pf_dev->cpp);
	rte_free(app_fw_nic);
	rte_free(pf_dev);

	rte_intr_disable(pci_dev->intr_handle);

	/* Unregister callback func from eal lib */
	rte_intr_callback_unregister(pci_dev->intr_handle,
			nfp_net_dev_interrupt_handler, (void *)dev);

	/*
	 * The ixgbe PMD disables the pcie master on the
	 * device. The i40e does not...
	 */

	return 0;
}

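/*
 * Find the VXLAN port table entry to use for @port: an existing entry
 * holding the same UDP port wins, otherwise the first slot with a zero
 * use count is picked. Returns -EINVAL when the table is full.
 */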
static int
nfp_net_find_vxlan_idx(struct nfp_net_hw *hw,
		uint16_t port,
		uint32_t *idx)
{
	uint32_t i;
	int free_idx = -1;

	for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i++) {
		if (hw->vxlan_ports[i] == port) {
			free_idx = i;
			break;
		}

		if (hw->vxlan_usecnt[i] == 0) {
			free_idx = i;
			break;
		}
	}

	if (free_idx == -1)
		return -EINVAL;

	*idx = free_idx;

	return 0;
}

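/*
 * The VXLAN add/del callbacks below keep a per-slot use count: the UDP
 * port is only written to the firmware when the first user shows up and
 * is only cleared again when the last user is gone.
 */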
static int
nfp_udp_tunnel_port_add(struct rte_eth_dev *dev,
		struct rte_eth_udp_tunnel *tunnel_udp)
{
	int ret;
	uint32_t idx;
	uint16_t vxlan_port;
	struct nfp_net_hw *hw;
	enum rte_eth_tunnel_type tnl_type;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	vxlan_port = tunnel_udp->udp_port;
	tnl_type   = tunnel_udp->prot_type;

	if (tnl_type != RTE_ETH_TUNNEL_TYPE_VXLAN) {
		PMD_DRV_LOG(ERR, "Not VXLAN tunnel");
		return -ENOTSUP;
	}

	ret = nfp_net_find_vxlan_idx(hw, vxlan_port, &idx);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Failed to find valid vxlan idx");
		return -EINVAL;
	}

	if (hw->vxlan_usecnt[idx] == 0) {
		ret = nfp_net_set_vxlan_port(hw, idx, vxlan_port);
		if (ret != 0) {
			PMD_DRV_LOG(ERR, "Failed to set vxlan port");
			return -EINVAL;
		}
	}

	hw->vxlan_usecnt[idx]++;

	return 0;
}

static int
nfp_udp_tunnel_port_del(struct rte_eth_dev *dev,
		struct rte_eth_udp_tunnel *tunnel_udp)
{
	int ret;
	uint32_t idx;
	uint16_t vxlan_port;
	struct nfp_net_hw *hw;
	enum rte_eth_tunnel_type tnl_type;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	vxlan_port = tunnel_udp->udp_port;
	tnl_type   = tunnel_udp->prot_type;

	if (tnl_type != RTE_ETH_TUNNEL_TYPE_VXLAN) {
		PMD_DRV_LOG(ERR, "Not VXLAN tunnel");
		return -ENOTSUP;
	}

	ret = nfp_net_find_vxlan_idx(hw, vxlan_port, &idx);
	if (ret != 0 || hw->vxlan_usecnt[idx] == 0) {
		PMD_DRV_LOG(ERR, "Failed to find valid vxlan idx");
		return -EINVAL;
	}

	hw->vxlan_usecnt[idx]--;

	if (hw->vxlan_usecnt[idx] == 0) {
		ret = nfp_net_set_vxlan_port(hw, idx, 0);
		if (ret != 0) {
			PMD_DRV_LOG(ERR, "Failed to set vxlan port");
			return -EINVAL;
		}
	}

	return 0;
}

/* Initialise and register driver with DPDK Application */
static const struct eth_dev_ops nfp_net_eth_dev_ops = {
	.dev_configure		= nfp_net_configure,
	.dev_start		= nfp_net_start,
	.dev_stop		= nfp_net_stop,
	.dev_set_link_up	= nfp_net_set_link_up,
	.dev_set_link_down	= nfp_net_set_link_down,
	.dev_close		= nfp_net_close,
	.promiscuous_enable	= nfp_net_promisc_enable,
	.promiscuous_disable	= nfp_net_promisc_disable,
	.link_update		= nfp_net_link_update,
	.stats_get		= nfp_net_stats_get,
	.stats_reset		= nfp_net_stats_reset,
	.dev_infos_get		= nfp_net_infos_get,
	.dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
	.mtu_set		= nfp_net_dev_mtu_set,
	.mac_addr_set		= nfp_net_set_mac_addr,
	.vlan_offload_set	= nfp_net_vlan_offload_set,
	.reta_update		= nfp_net_reta_update,
	.reta_query		= nfp_net_reta_query,
	.rss_hash_update	= nfp_net_rss_hash_update,
	.rss_hash_conf_get	= nfp_net_rss_hash_conf_get,
	.rx_queue_setup		= nfp_net_rx_queue_setup,
	.rx_queue_release	= nfp_net_rx_queue_release,
	.tx_queue_setup		= nfp_net_tx_queue_setup,
	.tx_queue_release	= nfp_net_tx_queue_release,
	.rx_queue_intr_enable   = nfp_rx_queue_intr_enable,
	.rx_queue_intr_disable  = nfp_rx_queue_intr_disable,
	.udp_tunnel_port_add    = nfp_udp_tunnel_port_add,
	.udp_tunnel_port_del    = nfp_udp_tunnel_port_del,
};

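/*
 * Hook up the per-port callbacks: pick the TX burst routine matching the
 * NFD data path class advertised by the firmware (NFD3 or NFDK, the latter
 * requiring ABI 5 or newer) and install the common dev_ops and RX burst.
 */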
static inline int
nfp_net_ethdev_ops_mount(struct nfp_net_hw *hw, struct rte_eth_dev *eth_dev)
{
	switch (NFD_CFG_CLASS_VER_of(hw->ver)) {
	case NFP_NET_CFG_VERSION_DP_NFD3:
		eth_dev->tx_pkt_burst = &nfp_net_nfd3_xmit_pkts;
		break;
	case NFP_NET_CFG_VERSION_DP_NFDK:
		if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 5) {
			PMD_DRV_LOG(ERR, "NFDK must use ABI 5 or newer, found: %d",
				NFD_CFG_MAJOR_VERSION_of(hw->ver));
			return -EINVAL;
		}
		eth_dev->tx_pkt_burst = &nfp_net_nfdk_xmit_pkts;
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported firmware data path version.");
		return -EINVAL;
	}

	eth_dev->dev_ops = &nfp_net_eth_dev_ops;
	eth_dev->rx_queue_count = nfp_net_rx_queue_count;
	eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;

	return 0;
}

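/*
 * Per-port init: map this port's slice of the PF control BAR, read the
 * firmware version and capabilities, locate the TX/RX queue BARs, set up
 * the MAC address and register the LSC interrupt handler.
 */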
static int
nfp_net_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct nfp_pf_dev *pf_dev;
	struct nfp_app_fw_nic *app_fw_nic;
	struct nfp_net_hw *hw;
	struct rte_ether_addr *tmp_ether_addr;
	uint64_t rx_bar_off = 0;
	uint64_t tx_bar_off = 0;
	uint32_t start_q;
	int stride = 4;
	int port = 0;

	PMD_INIT_FUNC_TRACE();

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	/* Use backpointer here to the PF of this eth_dev */
	pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(eth_dev->data->dev_private);

	/* Use backpointer to the CoreNIC app struct */
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);

	port = ((struct nfp_net_hw *)eth_dev->data->dev_private)->idx;
	if (port < 0 || port > 7) {
		PMD_DRV_LOG(ERR, "Port value is wrong");
		return -ENODEV;
	}

	/*
	 * Use PF array of physical ports to get pointer to
	 * this specific port.
	 */
	hw = app_fw_nic->ports[port];

	PMD_INIT_LOG(DEBUG, "Working with physical port number: %d, "
			"NFP internal port number: %d", port, hw->nfp_idx);

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;

	PMD_INIT_LOG(DEBUG, "nfp_net: device (%u:%u) %u:%u:%u:%u",
		     pci_dev->id.vendor_id, pci_dev->id.device_id,
		     pci_dev->addr.domain, pci_dev->addr.bus,
		     pci_dev->addr.devid, pci_dev->addr.function);

	hw->ctrl_bar = (uint8_t *)pci_dev->mem_resource[0].addr;
	if (hw->ctrl_bar == NULL) {
		PMD_DRV_LOG(ERR,
			"hw->ctrl_bar is NULL. BAR0 not configured");
		return -ENODEV;
	}

	if (port == 0) {
		hw->ctrl_bar = pf_dev->ctrl_bar;
	} else {
		if (pf_dev->ctrl_bar == NULL)
			return -ENODEV;
		/* Use port offset in pf ctrl_bar for this port's control bar */
		hw->ctrl_bar = pf_dev->ctrl_bar + (port * NFP_PF_CSR_SLICE_SIZE);
	}

	PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar);

	hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION);

	if (nfp_net_check_dma_mask(hw, pci_dev->name) != 0)
		return -ENODEV;

	if (nfp_net_ethdev_ops_mount(hw, eth_dev))
		return -EINVAL;

	hw->max_rx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_RXRINGS);
	hw->max_tx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_TXRINGS);

	/* Work out where in the BAR the queues start. */
	switch (pci_dev->id.device_id) {
	case PCI_DEVICE_ID_NFP3800_PF_NIC:
	case PCI_DEVICE_ID_NFP4000_PF_NIC:
	case PCI_DEVICE_ID_NFP6000_PF_NIC:
		start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
		tx_bar_off = nfp_pci_queue(pci_dev, start_q);
		start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
		rx_bar_off = nfp_pci_queue(pci_dev, start_q);
		break;
	default:
		PMD_DRV_LOG(ERR, "nfp_net: no device ID matching");
		return -ENODEV;
	}

	PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%" PRIx64 "", tx_bar_off);
	PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%" PRIx64 "", rx_bar_off);

	hw->tx_bar = pf_dev->hw_queues + tx_bar_off;
	hw->rx_bar = pf_dev->hw_queues + rx_bar_off;
	eth_dev->data->dev_private = hw;

	PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
		     hw->ctrl_bar, hw->tx_bar, hw->rx_bar);

	nfp_net_cfg_queue_setup(hw);

	/* Get some of the read-only fields from the config BAR */
	hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
	hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
	hw->mtu = RTE_ETHER_MTU;

	/* VLAN insertion is incompatible with LSOv2 */
	if (hw->cap & NFP_NET_CFG_CTRL_LSO2)
		hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN;

	nfp_net_init_metadata_format(hw);

	if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2)
		hw->rx_offset = NFP_NET_RX_OFFSET;
	else
		hw->rx_offset = nn_cfg_readl(hw, NFP_NET_CFG_RX_OFFSET_ADDR);

	hw->ctrl = 0;

	hw->stride_rx = stride;
	hw->stride_tx = stride;

	nfp_net_log_device_information(hw);

	/* Initializing spinlock for reconfigs */
	rte_spinlock_init(&hw->reconfig_lock);

	/* Allocating memory for mac addr */
	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
					       RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate space for MAC address");
		return -ENOMEM;
	}

	nfp_net_pf_read_mac(app_fw_nic, port);
	nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);

	tmp_ether_addr = (struct rte_ether_addr *)&hw->mac_addr;
	if (!rte_is_valid_assigned_ether_addr(tmp_ether_addr)) {
		PMD_INIT_LOG(INFO, "Using random mac address for port %d", port);
		/* Using a random mac address for this port */
		rte_eth_random_addr(&hw->mac_addr[0]);
		nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
	}

	/* Copying mac address to DPDK eth_dev struct */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac_addr,
			&eth_dev->data->mac_addrs[0]);

	if ((hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR) == 0)
		eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR;

	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x "
		     "mac=" RTE_ETHER_ADDR_PRT_FMT,
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id,
		     hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
		     hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);

	/* Registering LSC interrupt handler */
	rte_intr_callback_register(pci_dev->intr_handle,
			nfp_net_dev_interrupt_handler, (void *)eth_dev);
	/* Telling the firmware about the LSC interrupt entry */
	nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
	/* Recording current stats counters values */
	nfp_net_stats_reset(eth_dev);

	return 0;
}

#define DEFAULT_FW_PATH       "/lib/firmware/netronome"

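/*
 * Firmware images are looked up under DEFAULT_FW_PATH in order of
 * priority: a serial-number specific file, then a PCI-name specific file,
 * then a generic file for the card type and media. Illustrative (not
 * real) examples of the resulting names:
 *
 *   serial-00-15-4d-13-51-0c-10-ff.nffw
 *   pci-0000:04:00.0.nffw
 *   nic_AMDA0096-0001_2x25.nffw
 */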
static int
nfp_fw_upload(struct rte_pci_device *dev, struct nfp_nsp *nsp, char *card)
{
	struct nfp_cpp *cpp = nsp->cpp;
	void *fw_buf;
	char fw_name[125];
	char serial[40];
	size_t fsize;

	/* Looking for firmware file in order of priority */

	/* First try to find a firmware image specific for this device */
	snprintf(serial, sizeof(serial),
			"serial-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x",
		cpp->serial[0], cpp->serial[1], cpp->serial[2], cpp->serial[3],
		cpp->serial[4], cpp->serial[5], cpp->interface >> 8,
		cpp->interface & 0xff);

	snprintf(fw_name, sizeof(fw_name), "%s/%s.nffw", DEFAULT_FW_PATH,
			serial);

	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
	if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0)
		goto load_fw;

	/* Then try the PCI name */
	snprintf(fw_name, sizeof(fw_name), "%s/pci-%s.nffw", DEFAULT_FW_PATH,
			dev->name);

	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
	if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0)
		goto load_fw;

	/* Finally try the card type and media */
	snprintf(fw_name, sizeof(fw_name), "%s/%s", DEFAULT_FW_PATH, card);
	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
	if (rte_firmware_read(fw_name, &fw_buf, &fsize) < 0) {
		PMD_DRV_LOG(INFO, "Firmware file %s not found.", fw_name);
		return -ENOENT;
	}

load_fw:
	PMD_DRV_LOG(INFO, "Firmware file found at %s with size: %zu",
		fw_name, fsize);
	PMD_DRV_LOG(INFO, "Uploading the firmware ...");
	nfp_nsp_load_fw(nsp, fw_buf, fsize);
	PMD_DRV_LOG(INFO, "Done");

	free(fw_buf);

	return 0;
}

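/*
 * Build the generic firmware name from the hwinfo part number plus the
 * port count and speed reported by the NSP eth table, soft reset the
 * device through NSP and hand the chosen image to nfp_fw_upload().
 */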
static int
nfp_fw_setup(struct rte_pci_device *dev,
		struct nfp_cpp *cpp,
		struct nfp_eth_table *nfp_eth_table,
		struct nfp_hwinfo *hwinfo)
{
	struct nfp_nsp *nsp;
	const char *nfp_fw_model;
	char card_desc[100];
	int err = 0;

	nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "nffw.partno");
	if (nfp_fw_model == NULL)
		nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "assembly.partno");

	if (nfp_fw_model) {
		PMD_DRV_LOG(INFO, "firmware model found: %s", nfp_fw_model);
	} else {
		PMD_DRV_LOG(ERR, "firmware model NOT found");
		return -EIO;
	}

	if (nfp_eth_table->count == 0 || nfp_eth_table->count > 8) {
		PMD_DRV_LOG(ERR, "NFP ethernet table reports wrong ports: %u",
			nfp_eth_table->count);
		return -EIO;
	}

	PMD_DRV_LOG(INFO, "NFP ethernet port table reports %u ports",
			nfp_eth_table->count);

	PMD_DRV_LOG(INFO, "Port speed: %u", nfp_eth_table->ports[0].speed);

	snprintf(card_desc, sizeof(card_desc), "nic_%s_%dx%d.nffw",
			nfp_fw_model, nfp_eth_table->count,
			nfp_eth_table->ports[0].speed / 1000);

	nsp = nfp_nsp_open(cpp);
	if (nsp == NULL) {
		PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle");
		return -EIO;
	}

	nfp_nsp_device_soft_reset(nsp);
	err = nfp_fw_upload(dev, nsp, card_desc);

	nfp_nsp_close(nsp);
	return err;
}

static int
nfp_init_app_fw_nic(struct nfp_pf_dev *pf_dev)
{
	int i;
	int ret;
	int err = 0;
	int total_vnics;
	struct nfp_net_hw *hw;
	unsigned int numa_node;
	struct rte_eth_dev *eth_dev;
	struct nfp_app_fw_nic *app_fw_nic;
	struct nfp_eth_table *nfp_eth_table;
	char port_name[RTE_ETH_NAME_MAX_LEN];

	nfp_eth_table = pf_dev->nfp_eth_table;
	PMD_INIT_LOG(INFO, "Total physical ports: %d", nfp_eth_table->count);

	/* Allocate memory for the CoreNIC app */
	app_fw_nic = rte_zmalloc("nfp_app_fw_nic", sizeof(*app_fw_nic), 0);
	if (app_fw_nic == NULL)
		return -ENOMEM;

	/* Point the app_fw_priv pointer in the PF to the coreNIC app */
	pf_dev->app_fw_priv = app_fw_nic;

	/* Read the number of vNICs created for the PF */
	total_vnics = nfp_rtsym_read_le(pf_dev->sym_tbl, "nfd_cfg_pf0_num_ports", &err);
	if (err != 0 || total_vnics <= 0 || total_vnics > 8) {
		PMD_INIT_LOG(ERR, "nfd_cfg_pf0_num_ports symbol with wrong value");
		ret = -ENODEV;
		goto app_cleanup;
	}

	/*
	 * For coreNIC the number of vNICs exposed should be the same as the
	 * number of physical ports.
	 */
	if (total_vnics != (int)nfp_eth_table->count) {
		PMD_INIT_LOG(ERR, "Total physical ports do not match number of vNICs");
		ret = -ENODEV;
		goto app_cleanup;
	}

	/* Populate coreNIC app properties */
	app_fw_nic->total_phyports = total_vnics;
	app_fw_nic->pf_dev = pf_dev;
	if (total_vnics > 1)
		app_fw_nic->multiport = true;

	/* Map the symbol table */
	pf_dev->ctrl_bar = nfp_rtsym_map(pf_dev->sym_tbl, "_pf0_net_bar0",
			app_fw_nic->total_phyports * 32768, &pf_dev->ctrl_area);
	if (pf_dev->ctrl_bar == NULL) {
		PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for _pf0_net_bar0");
		ret = -EIO;
		goto app_cleanup;
	}

	PMD_INIT_LOG(DEBUG, "ctrl bar: %p", pf_dev->ctrl_bar);

	/* Loop through all physical ports on PF */
	numa_node = rte_socket_id();
	for (i = 0; i < app_fw_nic->total_phyports; i++) {
		snprintf(port_name, sizeof(port_name), "%s_port%d",
			 pf_dev->pci_dev->device.name, i);

		/* Allocate an eth_dev for this phyport */
		eth_dev = rte_eth_dev_allocate(port_name);
		if (eth_dev == NULL) {
			ret = -ENODEV;
			goto port_cleanup;
		}

		/* Allocate memory for this phyport */
		eth_dev->data->dev_private =
			rte_zmalloc_socket(port_name, sizeof(struct nfp_net_hw),
				RTE_CACHE_LINE_SIZE, numa_node);
		if (eth_dev->data->dev_private == NULL) {
			ret = -ENOMEM;
			rte_eth_dev_release_port(eth_dev);
			goto port_cleanup;
		}

		hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

		/* Add this device to the PF's array of physical ports */
		app_fw_nic->ports[i] = hw;

		hw->pf_dev = pf_dev;
		hw->cpp = pf_dev->cpp;
		hw->eth_dev = eth_dev;
		hw->idx = i;
		hw->nfp_idx = nfp_eth_table->ports[i].index;

		eth_dev->device = &pf_dev->pci_dev->device;

		/*
		 * Ctrl/tx/rx BAR mappings and remaining init happen in
		 * nfp_net_init().
		 */
		ret = nfp_net_init(eth_dev);
		if (ret) {
			ret = -ENODEV;
			goto port_cleanup;
		}

		rte_eth_dev_probing_finish(eth_dev);

	} /* End loop, all ports on this PF */

	return 0;

port_cleanup:
	for (i = 0; i < app_fw_nic->total_phyports; i++) {
		if (app_fw_nic->ports[i] && app_fw_nic->ports[i]->eth_dev) {
			struct rte_eth_dev *tmp_dev;
			tmp_dev = app_fw_nic->ports[i]->eth_dev;
			rte_eth_dev_release_port(tmp_dev);
			app_fw_nic->ports[i] = NULL;
		}
	}
	nfp_cpp_area_free(pf_dev->ctrl_area);
app_cleanup:
	rte_free(app_fw_nic);

	return ret;
}

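/*
 * PF probe path for the primary process: acquire a CPP handle, read the
 * hwinfo and NSP port tables, upload firmware if needed, read the runtime
 * symbol table and the firmware app ID, map the queue controller area and
 * finally dispatch to the coreNIC or Flower specific initialisation.
 */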
static int
nfp_pf_init(struct rte_pci_device *pci_dev)
{
	int ret;
	int err = 0;
	uint64_t addr;
	struct nfp_cpp *cpp;
	enum nfp_app_fw_id app_fw_id;
	struct nfp_pf_dev *pf_dev;
	struct nfp_hwinfo *hwinfo;
	char name[RTE_ETH_NAME_MAX_LEN];
	struct nfp_rtsym_table *sym_tbl;
	struct nfp_eth_table *nfp_eth_table;

	if (pci_dev == NULL)
		return -ENODEV;

	/*
	 * When the device is bound to UIO it could be used, by mistake,
	 * by two DPDK apps, and the UIO driver does not prevent it. This
	 * could lead to a serious problem when configuring the NFP CPP
	 * interface. We avoid this by telling the CPP init code to use a
	 * lock file if UIO is being used.
	 */
	if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO)
		cpp = nfp_cpp_from_device_name(pci_dev, 0);
	else
		cpp = nfp_cpp_from_device_name(pci_dev, 1);

	if (cpp == NULL) {
		PMD_INIT_LOG(ERR, "A CPP handle can not be obtained");
		return -EIO;
	}

	hwinfo = nfp_hwinfo_read(cpp);
	if (hwinfo == NULL) {
		PMD_INIT_LOG(ERR, "Error reading hwinfo table");
		ret = -EIO;
		goto cpp_cleanup;
	}

	/* Read the number of physical ports from hardware */
	nfp_eth_table = nfp_eth_read_ports(cpp);
	if (nfp_eth_table == NULL) {
		PMD_INIT_LOG(ERR, "Error reading NFP ethernet table");
		ret = -EIO;
		goto hwinfo_cleanup;
	}

	if (nfp_fw_setup(pci_dev, cpp, nfp_eth_table, hwinfo)) {
		PMD_INIT_LOG(ERR, "Error when uploading firmware");
		ret = -EIO;
		goto eth_table_cleanup;
	}

	/* Now the symbol table should be there */
	sym_tbl = nfp_rtsym_table_read(cpp);
	if (sym_tbl == NULL) {
		PMD_INIT_LOG(ERR, "Something is wrong with the firmware"
				" symbol table");
		ret = -EIO;
		goto eth_table_cleanup;
	}

	/* Read the app ID of the firmware loaded */
	app_fw_id = nfp_rtsym_read_le(sym_tbl, "_pf0_net_app_id", &err);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Couldn't read app_fw_id from fw");
		ret = -EIO;
		goto sym_tbl_cleanup;
	}

	/* Allocate memory for the PF "device" */
	snprintf(name, sizeof(name), "nfp_pf%d", 0);
	pf_dev = rte_zmalloc(name, sizeof(*pf_dev), 0);
	if (pf_dev == NULL) {
		ret = -ENOMEM;
		goto sym_tbl_cleanup;
	}

	/* Populate the newly created PF device */
	pf_dev->app_fw_id = app_fw_id;
	pf_dev->cpp = cpp;
	pf_dev->hwinfo = hwinfo;
	pf_dev->sym_tbl = sym_tbl;
	pf_dev->pci_dev = pci_dev;
	pf_dev->nfp_eth_table = nfp_eth_table;

	/* Configure access to tx/rx vNIC BARs */
	switch (pci_dev->id.device_id) {
	case PCI_DEVICE_ID_NFP3800_PF_NIC:
		addr = NFP_PCIE_QUEUE(NFP_PCIE_QCP_NFP3800_OFFSET,
					0, NFP_PCIE_QUEUE_NFP3800_MASK);
		break;
	case PCI_DEVICE_ID_NFP4000_PF_NIC:
	case PCI_DEVICE_ID_NFP6000_PF_NIC:
		addr = NFP_PCIE_QUEUE(NFP_PCIE_QCP_NFP6000_OFFSET,
					0, NFP_PCIE_QUEUE_NFP6000_MASK);
		break;
	default:
		PMD_INIT_LOG(ERR, "nfp_net: no device ID matching");
		ret = -ENODEV;
		goto pf_cleanup;
	}

	pf_dev->hw_queues = nfp_cpp_map_area(pf_dev->cpp, 0, 0,
			addr, NFP_QCP_QUEUE_AREA_SZ,
			&pf_dev->hwqueues_area);
	if (pf_dev->hw_queues == NULL) {
		PMD_INIT_LOG(ERR, "nfp_cpp_map_area fails for the queue controller area");
		ret = -EIO;
		goto pf_cleanup;
	}

	PMD_INIT_LOG(DEBUG, "tx/rx bar address: 0x%p", pf_dev->hw_queues);

	/*
	 * PF initialization has been done at this point. Call app specific
	 * init code now.
	 */
	switch (pf_dev->app_fw_id) {
	case NFP_APP_FW_CORE_NIC:
		PMD_INIT_LOG(INFO, "Initializing coreNIC");
		ret = nfp_init_app_fw_nic(pf_dev);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Could not initialize coreNIC!");
			goto hwqueues_cleanup;
		}
		break;
	case NFP_APP_FW_FLOWER_NIC:
		PMD_INIT_LOG(INFO, "Initializing Flower");
		ret = nfp_init_app_fw_flower(pf_dev);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Could not initialize Flower!");
			goto hwqueues_cleanup;
		}
		break;
	default:
		PMD_INIT_LOG(ERR, "Unsupported Firmware loaded");
		ret = -EINVAL;
		goto hwqueues_cleanup;
	}

	/* Register the CPP bridge service here for primary use */
	ret = nfp_enable_cpp_service(pf_dev);
	if (ret != 0)
		PMD_INIT_LOG(INFO, "Enable cpp service failed.");

	return 0;

hwqueues_cleanup:
	nfp_cpp_area_free(pf_dev->hwqueues_area);
pf_cleanup:
	rte_free(pf_dev);
sym_tbl_cleanup:
	free(sym_tbl);
eth_table_cleanup:
	free(nfp_eth_table);
hwinfo_cleanup:
	free(hwinfo);
cpp_cleanup:
	nfp_cpp_free(cpp);

	return ret;
}

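/*
 * Secondary-process counterpart of the coreNIC init: attach to each port
 * ethdev created by the primary process, store the CPP handle as
 * process-private data and mount the datapath function pointers.
 */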
static int
nfp_secondary_init_app_fw_nic(struct rte_pci_device *pci_dev,
		struct nfp_rtsym_table *sym_tbl,
		struct nfp_cpp *cpp)
{
	int i;
	int err = 0;
	int ret = 0;
	int total_vnics;
	struct nfp_net_hw *hw;

	/* Read the number of vNICs created for the PF */
	total_vnics = nfp_rtsym_read_le(sym_tbl, "nfd_cfg_pf0_num_ports", &err);
	if (err != 0 || total_vnics <= 0 || total_vnics > 8) {
		PMD_INIT_LOG(ERR, "nfd_cfg_pf0_num_ports symbol with wrong value");
		return -ENODEV;
	}

	for (i = 0; i < total_vnics; i++) {
		struct rte_eth_dev *eth_dev;
		char port_name[RTE_ETH_NAME_MAX_LEN];

		snprintf(port_name, sizeof(port_name), "%s_port%d",
				pci_dev->device.name, i);

		PMD_INIT_LOG(DEBUG, "Secondary attaching to port %s", port_name);
		eth_dev = rte_eth_dev_attach_secondary(port_name);
		if (eth_dev == NULL) {
			PMD_INIT_LOG(ERR, "Secondary process attach to port %s failed", port_name);
			ret = -ENODEV;
			break;
		}

		eth_dev->process_private = cpp;
		hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
		if (nfp_net_ethdev_ops_mount(hw, eth_dev))
			return -EINVAL;

		rte_eth_dev_probing_finish(eth_dev);
	}

	return ret;
}

/*
 * When attaching to the NFP4000/6000 PF on a secondary process there
 * is no need to initialise the PF again. Only minimal work is required
 * here.
 */
static int
nfp_pf_secondary_init(struct rte_pci_device *pci_dev)
{
	int err = 0;
	int ret = 0;
	struct nfp_cpp *cpp;
	enum nfp_app_fw_id app_fw_id;
	struct nfp_rtsym_table *sym_tbl;

	if (pci_dev == NULL)
		return -ENODEV;

	/*
	 * When the device is bound to UIO it could be used, by mistake,
	 * by two DPDK apps, and the UIO driver does not prevent it. This
	 * could lead to a serious problem when configuring the NFP CPP
	 * interface. We avoid this by telling the CPP init code to use a
	 * lock file if UIO is being used.
	 */
	if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO)
		cpp = nfp_cpp_from_device_name(pci_dev, 0);
	else
		cpp = nfp_cpp_from_device_name(pci_dev, 1);

	if (cpp == NULL) {
		PMD_INIT_LOG(ERR, "A CPP handle can not be obtained");
		return -EIO;
	}

	/*
	 * We don't have access to the PF created in the primary process
	 * here so we have to read the number of ports from firmware.
	 */
	sym_tbl = nfp_rtsym_table_read(cpp);
	if (sym_tbl == NULL) {
		PMD_INIT_LOG(ERR, "Something is wrong with the firmware"
				" symbol table");
		return -EIO;
	}

	/* Read the app ID of the firmware loaded */
	app_fw_id = nfp_rtsym_read_le(sym_tbl, "_pf0_net_app_id", &err);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Couldn't read app_fw_id from fw");
		/* Do not report success when the app ID cannot be read */
		ret = -EIO;
		goto sym_tbl_cleanup;
	}

	switch (app_fw_id) {
	case NFP_APP_FW_CORE_NIC:
		PMD_INIT_LOG(INFO, "Initializing coreNIC");
		ret = nfp_secondary_init_app_fw_nic(pci_dev, sym_tbl, cpp);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Could not initialize coreNIC!");
			goto sym_tbl_cleanup;
		}
		break;
	case NFP_APP_FW_FLOWER_NIC:
		PMD_INIT_LOG(INFO, "Initializing Flower");
		ret = nfp_secondary_init_app_fw_flower(cpp);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Could not initialize Flower!");
			goto sym_tbl_cleanup;
		}
		break;
	default:
		PMD_INIT_LOG(ERR, "Unsupported Firmware loaded");
		ret = -EINVAL;
		goto sym_tbl_cleanup;
	}

sym_tbl_cleanup:
	free(sym_tbl);

	return ret;
}

static int
nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *dev)
{
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		return nfp_pf_init(dev);
	else
		return nfp_pf_secondary_init(dev);
}

static const struct rte_pci_id pci_id_nfp_pf_net_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
			       PCI_DEVICE_ID_NFP3800_PF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
			       PCI_DEVICE_ID_NFP4000_PF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
			       PCI_DEVICE_ID_NFP6000_PF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE,
			       PCI_DEVICE_ID_NFP3800_PF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE,
			       PCI_DEVICE_ID_NFP4000_PF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE,
			       PCI_DEVICE_ID_NFP6000_PF_NIC)
	},
	{
		.vendor_id = 0,
	},
};

static int
nfp_pci_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	uint16_t port_id;

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	/* Free up all physical ports under PF */
	RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device)
		rte_eth_dev_close(port_id);
	/*
	 * Ports can be closed and freed but hotplugging is not
	 * currently supported.
	 */
	return -ENOTSUP;
}

static int
eth_nfp_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, nfp_pci_uninit);
}

static struct rte_pci_driver rte_nfp_net_pf_pmd = {
	.id_table = pci_id_nfp_pf_net_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = nfp_pf_pci_probe,
	.remove = eth_nfp_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_nfp_pf, rte_nfp_net_pf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_nfp_pf, pci_id_nfp_pf_net_map);
RTE_PMD_REGISTER_KMOD_DEP(net_nfp_pf, "* igb_uio | uio_pci_generic | vfio");
/*
 * Local variables:
 * c-file-style: "Linux"
 * indent-tabs-mode: t
 * End:
 */