xref: /dpdk/drivers/net/nfp/nfp_ethdev.c (revision bd4a5aa413583aa698f10849c4784a3d524566bc)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2014-2021 Netronome Systems, Inc.
3  * All rights reserved.
4  *
5  * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
6  */
7 
#include <errno.h>
#include <unistd.h>

#include <eal_firmware.h>
#include <rte_alarm.h>
#include <rte_kvargs.h>

#include "flower/nfp_flower.h"
#include "nfd3/nfp_nfd3.h"
#include "nfdk/nfp_nfdk.h"
#include "nfpcore/nfp_cpp.h"
#include "nfpcore/nfp_elf.h"
#include "nfpcore/nfp_hwinfo.h"
#include "nfpcore/nfp_rtsym.h"
#include "nfpcore/nfp_nsp.h"
#include "nfpcore/nfp6000_pcie.h"
#include "nfpcore/nfp_resource.h"
#include "nfpcore/nfp_sync.h"

#include "nfp_cpp_bridge.h"
#include "nfp_ipsec.h"
#include "nfp_logs.h"
#include "nfp_net_flow.h"
31 /* 64-bit per app capabilities */
32 #define NFP_NET_APP_CAP_SP_INDIFF       RTE_BIT64(0) /* Indifferent to port speed */
33 
34 #define NFP_PF_DRIVER_NAME net_nfp_pf
35 #define NFP_PF_FORCE_RELOAD_FW   "force_reload_fw"
36 
37 static int
38 nfp_devarg_handle_int(const char *key,
39 		const char *value,
40 		void *extra_args)
41 {
42 	char *end_ptr;
43 	uint64_t *num = extra_args;
44 
45 	if (value == NULL)
46 		return -EPERM;
47 
48 	*num = strtoul(value, &end_ptr, 10);
49 	if (*num == ULONG_MAX) {
50 		PMD_DRV_LOG(ERR, "%s: '%s' is not a valid param", key, value);
51 		return -ERANGE;
52 	} else if (value == end_ptr) {
53 		return -EPERM;
54 	}
55 
56 	return 0;
57 }
58 
59 static void
60 nfp_devarg_parse_force_reload_fw(struct rte_kvargs *kvlist,
61 		bool *force_reload_fw)
62 {
63 	int ret;
64 	uint64_t value;
65 
66 
67 	if (rte_kvargs_count(kvlist, NFP_PF_FORCE_RELOAD_FW) != 1)
68 		return;
69 
70 	ret = rte_kvargs_process(kvlist, NFP_PF_FORCE_RELOAD_FW, &nfp_devarg_handle_int, &value);
71 	if (ret != 0)
72 		return;
73 
74 	if (value == 1)
75 		*force_reload_fw = true;
76 	else if (value == 0)
77 		*force_reload_fw = false;
78 	else
79 		PMD_DRV_LOG(ERR, "The param does not work, the format is %s=0/1",
80 				NFP_PF_FORCE_RELOAD_FW);
81 }
82 
83 static void
84 nfp_devargs_parse(struct nfp_devargs *nfp_devargs_param,
85 		const struct rte_devargs *devargs)
86 {
87 	struct rte_kvargs *kvlist;
88 
89 	if (devargs == NULL)
90 		return;
91 
92 	kvlist = rte_kvargs_parse(devargs->args, NULL);
93 	if (kvlist == NULL)
94 		return;
95 
96 	nfp_devarg_parse_force_reload_fw(kvlist, &nfp_devargs_param->force_reload_fw);
97 
98 	rte_kvargs_free(kvlist);
99 }
100 
101 static void
102 nfp_net_pf_read_mac(struct nfp_app_fw_nic *app_fw_nic,
103 		uint16_t port)
104 {
105 	struct nfp_net_hw *hw;
106 	struct nfp_eth_table *nfp_eth_table;
107 
108 	/* Grab a pointer to the correct physical port */
109 	hw = app_fw_nic->ports[port];
110 
111 	nfp_eth_table = app_fw_nic->pf_dev->nfp_eth_table;
112 
113 	rte_ether_addr_copy(&nfp_eth_table->ports[port].mac_addr, &hw->super.mac_addr);
114 }
115 
116 static uint32_t
117 nfp_net_speed_bitmap2speed(uint32_t speeds_bitmap)
118 {
119 	switch (speeds_bitmap) {
120 	case RTE_ETH_LINK_SPEED_10M_HD:
121 		return RTE_ETH_SPEED_NUM_10M;
122 	case RTE_ETH_LINK_SPEED_10M:
123 		return RTE_ETH_SPEED_NUM_10M;
124 	case RTE_ETH_LINK_SPEED_100M_HD:
125 		return RTE_ETH_SPEED_NUM_100M;
126 	case RTE_ETH_LINK_SPEED_100M:
127 		return RTE_ETH_SPEED_NUM_100M;
128 	case RTE_ETH_LINK_SPEED_1G:
129 		return RTE_ETH_SPEED_NUM_1G;
130 	case RTE_ETH_LINK_SPEED_2_5G:
131 		return RTE_ETH_SPEED_NUM_2_5G;
132 	case RTE_ETH_LINK_SPEED_5G:
133 		return RTE_ETH_SPEED_NUM_5G;
134 	case RTE_ETH_LINK_SPEED_10G:
135 		return RTE_ETH_SPEED_NUM_10G;
136 	case RTE_ETH_LINK_SPEED_20G:
137 		return RTE_ETH_SPEED_NUM_20G;
138 	case RTE_ETH_LINK_SPEED_25G:
139 		return RTE_ETH_SPEED_NUM_25G;
140 	case RTE_ETH_LINK_SPEED_40G:
141 		return RTE_ETH_SPEED_NUM_40G;
142 	case RTE_ETH_LINK_SPEED_50G:
143 		return RTE_ETH_SPEED_NUM_50G;
144 	case RTE_ETH_LINK_SPEED_56G:
145 		return RTE_ETH_SPEED_NUM_56G;
146 	case RTE_ETH_LINK_SPEED_100G:
147 		return RTE_ETH_SPEED_NUM_100G;
148 	case RTE_ETH_LINK_SPEED_200G:
149 		return RTE_ETH_SPEED_NUM_200G;
150 	case RTE_ETH_LINK_SPEED_400G:
151 		return RTE_ETH_SPEED_NUM_400G;
152 	default:
153 		return RTE_ETH_SPEED_NUM_NONE;
154 	}
155 }
156 
157 static int
158 nfp_net_nfp4000_speed_configure_check(uint16_t port_id,
159 		uint32_t configure_speed,
160 		struct nfp_eth_table *nfp_eth_table)
161 {
162 	switch (port_id) {
163 	case 0:
164 		if (configure_speed == RTE_ETH_SPEED_NUM_25G &&
165 				nfp_eth_table->ports[1].speed == RTE_ETH_SPEED_NUM_10G) {
166 			PMD_DRV_LOG(ERR, "The speed configuration is not supported for NFP4000.");
167 			return -ENOTSUP;
168 		}
169 		break;
170 	case 1:
171 		if (configure_speed == RTE_ETH_SPEED_NUM_10G &&
172 				nfp_eth_table->ports[0].speed == RTE_ETH_SPEED_NUM_25G) {
173 			PMD_DRV_LOG(ERR, "The speed configuration is not supported for NFP4000.");
174 			return -ENOTSUP;
175 		}
176 		break;
177 	default:
178 		PMD_DRV_LOG(ERR, "The port id is invalid.");
179 		return -EINVAL;
180 	}
181 
182 	return 0;
183 }
184 
/*
 * Apply the port speed / auto-negotiation settings requested via
 * dev_conf.link_speeds to the physical port backing this ethdev.
 * The change is staged in an NSP transaction and committed at the end.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int
nfp_net_speed_configure(struct rte_eth_dev *dev,
		struct nfp_net_hw *net_hw)
{
	int ret;
	uint32_t speed_capa;
	struct nfp_nsp *nsp;
	uint32_t link_speeds;
	uint32_t configure_speed;
	struct nfp_eth_table_port *eth_port;
	struct nfp_eth_table *nfp_eth_table;

	nfp_eth_table = net_hw->pf_dev->nfp_eth_table;
	eth_port = &nfp_eth_table->ports[net_hw->idx];

	speed_capa = net_hw->pf_dev->speed_capa;
	if (speed_capa == 0) {
		PMD_DRV_LOG(ERR, "Speed_capa is invalid.");
		return -EINVAL;
	}

	/*
	 * Mask the requested bitmap with the port's capabilities; an
	 * unsupported request maps to RTE_ETH_SPEED_NUM_NONE and is only
	 * acceptable when autonegotiation was asked for.
	 */
	link_speeds = dev->data->dev_conf.link_speeds;
	configure_speed = nfp_net_speed_bitmap2speed(speed_capa & link_speeds);
	if (configure_speed == RTE_ETH_SPEED_NUM_NONE &&
			link_speeds != RTE_ETH_LINK_SPEED_AUTONEG) {
		PMD_DRV_LOG(ERR, "Configured speed is invalid.");
		return -EINVAL;
	}

	/* NFP4000 does not allow the port 0 25Gbps and port 1 10Gbps at the same time. */
	if (net_hw->device_id == PCI_DEVICE_ID_NFP4000_PF_NIC) {
		ret = nfp_net_nfp4000_speed_configure_check(net_hw->idx,
				configure_speed, nfp_eth_table);
		if (ret != 0) {
			PMD_DRV_LOG(ERR, "Failed to configure speed for NFP4000.");
			return ret;
		}
	}

	/* Open an NSP config transaction for this physical port. */
	nsp = nfp_eth_config_start(net_hw->cpp, eth_port->index);
	if (nsp == NULL) {
		PMD_DRV_LOG(ERR, "Couldn't get NSP.");
		return -EIO;
	}

	if (link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
		/* Autoneg requested: enable it only if the port supports it. */
		if (eth_port->supp_aneg) {
			ret = nfp_eth_set_aneg(nsp, NFP_ANEG_AUTO);
			if (ret != 0) {
				PMD_DRV_LOG(ERR, "Failed to set ANEG enable.");
				goto config_cleanup;
			}
		}
	} else {
		/* Fixed speed: autoneg must be off before forcing the rate. */
		ret = nfp_eth_set_aneg(nsp, NFP_ANEG_DISABLED);
		if (ret != 0) {
			PMD_DRV_LOG(ERR, "Failed to set ANEG disable.");
			goto config_cleanup;
		}

		ret = nfp_eth_set_speed(nsp, configure_speed);
		if (ret != 0) {
			PMD_DRV_LOG(ERR, "Failed to set speed.");
			goto config_cleanup;
		}
	}

	/* Commit the staged configuration; this also closes the NSP handle. */
	return nfp_eth_config_commit_end(nsp);

config_cleanup:
	/* Abort the transaction without applying anything. */
	nfp_eth_config_cleanup_end(nsp);

	return ret;
}
259 
/*
 * Ethdev dev_start callback: configure link speed, Rx interrupts, RSS and
 * offloads, push the ctrl words to the firmware, fill the Rx freelists and
 * finally bring the physical port up.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int
nfp_net_start(struct rte_eth_dev *dev)
{
	int ret;
	uint16_t i;
	struct nfp_hw *hw;
	uint32_t new_ctrl;
	struct nfp_cpp *cpp;
	uint32_t update = 0;
	uint32_t cap_extend;
	uint32_t intr_vector;
	uint32_t ctrl_extend = 0;
	struct nfp_net_hw *net_hw;
	struct nfp_pf_dev *pf_dev;
	struct rte_eth_rxmode *rxmode;
	struct nfp_app_fw_nic *app_fw_nic;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;

	net_hw = dev->data->dev_private;
	pf_dev = net_hw->pf_dev;
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);
	hw = &net_hw->super;

	/* Disabling queues just in case... */
	nfp_net_disable_queues(dev);

	/* Enabling the required queues in the device */
	nfp_net_enable_queues(dev);

	/* Configure the port speed and the auto-negotiation mode. */
	ret = nfp_net_speed_configure(dev, net_hw);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to set the speed and auto-negotiation mode.");
		return ret;
	}

	/* Check and configure queue intr-vector mapping */
	if (dev->data->dev_conf.intr_conf.rxq != 0) {
		if (app_fw_nic->multiport) {
			PMD_INIT_LOG(ERR, "PMD rx interrupt is not supported "
					"with NFP multiport PF");
				return -EINVAL;
		}

		if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_UIO) {
			/*
			 * Better not to share LSC with RX interrupts.
			 * Unregistering LSC interrupt handler.
			 */
			rte_intr_callback_unregister(intr_handle,
					nfp_net_dev_interrupt_handler, (void *)dev);

			if (dev->data->nb_rx_queues > 1) {
				PMD_INIT_LOG(ERR, "PMD rx interrupt only "
						"supports 1 queue with UIO");
				return -EIO;
			}
		}

		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector) != 0)
			return -1;

		nfp_configure_rx_interrupt(dev, intr_handle);
		update = NFP_NET_CFG_UPDATE_MSIX;
	}

	/* Checking MTU set */
	if (dev->data->mtu > net_hw->flbufsz) {
		PMD_INIT_LOG(ERR, "MTU (%u) can't be larger than the current NFP_FRAME_SIZE (%u)",
				dev->data->mtu, net_hw->flbufsz);
		return -ERANGE;
	}

	rte_intr_enable(intr_handle);

	/* Derive the ctrl bits from the configured Rx/Tx offloads. */
	new_ctrl = nfp_check_offloads(dev);

	/* Writing configuration parameters in the device */
	nfp_net_params_setup(net_hw);

	/* Enable RSS when the Rx multi-queue mode requests it. */
	rxmode = &dev->data->dev_conf.rxmode;
	if ((rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) != 0) {
		nfp_net_rss_config_default(dev);
		update |= NFP_NET_CFG_UPDATE_RSS;
		new_ctrl |= nfp_net_cfg_ctrl_rss(hw->cap);
	}

	/* Enable device */
	new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;

	update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;

	/* Enable vxlan */
	if ((hw->cap & NFP_NET_CFG_CTRL_VXLAN) != 0) {
		new_ctrl |= NFP_NET_CFG_CTRL_VXLAN;
		update |= NFP_NET_CFG_UPDATE_VXLAN;
	}

	if ((hw->cap & NFP_NET_CFG_CTRL_RINGCFG) != 0)
		new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;

	/* Push the accumulated ctrl word and update mask to the firmware. */
	if (nfp_reconfig(hw, new_ctrl, update) != 0)
		return -EIO;

	hw->ctrl = new_ctrl;

	/* Enable packet type offload by extend ctrl word1. */
	cap_extend = hw->cap_ext;
	if ((cap_extend & NFP_NET_CFG_CTRL_PKT_TYPE) != 0)
		ctrl_extend = NFP_NET_CFG_CTRL_PKT_TYPE;

	if ((cap_extend & NFP_NET_CFG_CTRL_IPSEC) != 0)
		ctrl_extend |= NFP_NET_CFG_CTRL_IPSEC |
				NFP_NET_CFG_CTRL_IPSEC_SM_LOOKUP |
				NFP_NET_CFG_CTRL_IPSEC_LM_LOOKUP;

	/* Enable flow steer by extend ctrl word1. */
	if ((cap_extend & NFP_NET_CFG_CTRL_FLOW_STEER) != 0)
		ctrl_extend |= NFP_NET_CFG_CTRL_FLOW_STEER;

	update = NFP_NET_CFG_UPDATE_GEN;
	if (nfp_ext_reconfig(hw, ctrl_extend, update) != 0)
		return -EIO;

	hw->ctrl_ext = ctrl_extend;

	/*
	 * Allocating rte mbufs for configured rx queues.
	 * This requires queues being enabled before.
	 */
	if (nfp_net_rx_freelist_setup(dev) != 0) {
		ret = -ENOMEM;
		goto error;
	}

	/* Secondary processes reach the CPP handle through process_private. */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		cpp = net_hw->cpp;
	else
		cpp = ((struct nfp_pf_dev *)(dev->process_private))->cpp;

	/* Configure the physical port up */
	nfp_eth_set_configured(cpp, net_hw->nfp_idx, 1);

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;

error:
	/*
	 * An error returned by this function should mean the app
	 * exiting and then the system releasing all the memory
	 * allocated even memory coming from hugepages.
	 *
	 * The device could be enabled at this point with some queues
	 * ready for getting packets. This is true if the call to
	 * nfp_net_rx_freelist_setup() succeeds for some queues but
	 * fails for subsequent queues.
	 *
	 * This should make the app exiting but better if we tell the
	 * device first.
	 */
	nfp_net_disable_queues(dev);

	return ret;
}
430 
431 /* Set the link up. */
432 static int
433 nfp_net_set_link_up(struct rte_eth_dev *dev)
434 {
435 	struct nfp_cpp *cpp;
436 	struct nfp_net_hw *hw;
437 
438 	hw = dev->data->dev_private;
439 
440 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
441 		cpp = hw->cpp;
442 	else
443 		cpp = ((struct nfp_pf_dev *)(dev->process_private))->cpp;
444 
445 	return nfp_eth_set_configured(cpp, hw->nfp_idx, 1);
446 }
447 
448 /* Set the link down. */
449 static int
450 nfp_net_set_link_down(struct rte_eth_dev *dev)
451 {
452 	struct nfp_cpp *cpp;
453 	struct nfp_net_hw *hw;
454 
455 	hw = dev->data->dev_private;
456 
457 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
458 		cpp = hw->cpp;
459 	else
460 		cpp = ((struct nfp_pf_dev *)(dev->process_private))->cpp;
461 
462 	return nfp_eth_set_configured(cpp, hw->nfp_idx, 0);
463 }
464 
465 static uint8_t
466 nfp_function_id_get(const struct nfp_pf_dev *pf_dev,
467 		uint8_t phy_port)
468 {
469 	if (pf_dev->multi_pf.enabled)
470 		return pf_dev->multi_pf.function_id;
471 
472 	return phy_port;
473 }
474 
475 static void
476 nfp_net_beat_timer(void *arg)
477 {
478 	uint64_t cur_sec;
479 	struct nfp_multi_pf *multi_pf = arg;
480 
481 	cur_sec = rte_rdtsc();
482 	nn_writeq(cur_sec, multi_pf->beat_addr + NFP_BEAT_OFFSET(multi_pf->function_id));
483 
484 	/* Beat once per second. */
485 	if (rte_eal_alarm_set(1000 * 1000, nfp_net_beat_timer,
486 			(void *)multi_pf) < 0) {
487 		PMD_DRV_LOG(ERR, "Error setting alarm");
488 	}
489 }
490 
491 static int
492 nfp_net_keepalive_init(struct nfp_cpp *cpp,
493 		struct nfp_multi_pf *multi_pf)
494 {
495 	uint8_t *base;
496 	uint64_t addr;
497 	uint32_t size;
498 	uint32_t cpp_id;
499 	struct nfp_resource *res;
500 
501 	res = nfp_resource_acquire(cpp, NFP_RESOURCE_KEEPALIVE);
502 	if (res == NULL)
503 		return -EIO;
504 
505 	cpp_id = nfp_resource_cpp_id(res);
506 	addr = nfp_resource_address(res);
507 	size = nfp_resource_size(res);
508 
509 	nfp_resource_release(res);
510 
511 	/* Allocate a fixed area for keepalive. */
512 	base = nfp_cpp_map_area(cpp, cpp_id, addr, size, &multi_pf->beat_area);
513 	if (base == NULL) {
514 		PMD_DRV_LOG(ERR, "Failed to map area for keepalive.");
515 		return -EIO;
516 	}
517 
518 	multi_pf->beat_addr = base;
519 
520 	return 0;
521 }
522 
523 static void
524 nfp_net_keepalive_uninit(struct nfp_multi_pf *multi_pf)
525 {
526 	nfp_cpp_area_release_free(multi_pf->beat_area);
527 }
528 
529 static int
530 nfp_net_keepalive_start(struct nfp_multi_pf *multi_pf)
531 {
532 	if (rte_eal_alarm_set(1000 * 1000, nfp_net_beat_timer,
533 			(void *)multi_pf) < 0) {
534 		PMD_DRV_LOG(ERR, "Error setting alarm");
535 		return -EIO;
536 	}
537 
538 	return 0;
539 }
540 
/* Zero one keepalive slot so the firmware sees that function as gone. */
static void
nfp_net_keepalive_clear(uint8_t *beat_addr,
		uint8_t function_id)
{
	nn_writeq(0, beat_addr + NFP_BEAT_OFFSET(function_id));
}
547 
548 static void
549 nfp_net_keepalive_clear_others(const struct nfp_dev_info *dev_info,
550 		struct nfp_multi_pf *multi_pf)
551 {
552 	uint8_t port_num;
553 
554 	for (port_num = 0; port_num < dev_info->pf_num_per_unit; port_num++) {
555 		if (port_num == multi_pf->function_id)
556 			continue;
557 
558 		nfp_net_keepalive_clear(multi_pf->beat_addr, port_num);
559 	}
560 }
561 
562 static void
563 nfp_net_keepalive_stop(struct nfp_multi_pf *multi_pf)
564 {
565 	/* Cancel keepalive for multiple PF setup */
566 	rte_eal_alarm_cancel(nfp_net_beat_timer, (void *)multi_pf);
567 }
568 
/* Release the per-port resources acquired during nfp_net_init(). */
static void
nfp_net_uninit(struct rte_eth_dev *eth_dev)
{
	struct nfp_net_hw *net_hw;

	net_hw = eth_dev->data->dev_private;

	/* Flow-steering private data only exists when the capability bit is set. */
	if ((net_hw->super.cap_ext & NFP_NET_CFG_CTRL_FLOW_STEER) != 0)
		nfp_net_flow_priv_uninit(net_hw->pf_dev, net_hw->idx);

	rte_free(net_hw->eth_xstats_base);
	nfp_ipsec_uninit(eth_dev);
	/* Only ports that mapped the MAC stats area own a mapping to release. */
	if (net_hw->mac_stats_area != NULL)
		nfp_cpp_area_release_free(net_hw->mac_stats_area);
}
584 
585 static void
586 nfp_cleanup_port_app_fw_nic(struct nfp_pf_dev *pf_dev,
587 		uint8_t id)
588 {
589 	struct rte_eth_dev *eth_dev;
590 	struct nfp_app_fw_nic *app_fw_nic;
591 
592 	app_fw_nic = pf_dev->app_fw_priv;
593 	if (app_fw_nic->ports[id] != NULL) {
594 		eth_dev = app_fw_nic->ports[id]->eth_dev;
595 		if (eth_dev != NULL)
596 			nfp_net_uninit(eth_dev);
597 
598 		app_fw_nic->ports[id] = NULL;
599 	}
600 }
601 
602 static void
603 nfp_uninit_app_fw_nic(struct nfp_pf_dev *pf_dev)
604 {
605 	nfp_cpp_area_release_free(pf_dev->ctrl_area);
606 	rte_free(pf_dev->app_fw_priv);
607 }
608 
/* Free all PF-wide resources; the inverse of PF device setup. */
void
nfp_pf_uninit(struct nfp_pf_dev *pf_dev)
{
	nfp_cpp_area_release_free(pf_dev->qc_area);
	free(pf_dev->sym_tbl);
	/* In multi-PF mode: stop beating, clear our slot, unmap the area. */
	if (pf_dev->multi_pf.enabled) {
		nfp_net_keepalive_stop(&pf_dev->multi_pf);
		nfp_net_keepalive_clear(pf_dev->multi_pf.beat_addr, pf_dev->multi_pf.function_id);
		nfp_net_keepalive_uninit(&pf_dev->multi_pf);
	}
	free(pf_dev->nfp_eth_table);
	free(pf_dev->hwinfo);
	/* CPP handle goes last among device handles; then the sync object. */
	nfp_cpp_free(pf_dev->cpp);
	nfp_sync_free(pf_dev->sync);
	rte_free(pf_dev);
}
625 
626 static int
627 nfp_pf_secondary_uninit(struct nfp_pf_dev *pf_dev)
628 {
629 	free(pf_dev->sym_tbl);
630 	nfp_cpp_free(pf_dev->cpp);
631 	nfp_sync_free(pf_dev->sync);
632 	rte_free(pf_dev);
633 
634 	return 0;
635 }
636 
/* Reset and stop device. The device can not be restarted. */
static int
nfp_net_close(struct rte_eth_dev *dev)
{
	uint8_t i;
	uint8_t id;
	struct nfp_net_hw *hw;
	struct nfp_pf_dev *pf_dev;
	struct rte_pci_device *pci_dev;
	struct nfp_app_fw_nic *app_fw_nic;

	/*
	 * In secondary process, a released eth device can be found by its name
	 * in shared memory.
	 * If the state of the eth device is RTE_ETH_DEV_UNUSED, it means the
	 * eth device has been released.
	 */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		if (dev->state == RTE_ETH_DEV_UNUSED)
			return 0;

		nfp_pf_secondary_uninit(dev->process_private);
		return 0;
	}

	hw = dev->data->dev_private;
	pf_dev = hw->pf_dev;
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);

	/*
	 * We assume that the DPDK application is stopping all the
	 * threads/queues before calling the device close function.
	 */
	nfp_net_disable_queues(dev);

	/* Clear queues */
	nfp_net_close_tx_queue(dev);
	nfp_net_close_rx_queue(dev);

	/* Cancel possible impending LSC work here before releasing the port */
	rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler, (void *)dev);

	/* Only free PF resources after all physical ports have been closed */
	/* Mark this port as unused and free device priv resources */
	nn_cfg_writeb(&hw->super, NFP_NET_CFG_LSC, 0xff);

	/* Only the CoreNIC firmware app is handled by this close path. */
	if (pf_dev->app_fw_id != NFP_APP_FW_CORE_NIC)
		return -EINVAL;

	nfp_cleanup_port_app_fw_nic(pf_dev, hw->idx);

	for (i = 0; i < app_fw_nic->total_phyports; i++) {
		id = nfp_function_id_get(pf_dev, i);

		/* Check to see if ports are still in use */
		if (app_fw_nic->ports[id] != NULL)
			return 0;
	}

	/* All ports are closed: release the PF-wide resources. */
	/* Enable in nfp_net_start() */
	rte_intr_disable(pci_dev->intr_handle);

	/* Register in nfp_net_init() */
	rte_intr_callback_unregister(pci_dev->intr_handle,
			nfp_net_dev_interrupt_handler, (void *)dev);

	nfp_uninit_app_fw_nic(pf_dev);
	nfp_pf_uninit(pf_dev);

	return 0;
}
709 
710 static int
711 nfp_net_find_vxlan_idx(struct nfp_net_hw *hw,
712 		uint16_t port,
713 		uint32_t *idx)
714 {
715 	uint32_t i;
716 	int free_idx = -1;
717 
718 	for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i++) {
719 		if (hw->vxlan_ports[i] == port) {
720 			free_idx = i;
721 			break;
722 		}
723 
724 		if (hw->vxlan_usecnt[i] == 0) {
725 			free_idx = i;
726 			break;
727 		}
728 	}
729 
730 	if (free_idx == -1)
731 		return -EINVAL;
732 
733 	*idx = free_idx;
734 
735 	return 0;
736 }
737 
738 static int
739 nfp_udp_tunnel_port_add(struct rte_eth_dev *dev,
740 		struct rte_eth_udp_tunnel *tunnel_udp)
741 {
742 	int ret;
743 	uint32_t idx;
744 	uint16_t vxlan_port;
745 	struct nfp_net_hw *hw;
746 	enum rte_eth_tunnel_type tnl_type;
747 
748 	hw = dev->data->dev_private;
749 	vxlan_port = tunnel_udp->udp_port;
750 	tnl_type   = tunnel_udp->prot_type;
751 
752 	if (tnl_type != RTE_ETH_TUNNEL_TYPE_VXLAN) {
753 		PMD_DRV_LOG(ERR, "Not VXLAN tunnel");
754 		return -ENOTSUP;
755 	}
756 
757 	ret = nfp_net_find_vxlan_idx(hw, vxlan_port, &idx);
758 	if (ret != 0) {
759 		PMD_DRV_LOG(ERR, "Failed find valid vxlan idx");
760 		return -EINVAL;
761 	}
762 
763 	if (hw->vxlan_usecnt[idx] == 0) {
764 		ret = nfp_net_set_vxlan_port(hw, idx, vxlan_port);
765 		if (ret != 0) {
766 			PMD_DRV_LOG(ERR, "Failed set vxlan port");
767 			return -EINVAL;
768 		}
769 	}
770 
771 	hw->vxlan_usecnt[idx]++;
772 
773 	return 0;
774 }
775 
776 static int
777 nfp_udp_tunnel_port_del(struct rte_eth_dev *dev,
778 		struct rte_eth_udp_tunnel *tunnel_udp)
779 {
780 	int ret;
781 	uint32_t idx;
782 	uint16_t vxlan_port;
783 	struct nfp_net_hw *hw;
784 	enum rte_eth_tunnel_type tnl_type;
785 
786 	hw = dev->data->dev_private;
787 	vxlan_port = tunnel_udp->udp_port;
788 	tnl_type   = tunnel_udp->prot_type;
789 
790 	if (tnl_type != RTE_ETH_TUNNEL_TYPE_VXLAN) {
791 		PMD_DRV_LOG(ERR, "Not VXLAN tunnel");
792 		return -ENOTSUP;
793 	}
794 
795 	ret = nfp_net_find_vxlan_idx(hw, vxlan_port, &idx);
796 	if (ret != 0 || hw->vxlan_usecnt[idx] == 0) {
797 		PMD_DRV_LOG(ERR, "Failed find valid vxlan idx");
798 		return -EINVAL;
799 	}
800 
801 	hw->vxlan_usecnt[idx]--;
802 
803 	if (hw->vxlan_usecnt[idx] == 0) {
804 		ret = nfp_net_set_vxlan_port(hw, idx, 0);
805 		if (ret != 0) {
806 			PMD_DRV_LOG(ERR, "Failed set vxlan port");
807 			return -EINVAL;
808 		}
809 	}
810 
811 	return 0;
812 }
813 
/*
 * Initialise and register driver with DPDK Application.
 *
 * Ethdev callback table shared by all CoreNIC physical ports; mounted on
 * each port by nfp_net_ethdev_ops_mount().
 */
static const struct eth_dev_ops nfp_net_eth_dev_ops = {
	.dev_configure          = nfp_net_configure,
	.dev_start              = nfp_net_start,
	.dev_stop               = nfp_net_stop,
	.dev_set_link_up        = nfp_net_set_link_up,
	.dev_set_link_down      = nfp_net_set_link_down,
	.dev_close              = nfp_net_close,
	.promiscuous_enable     = nfp_net_promisc_enable,
	.promiscuous_disable    = nfp_net_promisc_disable,
	.allmulticast_enable    = nfp_net_allmulticast_enable,
	.allmulticast_disable   = nfp_net_allmulticast_disable,
	.link_update            = nfp_net_link_update,
	.stats_get              = nfp_net_stats_get,
	.stats_reset            = nfp_net_stats_reset,
	.xstats_get             = nfp_net_xstats_get,
	.xstats_reset           = nfp_net_xstats_reset,
	.xstats_get_names       = nfp_net_xstats_get_names,
	.xstats_get_by_id       = nfp_net_xstats_get_by_id,
	.xstats_get_names_by_id = nfp_net_xstats_get_names_by_id,
	.dev_infos_get          = nfp_net_infos_get,
	.dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
	.mtu_set                = nfp_net_dev_mtu_set,
	.mac_addr_set           = nfp_net_set_mac_addr,
	.vlan_offload_set       = nfp_net_vlan_offload_set,
	.reta_update            = nfp_net_reta_update,
	.reta_query             = nfp_net_reta_query,
	.rss_hash_update        = nfp_net_rss_hash_update,
	.rss_hash_conf_get      = nfp_net_rss_hash_conf_get,
	.rx_queue_setup         = nfp_net_rx_queue_setup,
	.rx_queue_release       = nfp_net_rx_queue_release,
	.tx_queue_setup         = nfp_net_tx_queue_setup,
	.tx_queue_release       = nfp_net_tx_queue_release,
	.rx_queue_intr_enable   = nfp_rx_queue_intr_enable,
	.rx_queue_intr_disable  = nfp_rx_queue_intr_disable,
	.udp_tunnel_port_add    = nfp_udp_tunnel_port_add,
	.udp_tunnel_port_del    = nfp_udp_tunnel_port_del,
	.fw_version_get         = nfp_net_firmware_version_get,
	.flow_ctrl_get          = nfp_net_flow_ctrl_get,
	.flow_ctrl_set          = nfp_net_flow_ctrl_set,
	.flow_ops_get           = nfp_net_flow_ops_get,
	.fec_get_capability     = nfp_net_fec_get_capability,
	.fec_get                = nfp_net_fec_get,
	.fec_set                = nfp_net_fec_set,
};
859 
860 static inline void
861 nfp_net_ethdev_ops_mount(struct nfp_net_hw *hw,
862 		struct rte_eth_dev *eth_dev)
863 {
864 	if (hw->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3)
865 		eth_dev->tx_pkt_burst = nfp_net_nfd3_xmit_pkts;
866 	else
867 		eth_dev->tx_pkt_burst = nfp_net_nfdk_xmit_pkts;
868 
869 	eth_dev->dev_ops = &nfp_net_eth_dev_ops;
870 	eth_dev->rx_queue_count = nfp_net_rx_queue_count;
871 	eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
872 }
873 
/*
 * Initialise one CoreNIC physical port: map its control and MAC stats
 * BARs, probe capabilities, set up the datapath callbacks, the MAC
 * address and the LSC interrupt.
 *
 * Resources acquired here are released by nfp_net_uninit(); on failure
 * the goto labels unwind in reverse acquisition order.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int
nfp_net_init(struct rte_eth_dev *eth_dev)
{
	int err;
	uint16_t port;
	uint64_t rx_base;
	uint64_t tx_base;
	struct nfp_hw *hw;
	struct nfp_net_hw *net_hw;
	struct nfp_pf_dev *pf_dev;
	struct rte_pci_device *pci_dev;
	struct nfp_app_fw_nic *app_fw_nic;

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	net_hw = eth_dev->data->dev_private;

	/* Use backpointer here to the PF of this eth_dev */
	pf_dev = net_hw->pf_dev;

	/* Use backpointer to the CoreNIC app struct */
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);

	port = ((struct nfp_net_hw *)eth_dev->data->dev_private)->idx;
	if (port > 7) {
		PMD_DRV_LOG(ERR, "Port value is wrong");
		return -ENODEV;
	}

	hw = &net_hw->super;

	PMD_INIT_LOG(DEBUG, "Working with physical port number: %hu, "
			"NFP internal port number: %d", port, net_hw->nfp_idx);

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/*
	 * Port 0 (or each PF in multi-PF mode) maps the shared MAC stats
	 * area; the remaining ports index into port 0's mapping.
	 */
	if (port == 0 || pf_dev->multi_pf.enabled) {
		uint32_t min_size;

		hw->ctrl_bar = pf_dev->ctrl_bar;
		min_size = NFP_MAC_STATS_SIZE * net_hw->pf_dev->nfp_eth_table->max_index;
		net_hw->mac_stats_bar = nfp_rtsym_map(net_hw->pf_dev->sym_tbl, "_mac_stats",
				min_size, &net_hw->mac_stats_area);
		if (net_hw->mac_stats_bar == NULL) {
			PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for _mac_stats_bar");
			return -EIO;
		}

		net_hw->mac_stats = net_hw->mac_stats_bar;
	} else {
		/* Use port offset in pf ctrl_bar for this ports control bar */
		hw->ctrl_bar = pf_dev->ctrl_bar + (port * NFP_NET_CFG_BAR_SZ);
		net_hw->mac_stats = app_fw_nic->ports[0]->mac_stats_bar +
				(net_hw->nfp_idx * NFP_MAC_STATS_SIZE);
	}

	PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar);
	PMD_INIT_LOG(DEBUG, "MAC stats: %p", net_hw->mac_stats);

	err = nfp_net_common_init(pci_dev, net_hw);
	if (err != 0)
		goto free_area;

	err = nfp_net_tlv_caps_parse(eth_dev);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Failed to parser TLV caps");
		goto free_area;
	}

	err = nfp_ipsec_init(eth_dev);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Failed to init IPsec module");
		goto free_area;
	}

	nfp_net_ethdev_ops_mount(net_hw, eth_dev);

	/* Baseline snapshot used to report xstats relative to last reset. */
	net_hw->eth_xstats_base = rte_malloc("rte_eth_xstat", sizeof(struct rte_eth_xstat) *
			nfp_net_xstats_size(eth_dev), 0);
	if (net_hw->eth_xstats_base == NULL) {
		PMD_INIT_LOG(ERR, "no memory for xstats base values on device %s!",
				pci_dev->device.name);
		err = -ENOMEM;
		goto ipsec_exit;
	}

	/* Work out where in the BAR the queues start. */
	tx_base = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
	rx_base = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);

	net_hw->tx_bar = pf_dev->qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
	net_hw->rx_bar = pf_dev->qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ;
	eth_dev->data->dev_private = net_hw;

	PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
			hw->ctrl_bar, net_hw->tx_bar, net_hw->rx_bar);

	nfp_net_cfg_queue_setup(net_hw);
	net_hw->mtu = RTE_ETHER_MTU;

	/* VLAN insertion is incompatible with LSOv2 */
	if ((hw->cap & NFP_NET_CFG_CTRL_LSO2) != 0)
		hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN;

	nfp_net_log_device_information(net_hw);

	/* Initializing spinlock for reconfigs */
	rte_spinlock_init(&hw->reconfig_lock);

	/* Allocating memory for mac addr */
	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to space for MAC address");
		err = -ENOMEM;
		goto xstats_free;
	}

	nfp_net_pf_read_mac(app_fw_nic, port);
	nfp_write_mac(hw, &hw->mac_addr.addr_bytes[0]);

	/* Fall back to a random MAC when the NSP one is not valid. */
	if (rte_is_valid_assigned_ether_addr(&hw->mac_addr) == 0) {
		PMD_INIT_LOG(INFO, "Using random mac address for port %d", port);
		/* Using random mac addresses for VFs */
		rte_eth_random_addr(&hw->mac_addr.addr_bytes[0]);
		nfp_write_mac(hw, &hw->mac_addr.addr_bytes[0]);
	}

	/* Copying mac address to DPDK eth_dev struct */
	rte_ether_addr_copy(&hw->mac_addr, eth_dev->data->mac_addrs);

	if ((hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR) == 0)
		eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR;

	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	PMD_INIT_LOG(INFO, "port %d VendorID=%#x DeviceID=%#x "
			"mac=" RTE_ETHER_ADDR_PRT_FMT,
			eth_dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id,
			RTE_ETHER_ADDR_BYTES(&hw->mac_addr));

	/* Registering LSC interrupt handler */
	rte_intr_callback_register(pci_dev->intr_handle,
			nfp_net_dev_interrupt_handler, (void *)eth_dev);
	/* Telling the firmware about the LSC interrupt entry */
	nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
	/* Unmask the LSC interrupt */
	nfp_net_irq_unmask(eth_dev);
	/* Recording current stats counters values */
	nfp_net_stats_reset(eth_dev);

	/* Flow-steering support is optional and capability gated. */
	if ((hw->cap_ext & NFP_NET_CFG_CTRL_FLOW_STEER) != 0) {
		err = nfp_net_flow_priv_init(pf_dev, port);
		if (err != 0) {
			PMD_INIT_LOG(ERR, "Init net flow priv failed");
			goto xstats_free;
		}
	}

	return 0;

xstats_free:
	rte_free(net_hw->eth_xstats_base);
ipsec_exit:
	nfp_ipsec_uninit(eth_dev);
free_area:
	if (net_hw->mac_stats_area != NULL)
		nfp_cpp_area_release_free(net_hw->mac_stats_area);

	return err;
}
1044 
1045 #define DEFAULT_FW_PATH       "/lib/firmware/netronome"
1046 
1047 static int
1048 nfp_fw_get_name(struct rte_pci_device *dev,
1049 		struct nfp_nsp *nsp,
1050 		char *card,
1051 		char *fw_name,
1052 		size_t fw_size)
1053 {
1054 	char serial[40];
1055 	uint16_t interface;
1056 	uint32_t cpp_serial_len;
1057 	const uint8_t *cpp_serial;
1058 	struct nfp_cpp *cpp = nfp_nsp_cpp(nsp);
1059 
1060 	cpp_serial_len = nfp_cpp_serial(cpp, &cpp_serial);
1061 	if (cpp_serial_len != NFP_SERIAL_LEN)
1062 		return -ERANGE;
1063 
1064 	interface = nfp_cpp_interface(cpp);
1065 
1066 	/* Looking for firmware file in order of priority */
1067 
1068 	/* First try to find a firmware image specific for this device */
1069 	snprintf(serial, sizeof(serial),
1070 			"serial-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x",
1071 			cpp_serial[0], cpp_serial[1], cpp_serial[2], cpp_serial[3],
1072 			cpp_serial[4], cpp_serial[5], interface >> 8, interface & 0xff);
1073 	snprintf(fw_name, fw_size, "%s/%s.nffw", DEFAULT_FW_PATH, serial);
1074 
1075 	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
1076 	if (access(fw_name, F_OK) == 0)
1077 		return 0;
1078 
1079 	/* Then try the PCI name */
1080 	snprintf(fw_name, fw_size, "%s/pci-%s.nffw", DEFAULT_FW_PATH,
1081 			dev->name);
1082 
1083 	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
1084 	if (access(fw_name, F_OK) == 0)
1085 		return 0;
1086 
1087 	/* Finally try the card type and media */
1088 	snprintf(fw_name, fw_size, "%s/%s", DEFAULT_FW_PATH, card);
1089 	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
1090 	if (access(fw_name, F_OK) == 0)
1091 		return 0;
1092 
1093 	return -ENOENT;
1094 }
1095 
1096 static int
1097 nfp_fw_upload(struct nfp_nsp *nsp,
1098 		char *fw_name)
1099 {
1100 	int err;
1101 	void *fw_buf;
1102 	size_t fsize;
1103 
1104 	err = rte_firmware_read(fw_name, &fw_buf, &fsize);
1105 	if (err != 0) {
1106 		PMD_DRV_LOG(ERR, "firmware %s not found!", fw_name);
1107 		return -ENOENT;
1108 	}
1109 
1110 	PMD_DRV_LOG(INFO, "Firmware file found at %s with size: %zu",
1111 			fw_name, fsize);
1112 	PMD_DRV_LOG(INFO, "Uploading the firmware ...");
1113 	if (nfp_nsp_load_fw(nsp, fw_buf, fsize) < 0) {
1114 		free(fw_buf);
1115 		PMD_DRV_LOG(ERR, "Firmware load failed.");
1116 		return -EIO;
1117 	}
1118 
1119 	PMD_DRV_LOG(INFO, "Done");
1120 
1121 	free(fw_buf);
1122 
1123 	return 0;
1124 }
1125 
1126 static void
1127 nfp_fw_unload(struct nfp_cpp *cpp)
1128 {
1129 	struct nfp_nsp *nsp;
1130 
1131 	nsp = nfp_nsp_open(cpp);
1132 	if (nsp == NULL)
1133 		return;
1134 
1135 	nfp_nsp_device_soft_reset(nsp);
1136 	nfp_nsp_close(nsp);
1137 }
1138 
1139 static int
1140 nfp_fw_check_change(struct nfp_cpp *cpp,
1141 		char *fw_name,
1142 		bool *fw_changed)
1143 {
1144 	int ret;
1145 	struct nfp_net_hw hw;
1146 	uint32_t new_version = 0;
1147 	uint32_t old_version = 0;
1148 
1149 	ret = nfp_elf_get_fw_version(&new_version, fw_name);
1150 	if (ret != 0)
1151 		return ret;
1152 
1153 	hw.cpp = cpp;
1154 	nfp_net_get_fw_version(&hw, &old_version);
1155 
1156 	if (new_version != old_version) {
1157 		PMD_DRV_LOG(INFO, "FW version is changed, new %u, old %u",
1158 				new_version, old_version);
1159 		*fw_changed = true;
1160 	} else {
1161 		PMD_DRV_LOG(INFO, "FW version is not changed and is %u", new_version);
1162 		*fw_changed = false;
1163 	}
1164 
1165 	return 0;
1166 }
1167 
1168 static int
1169 nfp_fw_reload(struct nfp_nsp *nsp,
1170 		char *fw_name)
1171 {
1172 	int err;
1173 
1174 	nfp_nsp_device_soft_reset(nsp);
1175 	err = nfp_fw_upload(nsp, fw_name);
1176 	if (err != 0)
1177 		PMD_DRV_LOG(ERR, "NFP firmware load failed");
1178 
1179 	return err;
1180 }
1181 
/*
 * Decide whether this PF can skip (re)loading the firmware on a
 * multi-PF card by sampling the per-PF heartbeat counters in the
 * shared "beat" area.
 *
 * Returns true when loading should be skipped:
 *   - all sibling PFs have a zero heartbeat (nobody else ever started), or
 *   - at least one sibling is alive (its counter advanced), in which case
 *     a user-requested force reload is also cancelled via *reload_fw, or
 *   - some siblings are alive while others exited abnormally.
 * Returns false when every sibling with a non-zero counter appears dead
 * (counter frozen), i.e. the firmware should be reloaded.
 *
 * NOTE(review): relies on the 1s sleeps being long enough for a live PF
 * to bump its counter at least once - confirm against the beat period.
 */
static bool
nfp_fw_skip_load(const struct nfp_dev_info *dev_info,
		struct nfp_multi_pf *multi_pf,
		bool *reload_fw)
{
	uint8_t i;
	uint64_t tmp_beat;
	uint32_t port_num;
	uint8_t in_use = 0;
	/* Snapshot of each sibling's counter; 0 marks "not tracked". */
	uint64_t beat[dev_info->pf_num_per_unit];
	uint32_t offset[dev_info->pf_num_per_unit];
	/* Count of PFs still suspected of having exited abnormally. */
	uint8_t abnormal = dev_info->pf_num_per_unit;

	sleep(1);
	/* Take an initial snapshot; our own PF and never-started PFs drop out. */
	for (port_num = 0; port_num < dev_info->pf_num_per_unit; port_num++) {
		if (port_num == multi_pf->function_id) {
			abnormal--;
			continue;
		}

		offset[port_num] = NFP_BEAT_OFFSET(port_num);
		beat[port_num] = nn_readq(multi_pf->beat_addr + offset[port_num]);
		if (beat[port_num] == 0)
			abnormal--;
	}

	/* No sibling ever came up: safe to skip the load. */
	if (abnormal == 0)
		return true;

	/* Re-sample for up to 3 seconds, retiring PFs whose counter advances. */
	for (i = 0; i < 3; i++) {
		sleep(1);
		for (port_num = 0; port_num < dev_info->pf_num_per_unit; port_num++) {
			if (port_num == multi_pf->function_id)
				continue;

			if (beat[port_num] == 0)
				continue;

			tmp_beat = nn_readq(multi_pf->beat_addr + offset[port_num]);
			if (tmp_beat != beat[port_num]) {
				/* This sibling is alive: never reload under it. */
				in_use++;
				abnormal--;
				beat[port_num] = 0;
				if (*reload_fw) {
					*reload_fw = false;
					PMD_DRV_LOG(ERR, "The param %s does not work",
							NFP_PF_FORCE_RELOAD_FW);
				}
			}
		}

		/* Every tracked sibling proved to be alive. */
		if (abnormal == 0)
			return true;
	}

	/* Mixed state: some siblings alive, some exited abnormally. */
	if (in_use != 0) {
		PMD_DRV_LOG(WARNING, "Abnormal %u != 0, the nic has port which is exit abnormally.",
				abnormal);
		return true;
	}

	return false;
}
/*
 * Firmware load policy for a single-PF card: reload only when forced or
 * when the on-disk image version differs from the running one.
 */
static int
nfp_fw_reload_for_single_pf(struct nfp_nsp *nsp,
		char *fw_name,
		struct nfp_cpp *cpp,
		bool force_reload_fw)
{
	int ret;
	bool fw_changed = true;

	/*
	 * With firmware already running and no forced reload, compare the
	 * file version against the running version and skip matching images.
	 */
	if (nfp_nsp_fw_loaded(nsp) && !force_reload_fw) {
		ret = nfp_fw_check_change(cpp, fw_name, &fw_changed);
		if (ret != 0)
			return ret;
	}

	if (!fw_changed)
		return 0;

	return nfp_fw_reload(nsp, fw_name);
}
1269 
/*
 * Firmware load policy for a multi-PF card.
 *
 * Starts this PF's keepalive heartbeat first so sibling PFs can see us,
 * then decides whether to reload based on the image version and the
 * liveness of the other PFs (nfp_fw_skip_load()).
 *
 * On success the keepalive is intentionally left running - presumably it
 * is torn down later in the PF shutdown path; it is only stopped here on
 * error. Returns 0 on success, negative errno on failure.
 */
static int
nfp_fw_reload_for_multi_pf(struct nfp_nsp *nsp,
		char *fw_name,
		struct nfp_cpp *cpp,
		const struct nfp_dev_info *dev_info,
		struct nfp_multi_pf *multi_pf,
		bool force_reload_fw)
{
	int err;
	bool fw_changed = true;
	bool skip_load_fw = false;
	/* May be cleared by nfp_fw_skip_load() when a sibling PF is alive. */
	bool reload_fw = force_reload_fw;

	err = nfp_net_keepalive_init(cpp, multi_pf);
	if (err != 0) {
		PMD_DRV_LOG(ERR, "NFP init beat failed");
		return err;
	}

	err = nfp_net_keepalive_start(multi_pf);
	if (err != 0) {
		PMD_DRV_LOG(ERR, "NFP write beat failed");
		goto keepalive_uninit;
	}

	/* Only compare versions when firmware runs and no reload is forced. */
	if (nfp_nsp_fw_loaded(nsp) && !reload_fw) {
		err = nfp_fw_check_change(cpp, fw_name, &fw_changed);
		if (err != 0)
			goto keepalive_stop;
	}

	/* Check sibling PF liveness before touching shared firmware. */
	if (!fw_changed || reload_fw)
		skip_load_fw = nfp_fw_skip_load(dev_info, multi_pf, &reload_fw);

	if (skip_load_fw && !reload_fw)
		return 0;

	err = nfp_fw_reload(nsp, fw_name);
	if (err != 0)
		goto keepalive_stop;

	/* Fresh firmware: stale heartbeats of other PFs must be cleared. */
	nfp_net_keepalive_clear_others(dev_info, multi_pf);

	return 0;

keepalive_stop:
	nfp_net_keepalive_stop(multi_pf);
keepalive_uninit:
	nfp_net_keepalive_uninit(multi_pf);

	return err;
}
1322 
1323 static int
1324 nfp_fw_setup(struct rte_pci_device *dev,
1325 		struct nfp_cpp *cpp,
1326 		struct nfp_eth_table *nfp_eth_table,
1327 		struct nfp_hwinfo *hwinfo,
1328 		const struct nfp_dev_info *dev_info,
1329 		struct nfp_multi_pf *multi_pf,
1330 		bool force_reload_fw)
1331 {
1332 	int err;
1333 	char fw_name[125];
1334 	char card_desc[100];
1335 	struct nfp_nsp *nsp;
1336 	const char *nfp_fw_model;
1337 
1338 	nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "nffw.partno");
1339 	if (nfp_fw_model == NULL)
1340 		nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "assembly.partno");
1341 
1342 	if (nfp_fw_model != NULL) {
1343 		PMD_DRV_LOG(INFO, "firmware model found: %s", nfp_fw_model);
1344 	} else {
1345 		PMD_DRV_LOG(ERR, "firmware model NOT found");
1346 		return -EIO;
1347 	}
1348 
1349 	if (nfp_eth_table->count == 0 || nfp_eth_table->count > 8) {
1350 		PMD_DRV_LOG(ERR, "NFP ethernet table reports wrong ports: %u",
1351 				nfp_eth_table->count);
1352 		return -EIO;
1353 	}
1354 
1355 	PMD_DRV_LOG(INFO, "NFP ethernet port table reports %u ports",
1356 			nfp_eth_table->count);
1357 
1358 	PMD_DRV_LOG(INFO, "Port speed: %u", nfp_eth_table->ports[0].speed);
1359 
1360 	snprintf(card_desc, sizeof(card_desc), "nic_%s_%dx%d.nffw",
1361 			nfp_fw_model, nfp_eth_table->count,
1362 			nfp_eth_table->ports[0].speed / 1000);
1363 
1364 	nsp = nfp_nsp_open(cpp);
1365 	if (nsp == NULL) {
1366 		PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle");
1367 		return -EIO;
1368 	}
1369 
1370 	err = nfp_fw_get_name(dev, nsp, card_desc, fw_name, sizeof(fw_name));
1371 	if (err != 0) {
1372 		PMD_DRV_LOG(ERR, "Can't find suitable firmware.");
1373 		nfp_nsp_close(nsp);
1374 		return err;
1375 	}
1376 
1377 	if (multi_pf->enabled)
1378 		err = nfp_fw_reload_for_multi_pf(nsp, fw_name, cpp, dev_info, multi_pf,
1379 				force_reload_fw);
1380 	else
1381 		err = nfp_fw_reload_for_single_pf(nsp, fw_name, cpp, force_reload_fw);
1382 
1383 	nfp_nsp_close(nsp);
1384 	return err;
1385 }
1386 
/*
 * Firmware side multi-PF detection: a multi-PF image exposes exactly
 * one vNIC per PF.
 */
static inline bool
nfp_check_multi_pf_from_fw(uint32_t total_vnics)
{
	return total_vnics == 1;
}
1395 
1396 static inline bool
1397 nfp_check_multi_pf_from_nsp(struct rte_pci_device *pci_dev,
1398 		struct nfp_cpp *cpp)
1399 {
1400 	bool flag;
1401 	struct nfp_nsp *nsp;
1402 
1403 	nsp = nfp_nsp_open(cpp);
1404 	if (nsp == NULL) {
1405 		PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle");
1406 		return false;
1407 	}
1408 
1409 	flag = (nfp_nsp_get_abi_ver_major(nsp) > 0) &&
1410 			(pci_dev->id.device_id == PCI_DEVICE_ID_NFP3800_PF_NIC);
1411 
1412 	nfp_nsp_close(nsp);
1413 	return flag;
1414 }
1415 
1416 static int
1417 nfp_enable_multi_pf(struct nfp_pf_dev *pf_dev)
1418 {
1419 	int err = 0;
1420 	uint64_t tx_base;
1421 	uint8_t *ctrl_bar;
1422 	struct nfp_hw *hw;
1423 	uint32_t cap_extend;
1424 	struct nfp_net_hw net_hw;
1425 	struct nfp_cpp_area *area;
1426 	char name[RTE_ETH_NAME_MAX_LEN];
1427 
1428 	memset(&net_hw, 0, sizeof(struct nfp_net_hw));
1429 
1430 	/* Map the symbol table */
1431 	snprintf(name, sizeof(name), "_pf%u_net_bar0",
1432 			pf_dev->multi_pf.function_id);
1433 	ctrl_bar = nfp_rtsym_map(pf_dev->sym_tbl, name, NFP_NET_CFG_BAR_SZ,
1434 			&area);
1435 	if (ctrl_bar == NULL) {
1436 		PMD_INIT_LOG(ERR, "Failed to find data vNIC memory symbol");
1437 		return -ENODEV;
1438 	}
1439 
1440 	hw = &net_hw.super;
1441 	hw->ctrl_bar = ctrl_bar;
1442 
1443 	cap_extend = nn_cfg_readl(hw, NFP_NET_CFG_CAP_WORD1);
1444 	if ((cap_extend & NFP_NET_CFG_CTRL_MULTI_PF) == 0) {
1445 		PMD_INIT_LOG(ERR, "Loaded firmware doesn't support multiple PF");
1446 		err = -EINVAL;
1447 		goto end;
1448 	}
1449 
1450 	tx_base = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
1451 	net_hw.tx_bar = pf_dev->qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
1452 	nfp_net_cfg_queue_setup(&net_hw);
1453 	rte_spinlock_init(&hw->reconfig_lock);
1454 	nfp_ext_reconfig(&net_hw.super, NFP_NET_CFG_CTRL_MULTI_PF, NFP_NET_CFG_UPDATE_GEN);
1455 end:
1456 	nfp_cpp_area_release_free(area);
1457 	return err;
1458 }
1459 
1460 static int
1461 nfp_init_app_fw_nic(struct nfp_pf_dev *pf_dev,
1462 		const struct nfp_dev_info *dev_info)
1463 {
1464 	uint8_t i;
1465 	uint8_t id;
1466 	int ret = 0;
1467 	uint32_t total_vnics;
1468 	struct nfp_net_hw *hw;
1469 	unsigned int numa_node;
1470 	struct rte_eth_dev *eth_dev;
1471 	struct nfp_app_fw_nic *app_fw_nic;
1472 	struct nfp_eth_table *nfp_eth_table;
1473 	char bar_name[RTE_ETH_NAME_MAX_LEN];
1474 	char port_name[RTE_ETH_NAME_MAX_LEN];
1475 	char vnic_name[RTE_ETH_NAME_MAX_LEN];
1476 
1477 	nfp_eth_table = pf_dev->nfp_eth_table;
1478 	PMD_INIT_LOG(INFO, "Total physical ports: %d", nfp_eth_table->count);
1479 	id = nfp_function_id_get(pf_dev, 0);
1480 
1481 	/* Allocate memory for the CoreNIC app */
1482 	app_fw_nic = rte_zmalloc("nfp_app_fw_nic", sizeof(*app_fw_nic), 0);
1483 	if (app_fw_nic == NULL)
1484 		return -ENOMEM;
1485 
1486 	/* Point the app_fw_priv pointer in the PF to the coreNIC app */
1487 	pf_dev->app_fw_priv = app_fw_nic;
1488 
1489 	/* Read the number of vNIC's created for the PF */
1490 	snprintf(vnic_name, sizeof(vnic_name), "nfd_cfg_pf%u_num_ports", id);
1491 	total_vnics = nfp_rtsym_read_le(pf_dev->sym_tbl, vnic_name, &ret);
1492 	if (ret != 0 || total_vnics == 0 || total_vnics > 8) {
1493 		PMD_INIT_LOG(ERR, "%s symbol with wrong value", vnic_name);
1494 		ret = -ENODEV;
1495 		goto app_cleanup;
1496 	}
1497 
1498 	if (pf_dev->multi_pf.enabled) {
1499 		if (!nfp_check_multi_pf_from_fw(total_vnics)) {
1500 			PMD_INIT_LOG(ERR, "NSP report multipf, but FW report not multipf");
1501 			ret = -ENODEV;
1502 			goto app_cleanup;
1503 		}
1504 	} else {
1505 		/*
1506 		 * For coreNIC the number of vNICs exposed should be the same as the
1507 		 * number of physical ports.
1508 		 */
1509 		if (total_vnics != nfp_eth_table->count) {
1510 			PMD_INIT_LOG(ERR, "Total physical ports do not match number of vNICs");
1511 			ret = -ENODEV;
1512 			goto app_cleanup;
1513 		}
1514 	}
1515 
1516 	/* Populate coreNIC app properties */
1517 	app_fw_nic->total_phyports = total_vnics;
1518 	app_fw_nic->pf_dev = pf_dev;
1519 	if (total_vnics > 1)
1520 		app_fw_nic->multiport = true;
1521 
1522 	/* Map the symbol table */
1523 	snprintf(bar_name, sizeof(bar_name), "_pf%u_net_bar0", id);
1524 	pf_dev->ctrl_bar = nfp_rtsym_map(pf_dev->sym_tbl, bar_name,
1525 			app_fw_nic->total_phyports * NFP_NET_CFG_BAR_SZ,
1526 			&pf_dev->ctrl_area);
1527 	if (pf_dev->ctrl_bar == NULL) {
1528 		PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for %s", bar_name);
1529 		ret = -EIO;
1530 		goto app_cleanup;
1531 	}
1532 
1533 	PMD_INIT_LOG(DEBUG, "ctrl bar: %p", pf_dev->ctrl_bar);
1534 
1535 	/* Loop through all physical ports on PF */
1536 	numa_node = rte_socket_id();
1537 	for (i = 0; i < app_fw_nic->total_phyports; i++) {
1538 		if (pf_dev->multi_pf.enabled)
1539 			snprintf(port_name, sizeof(port_name), "%s",
1540 					pf_dev->pci_dev->device.name);
1541 		else
1542 			snprintf(port_name, sizeof(port_name), "%s_port%u",
1543 					pf_dev->pci_dev->device.name, i);
1544 
1545 		/* Allocate a eth_dev for this phyport */
1546 		eth_dev = rte_eth_dev_allocate(port_name);
1547 		if (eth_dev == NULL) {
1548 			ret = -ENODEV;
1549 			goto port_cleanup;
1550 		}
1551 
1552 		/* Allocate memory for this phyport */
1553 		eth_dev->data->dev_private = rte_zmalloc_socket(port_name,
1554 				sizeof(struct nfp_net_hw),
1555 				RTE_CACHE_LINE_SIZE, numa_node);
1556 		if (eth_dev->data->dev_private == NULL) {
1557 			ret = -ENOMEM;
1558 			rte_eth_dev_release_port(eth_dev);
1559 			goto port_cleanup;
1560 		}
1561 
1562 		hw = eth_dev->data->dev_private;
1563 		id = nfp_function_id_get(pf_dev, i);
1564 
1565 		/* Add this device to the PF's array of physical ports */
1566 		app_fw_nic->ports[id] = hw;
1567 
1568 		hw->dev_info = dev_info;
1569 		hw->pf_dev = pf_dev;
1570 		hw->cpp = pf_dev->cpp;
1571 		hw->eth_dev = eth_dev;
1572 		hw->idx = id;
1573 		hw->nfp_idx = nfp_eth_table->ports[id].index;
1574 
1575 		eth_dev->device = &pf_dev->pci_dev->device;
1576 
1577 		/*
1578 		 * Ctrl/tx/rx BAR mappings and remaining init happens in
1579 		 * @nfp_net_init()
1580 		 */
1581 		ret = nfp_net_init(eth_dev);
1582 		if (ret != 0) {
1583 			ret = -ENODEV;
1584 			goto port_cleanup;
1585 		}
1586 
1587 		rte_eth_dev_probing_finish(eth_dev);
1588 
1589 	} /* End loop, all ports on this PF */
1590 
1591 	return 0;
1592 
1593 port_cleanup:
1594 	for (i = 0; i < app_fw_nic->total_phyports; i++) {
1595 		id = nfp_function_id_get(pf_dev, i);
1596 		hw = app_fw_nic->ports[id];
1597 
1598 		if (hw != NULL && hw->eth_dev != NULL) {
1599 			nfp_net_uninit(hw->eth_dev);
1600 			rte_eth_dev_release_port(hw->eth_dev);
1601 		}
1602 	}
1603 	nfp_cpp_area_release_free(pf_dev->ctrl_area);
1604 app_cleanup:
1605 	rte_free(app_fw_nic);
1606 
1607 	return ret;
1608 }
1609 
1610 static int
1611 nfp_net_hwinfo_set(uint8_t function_id,
1612 		struct nfp_rtsym_table *sym_tbl,
1613 		struct nfp_cpp *cpp,
1614 		enum nfp_app_fw_id app_fw_id)
1615 {
1616 	int ret = 0;
1617 	uint64_t app_cap;
1618 	struct nfp_nsp *nsp;
1619 	uint8_t sp_indiff = 1;
1620 	char hw_info[RTE_ETH_NAME_MAX_LEN];
1621 	char app_cap_name[RTE_ETH_NAME_MAX_LEN];
1622 
1623 	if (app_fw_id != NFP_APP_FW_FLOWER_NIC) {
1624 		/* Read the app capabilities of the firmware loaded */
1625 		snprintf(app_cap_name, sizeof(app_cap_name), "_pf%u_net_app_cap", function_id);
1626 		app_cap = nfp_rtsym_read_le(sym_tbl, app_cap_name, &ret);
1627 		if (ret != 0) {
1628 			PMD_INIT_LOG(ERR, "Could not read app_fw_cap from firmware.");
1629 			return ret;
1630 		}
1631 
1632 		/* Calculate the value of sp_indiff and write to hw_info */
1633 		sp_indiff = app_cap & NFP_NET_APP_CAP_SP_INDIFF;
1634 	}
1635 
1636 	snprintf(hw_info, sizeof(hw_info), "sp_indiff=%u", sp_indiff);
1637 
1638 	nsp = nfp_nsp_open(cpp);
1639 	if (nsp == NULL) {
1640 		PMD_INIT_LOG(ERR, "Could not get NSP.");
1641 		return -EIO;
1642 	}
1643 
1644 	ret = nfp_nsp_hwinfo_set(nsp, hw_info, sizeof(hw_info));
1645 	nfp_nsp_close(nsp);
1646 	if (ret != 0) {
1647 		PMD_INIT_LOG(ERR, "Failed to set parameter to hwinfo.");
1648 		return ret;
1649 	}
1650 
1651 	return 0;
1652 }
1653 
/*
 * Map each NFP media link mode (bit position reported by the NSP
 * "read media" command) to the matching RTE_ETH_LINK_SPEED_* capability
 * flag. Indexed by the NFP_MEDIA_* enum values.
 */
const uint32_t nfp_eth_media_table[NFP_MEDIA_LINK_MODES_NUMBER] = {
	[NFP_MEDIA_W0_RJ45_10M]     = RTE_ETH_LINK_SPEED_10M,
	[NFP_MEDIA_W0_RJ45_10M_HD]  = RTE_ETH_LINK_SPEED_10M_HD,
	[NFP_MEDIA_W0_RJ45_100M]    = RTE_ETH_LINK_SPEED_100M,
	[NFP_MEDIA_W0_RJ45_100M_HD] = RTE_ETH_LINK_SPEED_100M_HD,
	[NFP_MEDIA_W0_RJ45_1G]      = RTE_ETH_LINK_SPEED_1G,
	[NFP_MEDIA_W0_RJ45_2P5G]    = RTE_ETH_LINK_SPEED_2_5G,
	[NFP_MEDIA_W0_RJ45_5G]      = RTE_ETH_LINK_SPEED_5G,
	[NFP_MEDIA_W0_RJ45_10G]     = RTE_ETH_LINK_SPEED_10G,
	[NFP_MEDIA_1000BASE_CX]     = RTE_ETH_LINK_SPEED_1G,
	[NFP_MEDIA_1000BASE_KX]     = RTE_ETH_LINK_SPEED_1G,
	[NFP_MEDIA_10GBASE_KX4]     = RTE_ETH_LINK_SPEED_10G,
	[NFP_MEDIA_10GBASE_KR]      = RTE_ETH_LINK_SPEED_10G,
	[NFP_MEDIA_10GBASE_CX4]     = RTE_ETH_LINK_SPEED_10G,
	[NFP_MEDIA_10GBASE_CR]      = RTE_ETH_LINK_SPEED_10G,
	[NFP_MEDIA_10GBASE_SR]      = RTE_ETH_LINK_SPEED_10G,
	[NFP_MEDIA_10GBASE_ER]      = RTE_ETH_LINK_SPEED_10G,
	[NFP_MEDIA_25GBASE_KR]      = RTE_ETH_LINK_SPEED_25G,
	[NFP_MEDIA_25GBASE_KR_S]    = RTE_ETH_LINK_SPEED_25G,
	[NFP_MEDIA_25GBASE_CR]      = RTE_ETH_LINK_SPEED_25G,
	[NFP_MEDIA_25GBASE_CR_S]    = RTE_ETH_LINK_SPEED_25G,
	[NFP_MEDIA_25GBASE_SR]      = RTE_ETH_LINK_SPEED_25G,
	[NFP_MEDIA_40GBASE_CR4]     = RTE_ETH_LINK_SPEED_40G,
	[NFP_MEDIA_40GBASE_KR4]     = RTE_ETH_LINK_SPEED_40G,
	[NFP_MEDIA_40GBASE_SR4]     = RTE_ETH_LINK_SPEED_40G,
	[NFP_MEDIA_40GBASE_LR4]     = RTE_ETH_LINK_SPEED_40G,
	[NFP_MEDIA_50GBASE_KR]      = RTE_ETH_LINK_SPEED_50G,
	[NFP_MEDIA_50GBASE_SR]      = RTE_ETH_LINK_SPEED_50G,
	[NFP_MEDIA_50GBASE_CR]      = RTE_ETH_LINK_SPEED_50G,
	[NFP_MEDIA_50GBASE_LR]      = RTE_ETH_LINK_SPEED_50G,
	[NFP_MEDIA_50GBASE_ER]      = RTE_ETH_LINK_SPEED_50G,
	[NFP_MEDIA_50GBASE_FR]      = RTE_ETH_LINK_SPEED_50G,
	[NFP_MEDIA_100GBASE_KR4]    = RTE_ETH_LINK_SPEED_100G,
	[NFP_MEDIA_100GBASE_SR4]    = RTE_ETH_LINK_SPEED_100G,
	[NFP_MEDIA_100GBASE_CR4]    = RTE_ETH_LINK_SPEED_100G,
	[NFP_MEDIA_100GBASE_KP4]    = RTE_ETH_LINK_SPEED_100G,
	[NFP_MEDIA_100GBASE_CR10]   = RTE_ETH_LINK_SPEED_100G,
	[NFP_MEDIA_10GBASE_LR]      = RTE_ETH_LINK_SPEED_10G,
	[NFP_MEDIA_25GBASE_LR]      = RTE_ETH_LINK_SPEED_25G,
	[NFP_MEDIA_25GBASE_ER]      = RTE_ETH_LINK_SPEED_25G
};
1695 
1696 static int
1697 nfp_net_speed_capa_get_real(struct nfp_eth_media_buf *media_buf,
1698 		struct nfp_pf_dev *pf_dev)
1699 {
1700 	uint32_t i;
1701 	uint32_t j;
1702 	uint32_t offset;
1703 	uint32_t speed_capa = 0;
1704 	uint64_t supported_modes;
1705 
1706 	for (i = 0; i < RTE_DIM(media_buf->supported_modes); i++) {
1707 		supported_modes = media_buf->supported_modes[i];
1708 		offset = i * UINT64_BIT;
1709 		for (j = 0; j < UINT64_BIT; j++) {
1710 			if (supported_modes == 0)
1711 				break;
1712 
1713 			if ((supported_modes & 1) != 0) {
1714 				if ((j + offset) >= NFP_MEDIA_LINK_MODES_NUMBER) {
1715 					PMD_DRV_LOG(ERR, "Invalid offset of media table.");
1716 					return -EINVAL;
1717 				}
1718 
1719 				speed_capa |= nfp_eth_media_table[j + offset];
1720 			}
1721 
1722 			supported_modes = supported_modes >> 1;
1723 		}
1724 	}
1725 
1726 	pf_dev->speed_capa = speed_capa;
1727 
1728 	return pf_dev->speed_capa == 0 ? -EINVAL : 0;
1729 }
1730 
1731 static int
1732 nfp_net_speed_capa_get(struct nfp_pf_dev *pf_dev,
1733 		uint32_t port_id)
1734 {
1735 	int ret;
1736 	struct nfp_nsp *nsp;
1737 	struct nfp_eth_media_buf media_buf;
1738 
1739 	media_buf.eth_index = pf_dev->nfp_eth_table->ports[port_id].eth_index;
1740 	pf_dev->speed_capa = 0;
1741 
1742 	nsp = nfp_nsp_open(pf_dev->cpp);
1743 	if (nsp == NULL) {
1744 		PMD_DRV_LOG(ERR, "Couldn't get NSP.");
1745 		return -EIO;
1746 	}
1747 
1748 	ret = nfp_nsp_read_media(nsp, &media_buf, sizeof(media_buf));
1749 	nfp_nsp_close(nsp);
1750 	if (ret != 0) {
1751 		PMD_DRV_LOG(ERR, "Failed to read media.");
1752 		return ret;
1753 	}
1754 
1755 	ret = nfp_net_speed_capa_get_real(&media_buf, pf_dev);
1756 	if (ret < 0) {
1757 		PMD_DRV_LOG(ERR, "Speed capability is invalid.");
1758 		return ret;
1759 	}
1760 
1761 	return 0;
1762 }
1763 
/*
 * Primary-process probe of an NFP PF.
 *
 * Builds the CPP handle, reads hwinfo and the port table, loads (or
 * reuses) firmware, maps the queue controller BAR and finally hands
 * control to the app-firmware specific init (coreNIC or flower).
 *
 * Returns 0 on success, negative errno on failure; on error every
 * resource acquired so far is released in reverse order via the
 * cleanup labels at the bottom.
 */
static int
nfp_pf_init(struct rte_pci_device *pci_dev)
{
	void *sync;
	uint32_t i;
	uint32_t id;
	int ret = 0;
	uint64_t addr;
	uint32_t index;
	uint32_t cpp_id;
	uint8_t function_id;
	struct nfp_cpp *cpp;
	struct nfp_pf_dev *pf_dev;
	struct nfp_hwinfo *hwinfo;
	enum nfp_app_fw_id app_fw_id;
	char name[RTE_ETH_NAME_MAX_LEN];
	struct nfp_rtsym_table *sym_tbl;
	char app_name[RTE_ETH_NAME_MAX_LEN];
	struct nfp_eth_table *nfp_eth_table;
	const struct nfp_dev_info *dev_info;

	if (pci_dev == NULL)
		return -ENODEV;

	if (pci_dev->mem_resource[0].addr == NULL) {
		PMD_INIT_LOG(ERR, "The address of BAR0 is NULL.");
		return -ENODEV;
	}

	dev_info = nfp_dev_info_get(pci_dev->id.device_id);
	if (dev_info == NULL) {
		PMD_INIT_LOG(ERR, "Not supported device ID");
		return -ENODEV;
	}

	/* Allocate memory for the PF "device" */
	function_id = (pci_dev->addr.function) & 0x07;
	snprintf(name, sizeof(name), "nfp_pf%u", function_id);
	pf_dev = rte_zmalloc(name, sizeof(*pf_dev), 0);
	if (pf_dev == NULL) {
		PMD_INIT_LOG(ERR, "Can't allocate memory for the PF device");
		return -ENOMEM;
	}

	sync = nfp_sync_alloc();
	if (sync == NULL) {
		PMD_INIT_LOG(ERR, "Failed to alloc sync zone.");
		ret = -ENOMEM;
		goto pf_cleanup;
	}

	/*
	 * When device bound to UIO, the device could be used, by mistake,
	 * by two DPDK apps, and the UIO driver does not avoid it. This
	 * could lead to a serious problem when configuring the NFP CPP
	 * interface. Here we avoid this telling to the CPP init code to
	 * use a lock file if UIO is being used.
	 */
	if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO)
		cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, false);
	else
		cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, true);

	if (cpp == NULL) {
		PMD_INIT_LOG(ERR, "A CPP handle can not be obtained");
		ret = -EIO;
		goto sync_free;
	}

	hwinfo = nfp_hwinfo_read(cpp);
	if (hwinfo == NULL) {
		PMD_INIT_LOG(ERR, "Error reading hwinfo table");
		ret = -EIO;
		goto cpp_cleanup;
	}

	/* Read the number of physical ports from hardware */
	nfp_eth_table = nfp_eth_read_ports(cpp);
	if (nfp_eth_table == NULL) {
		PMD_INIT_LOG(ERR, "Error reading NFP ethernet table");
		ret = -EIO;
		goto hwinfo_cleanup;
	}

	pf_dev->multi_pf.enabled = nfp_check_multi_pf_from_nsp(pci_dev, cpp);
	pf_dev->multi_pf.function_id = function_id;

	/* Force the physical port down to clear the possible DMA error */
	for (i = 0; i < nfp_eth_table->count; i++) {
		id = nfp_function_id_get(pf_dev, i);
		index = nfp_eth_table->ports[id].index;
		nfp_eth_set_configured(cpp, index, 0);
	}

	/* Parse device arguments (e.g. force_reload_fw) before firmware setup. */
	nfp_devargs_parse(&pf_dev->devargs, pci_dev->device.devargs);

	if (nfp_fw_setup(pci_dev, cpp, nfp_eth_table, hwinfo,
			dev_info, &pf_dev->multi_pf, pf_dev->devargs.force_reload_fw) != 0) {
		PMD_INIT_LOG(ERR, "Error when uploading firmware");
		ret = -EIO;
		goto eth_table_cleanup;
	}

	/* Now the symbol table should be there */
	sym_tbl = nfp_rtsym_table_read(cpp);
	if (sym_tbl == NULL) {
		PMD_INIT_LOG(ERR, "Something is wrong with the firmware symbol table");
		ret = -EIO;
		goto fw_cleanup;
	}

	/* Read the app ID of the firmware loaded */
	snprintf(app_name, sizeof(app_name), "_pf%u_net_app_id", function_id);
	app_fw_id = nfp_rtsym_read_le(sym_tbl, app_name, &ret);
	if (ret != 0) {
		PMD_INIT_LOG(ERR, "Couldn't read %s from firmware", app_name);
		ret = -EIO;
		goto sym_tbl_cleanup;
	}

	/* Write sp_indiff to hw_info */
	ret = nfp_net_hwinfo_set(function_id, sym_tbl, cpp, app_fw_id);
	if (ret != 0) {
		PMD_INIT_LOG(ERR, "Failed to set hwinfo.");
		ret = -EIO;
		goto sym_tbl_cleanup;
	}

	/* Populate the newly created PF device */
	pf_dev->app_fw_id = app_fw_id;
	pf_dev->cpp = cpp;
	pf_dev->hwinfo = hwinfo;
	pf_dev->sym_tbl = sym_tbl;
	pf_dev->pci_dev = pci_dev;
	pf_dev->nfp_eth_table = nfp_eth_table;
	pf_dev->sync = sync;

	/* Get the speed capability */
	for (i = 0; i < nfp_eth_table->count; i++) {
		id = nfp_function_id_get(pf_dev, i);
		ret = nfp_net_speed_capa_get(pf_dev, id);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Failed to get speed capability.");
			ret = -EIO;
			goto sym_tbl_cleanup;
		}
	}

	/* Configure access to tx/rx vNIC BARs */
	addr = nfp_qcp_queue_offset(dev_info, 0);
	cpp_id = NFP_CPP_ISLAND_ID(0, NFP_CPP_ACTION_RW, 0, 0);

	pf_dev->qc_bar = nfp_cpp_map_area(pf_dev->cpp, cpp_id,
			addr, dev_info->qc_area_sz, &pf_dev->qc_area);
	if (pf_dev->qc_bar == NULL) {
		PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for net.qc");
		ret = -EIO;
		goto sym_tbl_cleanup;
	}

	PMD_INIT_LOG(DEBUG, "qc_bar address: %p", pf_dev->qc_bar);

	/*
	 * PF initialization has been done at this point. Call app specific
	 * init code now.
	 */
	switch (pf_dev->app_fw_id) {
	case NFP_APP_FW_CORE_NIC:
		if (pf_dev->multi_pf.enabled) {
			ret = nfp_enable_multi_pf(pf_dev);
			if (ret != 0)
				goto hwqueues_cleanup;
		}

		PMD_INIT_LOG(INFO, "Initializing coreNIC");
		ret = nfp_init_app_fw_nic(pf_dev, dev_info);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Could not initialize coreNIC!");
			goto hwqueues_cleanup;
		}
		break;
	case NFP_APP_FW_FLOWER_NIC:
		PMD_INIT_LOG(INFO, "Initializing Flower");
		ret = nfp_init_app_fw_flower(pf_dev, dev_info);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Could not initialize Flower!");
			goto hwqueues_cleanup;
		}
		break;
	default:
		PMD_INIT_LOG(ERR, "Unsupported Firmware loaded");
		ret = -EINVAL;
		goto hwqueues_cleanup;
	}

	/* Register the CPP bridge service here for primary use */
	ret = nfp_enable_cpp_service(pf_dev);
	if (ret != 0)
		PMD_INIT_LOG(INFO, "Enable cpp service failed.");

	return 0;

hwqueues_cleanup:
	nfp_cpp_area_release_free(pf_dev->qc_area);
sym_tbl_cleanup:
	free(sym_tbl);
fw_cleanup:
	nfp_fw_unload(cpp);
	/*
	 * Keepalive teardown - presumably no-ops for single-PF setups where
	 * beat_addr was never initialized; verify against the keepalive
	 * helpers.
	 */
	nfp_net_keepalive_stop(&pf_dev->multi_pf);
	nfp_net_keepalive_clear(pf_dev->multi_pf.beat_addr, pf_dev->multi_pf.function_id);
	nfp_net_keepalive_uninit(&pf_dev->multi_pf);
eth_table_cleanup:
	free(nfp_eth_table);
hwinfo_cleanup:
	free(hwinfo);
cpp_cleanup:
	nfp_cpp_free(cpp);
sync_free:
	nfp_sync_free(sync);
pf_cleanup:
	rte_free(pf_dev);

	return ret;
}
1988 
1989 static int
1990 nfp_secondary_init_app_fw_nic(struct nfp_pf_dev *pf_dev)
1991 {
1992 	uint32_t i;
1993 	int err = 0;
1994 	int ret = 0;
1995 	uint8_t function_id;
1996 	uint32_t total_vnics;
1997 	struct nfp_net_hw *hw;
1998 	char pf_name[RTE_ETH_NAME_MAX_LEN];
1999 
2000 	/* Read the number of vNIC's created for the PF */
2001 	function_id = (pf_dev->pci_dev->addr.function) & 0x07;
2002 	snprintf(pf_name, sizeof(pf_name), "nfd_cfg_pf%u_num_ports", function_id);
2003 	total_vnics = nfp_rtsym_read_le(pf_dev->sym_tbl, pf_name, &err);
2004 	if (err != 0 || total_vnics == 0 || total_vnics > 8) {
2005 		PMD_INIT_LOG(ERR, "%s symbol with wrong value", pf_name);
2006 		return -ENODEV;
2007 	}
2008 
2009 	for (i = 0; i < total_vnics; i++) {
2010 		struct rte_eth_dev *eth_dev;
2011 		char port_name[RTE_ETH_NAME_MAX_LEN];
2012 
2013 		if (nfp_check_multi_pf_from_fw(total_vnics))
2014 			snprintf(port_name, sizeof(port_name), "%s",
2015 					pf_dev->pci_dev->device.name);
2016 		else
2017 			snprintf(port_name, sizeof(port_name), "%s_port%u",
2018 					pf_dev->pci_dev->device.name, i);
2019 
2020 		PMD_INIT_LOG(DEBUG, "Secondary attaching to port %s", port_name);
2021 		eth_dev = rte_eth_dev_attach_secondary(port_name);
2022 		if (eth_dev == NULL) {
2023 			PMD_INIT_LOG(ERR, "Secondary process attach to port %s failed", port_name);
2024 			ret = -ENODEV;
2025 			break;
2026 		}
2027 
2028 		eth_dev->process_private = pf_dev;
2029 		hw = eth_dev->data->dev_private;
2030 		nfp_net_ethdev_ops_mount(hw, eth_dev);
2031 
2032 		rte_eth_dev_probing_finish(eth_dev);
2033 	}
2034 
2035 	return ret;
2036 }
2037 
2038 /*
2039  * When attaching to the NFP4000/6000 PF on a secondary process there
2040  * is no need to initialise the PF again. Only minimal work is required
2041  * here.
2042  */
/*
 * Secondary-process probe of an NFP PF.
 *
 * The PF object built by the primary process is not shared, so a fresh
 * CPP handle and symbol table are created here just to discover the app
 * firmware type and attach to the existing ethdev ports.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int
nfp_pf_secondary_init(struct rte_pci_device *pci_dev)
{
	void *sync;
	int ret = 0;
	struct nfp_cpp *cpp;
	uint8_t function_id;
	struct nfp_pf_dev *pf_dev;
	enum nfp_app_fw_id app_fw_id;
	char name[RTE_ETH_NAME_MAX_LEN];
	struct nfp_rtsym_table *sym_tbl;
	const struct nfp_dev_info *dev_info;
	char app_name[RTE_ETH_NAME_MAX_LEN];

	if (pci_dev == NULL)
		return -ENODEV;

	if (pci_dev->mem_resource[0].addr == NULL) {
		PMD_INIT_LOG(ERR, "The address of BAR0 is NULL.");
		return -ENODEV;
	}

	dev_info = nfp_dev_info_get(pci_dev->id.device_id);
	if (dev_info == NULL) {
		PMD_INIT_LOG(ERR, "Not supported device ID");
		return -ENODEV;
	}

	/* Allocate memory for the PF "device" */
	snprintf(name, sizeof(name), "nfp_pf%d", 0);
	pf_dev = rte_zmalloc(name, sizeof(*pf_dev), 0);
	if (pf_dev == NULL) {
		PMD_INIT_LOG(ERR, "Can't allocate memory for the PF device");
		return -ENOMEM;
	}

	sync = nfp_sync_alloc();
	if (sync == NULL) {
		PMD_INIT_LOG(ERR, "Failed to alloc sync zone.");
		ret = -ENOMEM;
		goto pf_cleanup;
	}

	/*
	 * When device bound to UIO, the device could be used, by mistake,
	 * by two DPDK apps, and the UIO driver does not avoid it. This
	 * could lead to a serious problem when configuring the NFP CPP
	 * interface. Here we avoid this telling to the CPP init code to
	 * use a lock file if UIO is being used.
	 */
	if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO)
		cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, false);
	else
		cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, true);

	if (cpp == NULL) {
		PMD_INIT_LOG(ERR, "A CPP handle can not be obtained");
		ret = -EIO;
		goto sync_free;
	}

	/*
	 * We don't have access to the PF created in the primary process
	 * here so we have to read the number of ports from firmware.
	 */
	sym_tbl = nfp_rtsym_table_read(cpp);
	if (sym_tbl == NULL) {
		PMD_INIT_LOG(ERR, "Something is wrong with the firmware symbol table");
		ret = -EIO;
		goto sync_free;
	}

	/* Read the app ID of the firmware loaded */
	function_id = pci_dev->addr.function & 0x7;
	snprintf(app_name, sizeof(app_name), "_pf%u_net_app_id", function_id);
	app_fw_id = nfp_rtsym_read_le(sym_tbl, app_name, &ret);
	if (ret != 0) {
		PMD_INIT_LOG(ERR, "Couldn't read %s from fw", app_name);
		ret = -EIO;
		goto sym_tbl_cleanup;
	}

	/* Populate the newly created PF device */
	pf_dev->app_fw_id = app_fw_id;
	pf_dev->cpp = cpp;
	pf_dev->sym_tbl = sym_tbl;
	pf_dev->pci_dev = pci_dev;
	pf_dev->sync = sync;

	/* Call app specific init code now */
	switch (app_fw_id) {
	case NFP_APP_FW_CORE_NIC:
		PMD_INIT_LOG(INFO, "Initializing coreNIC");
		ret = nfp_secondary_init_app_fw_nic(pf_dev);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Could not initialize coreNIC!");
			goto sym_tbl_cleanup;
		}
		break;
	case NFP_APP_FW_FLOWER_NIC:
		PMD_INIT_LOG(INFO, "Initializing Flower");
		ret = nfp_secondary_init_app_fw_flower(pf_dev);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Could not initialize Flower!");
			goto sym_tbl_cleanup;
		}
		break;
	default:
		PMD_INIT_LOG(ERR, "Unsupported Firmware loaded");
		ret = -EINVAL;
		goto sym_tbl_cleanup;
	}

	return 0;

sym_tbl_cleanup:
	free(sym_tbl);
sync_free:
	nfp_sync_free(sync);
pf_cleanup:
	rte_free(pf_dev);

	return ret;
}
2167 
2168 static int
2169 nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2170 		struct rte_pci_device *dev)
2171 {
2172 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
2173 		return nfp_pf_init(dev);
2174 	else
2175 		return nfp_pf_secondary_init(dev);
2176 }
2177 
/*
 * PCI device IDs claimed by this PF driver: NFP3800/4000/6000 PF NICs,
 * listed under both the Netronome and Corigine vendor IDs.
 */
static const struct rte_pci_id pci_id_nfp_pf_net_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
				PCI_DEVICE_ID_NFP3800_PF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
				PCI_DEVICE_ID_NFP4000_PF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
				PCI_DEVICE_ID_NFP6000_PF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE,
				PCI_DEVICE_ID_NFP3800_PF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE,
				PCI_DEVICE_ID_NFP4000_PF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE,
				PCI_DEVICE_ID_NFP6000_PF_NIC)
	},
	{
		/* Sentinel entry: zero vendor ID terminates the table. */
		.vendor_id = 0,
	},
};
2207 
2208 static int
2209 nfp_pci_uninit(struct rte_eth_dev *eth_dev)
2210 {
2211 	uint16_t port_id;
2212 	struct rte_pci_device *pci_dev;
2213 
2214 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2215 
2216 	/* Free up all physical ports under PF */
2217 	RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device)
2218 		rte_eth_dev_close(port_id);
2219 	/*
2220 	 * Ports can be closed and freed but hotplugging is not
2221 	 * currently supported.
2222 	 */
2223 	return -ENOTSUP;
2224 }
2225 
2226 static int
2227 eth_nfp_pci_remove(struct rte_pci_device *pci_dev)
2228 {
2229 	return rte_eth_dev_pci_generic_remove(pci_dev, nfp_pci_uninit);
2230 }
2231 
/* PCI driver descriptor for the NFP PF PMD. */
static struct rte_pci_driver rte_nfp_net_pf_pmd = {
	.id_table = pci_id_nfp_pf_net_map,
	/* Needs BAR mapping; supports link-status-change interrupts. */
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = nfp_pf_pci_probe,
	.remove = eth_nfp_pci_remove,
};
2238 
/* Register the driver, its PCI ID table, kernel-module deps and devargs. */
RTE_PMD_REGISTER_PCI(NFP_PF_DRIVER_NAME, rte_nfp_net_pf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(NFP_PF_DRIVER_NAME, pci_id_nfp_pf_net_map);
RTE_PMD_REGISTER_KMOD_DEP(NFP_PF_DRIVER_NAME, "* igb_uio | uio_pci_generic | vfio");
RTE_PMD_REGISTER_PARAM_STRING(NFP_PF_DRIVER_NAME, NFP_PF_FORCE_RELOAD_FW "=<0|1>");
2243