xref: /dpdk/drivers/net/nfp/nfp_ethdev.c (revision e9fd1ebf981f361844aea9ec94e17f4bda5e1479)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2014-2021 Netronome Systems, Inc.
3  * All rights reserved.
4  *
5  * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
6  */
7 
#include <errno.h>
#include <unistd.h>

#include <eal_firmware.h>
#include <rte_alarm.h>
#include <rte_kvargs.h>
13 
14 #include "flower/nfp_flower.h"
15 #include "nfd3/nfp_nfd3.h"
16 #include "nfdk/nfp_nfdk.h"
17 #include "nfpcore/nfp_cpp.h"
18 #include "nfpcore/nfp_elf.h"
19 #include "nfpcore/nfp_hwinfo.h"
20 #include "nfpcore/nfp_rtsym.h"
21 #include "nfpcore/nfp_nsp.h"
22 #include "nfpcore/nfp6000_pcie.h"
23 #include "nfpcore/nfp_resource.h"
24 #include "nfpcore/nfp_sync.h"
25 
26 #include "nfp_cpp_bridge.h"
27 #include "nfp_ipsec.h"
28 #include "nfp_logs.h"
29 #include "nfp_net_flow.h"
30 
31 /* 64-bit per app capabilities */
32 #define NFP_NET_APP_CAP_SP_INDIFF       RTE_BIT64(0) /* Indifferent to port speed */
33 
34 #define NFP_PF_DRIVER_NAME net_nfp_pf
35 #define NFP_PF_FORCE_RELOAD_FW   "force_reload_fw"
36 
37 static int
38 nfp_devarg_handle_int(const char *key,
39 		const char *value,
40 		void *extra_args)
41 {
42 	char *end_ptr;
43 	uint64_t *num = extra_args;
44 
45 	if (value == NULL)
46 		return -EPERM;
47 
48 	*num = strtoul(value, &end_ptr, 10);
49 	if (*num == ULONG_MAX) {
50 		PMD_DRV_LOG(ERR, "%s: '%s' is not a valid param", key, value);
51 		return -ERANGE;
52 	} else if (value == end_ptr) {
53 		return -EPERM;
54 	}
55 
56 	return 0;
57 }
58 
59 static void
60 nfp_devarg_parse_force_reload_fw(struct rte_kvargs *kvlist,
61 		bool *force_reload_fw)
62 {
63 	int ret;
64 	uint64_t value;
65 
66 
67 	if (rte_kvargs_count(kvlist, NFP_PF_FORCE_RELOAD_FW) != 1)
68 		return;
69 
70 	ret = rte_kvargs_process(kvlist, NFP_PF_FORCE_RELOAD_FW, &nfp_devarg_handle_int, &value);
71 	if (ret != 0)
72 		return;
73 
74 	if (value == 1)
75 		*force_reload_fw = true;
76 	else if (value == 0)
77 		*force_reload_fw = false;
78 	else
79 		PMD_DRV_LOG(ERR, "The param does not work, the format is %s=0/1",
80 				NFP_PF_FORCE_RELOAD_FW);
81 }
82 
83 static void
84 nfp_devargs_parse(struct nfp_devargs *nfp_devargs_param,
85 		const struct rte_devargs *devargs)
86 {
87 	struct rte_kvargs *kvlist;
88 
89 	if (devargs == NULL)
90 		return;
91 
92 	kvlist = rte_kvargs_parse(devargs->args, NULL);
93 	if (kvlist == NULL)
94 		return;
95 
96 	nfp_devarg_parse_force_reload_fw(kvlist, &nfp_devargs_param->force_reload_fw);
97 
98 	rte_kvargs_free(kvlist);
99 }
100 
101 static void
102 nfp_net_pf_read_mac(struct nfp_app_fw_nic *app_fw_nic,
103 		uint16_t port)
104 {
105 	struct nfp_net_hw *hw;
106 	struct nfp_eth_table *nfp_eth_table;
107 
108 	/* Grab a pointer to the correct physical port */
109 	hw = app_fw_nic->ports[port];
110 
111 	nfp_eth_table = app_fw_nic->pf_dev->nfp_eth_table;
112 
113 	rte_ether_addr_copy(&nfp_eth_table->ports[port].mac_addr, &hw->super.mac_addr);
114 }
115 
116 static uint32_t
117 nfp_net_speed_bitmap2speed(uint32_t speeds_bitmap)
118 {
119 	switch (speeds_bitmap) {
120 	case RTE_ETH_LINK_SPEED_10M_HD:
121 		return RTE_ETH_SPEED_NUM_10M;
122 	case RTE_ETH_LINK_SPEED_10M:
123 		return RTE_ETH_SPEED_NUM_10M;
124 	case RTE_ETH_LINK_SPEED_100M_HD:
125 		return RTE_ETH_SPEED_NUM_100M;
126 	case RTE_ETH_LINK_SPEED_100M:
127 		return RTE_ETH_SPEED_NUM_100M;
128 	case RTE_ETH_LINK_SPEED_1G:
129 		return RTE_ETH_SPEED_NUM_1G;
130 	case RTE_ETH_LINK_SPEED_2_5G:
131 		return RTE_ETH_SPEED_NUM_2_5G;
132 	case RTE_ETH_LINK_SPEED_5G:
133 		return RTE_ETH_SPEED_NUM_5G;
134 	case RTE_ETH_LINK_SPEED_10G:
135 		return RTE_ETH_SPEED_NUM_10G;
136 	case RTE_ETH_LINK_SPEED_20G:
137 		return RTE_ETH_SPEED_NUM_20G;
138 	case RTE_ETH_LINK_SPEED_25G:
139 		return RTE_ETH_SPEED_NUM_25G;
140 	case RTE_ETH_LINK_SPEED_40G:
141 		return RTE_ETH_SPEED_NUM_40G;
142 	case RTE_ETH_LINK_SPEED_50G:
143 		return RTE_ETH_SPEED_NUM_50G;
144 	case RTE_ETH_LINK_SPEED_56G:
145 		return RTE_ETH_SPEED_NUM_56G;
146 	case RTE_ETH_LINK_SPEED_100G:
147 		return RTE_ETH_SPEED_NUM_100G;
148 	case RTE_ETH_LINK_SPEED_200G:
149 		return RTE_ETH_SPEED_NUM_200G;
150 	case RTE_ETH_LINK_SPEED_400G:
151 		return RTE_ETH_SPEED_NUM_400G;
152 	default:
153 		return RTE_ETH_SPEED_NUM_NONE;
154 	}
155 }
156 
157 static int
158 nfp_net_nfp4000_speed_configure_check(uint16_t port_id,
159 		uint32_t configure_speed,
160 		struct nfp_eth_table *nfp_eth_table)
161 {
162 	switch (port_id) {
163 	case 0:
164 		if (configure_speed == RTE_ETH_SPEED_NUM_25G &&
165 				nfp_eth_table->ports[1].speed == RTE_ETH_SPEED_NUM_10G) {
166 			PMD_DRV_LOG(ERR, "The speed configuration is not supported for NFP4000.");
167 			return -ENOTSUP;
168 		}
169 		break;
170 	case 1:
171 		if (configure_speed == RTE_ETH_SPEED_NUM_10G &&
172 				nfp_eth_table->ports[0].speed == RTE_ETH_SPEED_NUM_25G) {
173 			PMD_DRV_LOG(ERR, "The speed configuration is not supported for NFP4000.");
174 			return -ENOTSUP;
175 		}
176 		break;
177 	default:
178 		PMD_DRV_LOG(ERR, "The port id is invalid.");
179 		return -EINVAL;
180 	}
181 
182 	return 0;
183 }
184 
/*
 * Apply the configured link speed / auto-negotiation mode to the port
 * through the NSP. With RTE_ETH_LINK_SPEED_AUTONEG, auto-negotiation is
 * enabled when the port supports it; otherwise auto-negotiation is
 * disabled and the fixed speed derived from dev_conf.link_speeds is set.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int
nfp_net_speed_configure(struct rte_eth_dev *dev,
		struct nfp_net_hw *net_hw)
{
	int ret;
	uint32_t speed_capa;
	struct nfp_nsp *nsp;
	uint32_t link_speeds;
	uint32_t configure_speed;
	struct nfp_eth_table_port *eth_port;
	struct nfp_eth_table *nfp_eth_table;

	nfp_eth_table = net_hw->pf_dev->nfp_eth_table;
	eth_port = &nfp_eth_table->ports[net_hw->idx];

	speed_capa = net_hw->pf_dev->speed_capa;
	if (speed_capa == 0) {
		PMD_DRV_LOG(ERR, "Speed_capa is invalid.");
		return -EINVAL;
	}

	/* Only consider speeds the hardware actually advertises. */
	link_speeds = dev->data->dev_conf.link_speeds;
	configure_speed = nfp_net_speed_bitmap2speed(speed_capa & link_speeds);
	if (configure_speed == RTE_ETH_SPEED_NUM_NONE &&
			link_speeds != RTE_ETH_LINK_SPEED_AUTONEG) {
		PMD_DRV_LOG(ERR, "Configured speed is invalid.");
		return -EINVAL;
	}

	/* NFP4000 does not allow the port 0 25Gbps and port 1 10Gbps at the same time. */
	if (net_hw->device_id == PCI_DEVICE_ID_NFP4000_PF_NIC) {
		ret = nfp_net_nfp4000_speed_configure_check(net_hw->idx,
				configure_speed, nfp_eth_table);
		if (ret != 0) {
			PMD_DRV_LOG(ERR, "Failed to configure speed for NFP4000.");
			return ret;
		}
	}

	/*
	 * Open an NSP configuration session; every exit path below must
	 * either commit (nfp_eth_config_commit_end) or abort the session
	 * (nfp_eth_config_cleanup_end).
	 */
	nsp = nfp_eth_config_start(net_hw->cpp, eth_port->index);
	if (nsp == NULL) {
		PMD_DRV_LOG(ERR, "Couldn't get NSP.");
		return -EIO;
	}

	if (link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
		/* Autoneg is only requested when the port supports it. */
		if (eth_port->supp_aneg) {
			ret = nfp_eth_set_aneg(nsp, NFP_ANEG_AUTO);
			if (ret != 0) {
				PMD_DRV_LOG(ERR, "Failed to set ANEG enable.");
				goto config_cleanup;
			}
		}
	} else {
		/* Fixed speed: autoneg off first, then program the speed. */
		ret = nfp_eth_set_aneg(nsp, NFP_ANEG_DISABLED);
		if (ret != 0) {
			PMD_DRV_LOG(ERR, "Failed to set ANEG disable.");
			goto config_cleanup;
		}

		ret = nfp_eth_set_speed(nsp, configure_speed);
		if (ret != 0) {
			PMD_DRV_LOG(ERR, "Failed to set speed.");
			goto config_cleanup;
		}
	}

	return nfp_eth_config_commit_end(nsp);

config_cleanup:
	nfp_eth_config_cleanup_end(nsp);

	return ret;
}
259 
/*
 * Start the device: configure link speed, Rx interrupt mapping, offloads
 * and ring state, push the new control words to the firmware, allocate
 * Rx mbufs and finally bring the physical port up.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int
nfp_net_start(struct rte_eth_dev *dev)
{
	int ret;
	uint16_t i;
	struct nfp_hw *hw;
	uint32_t new_ctrl;
	struct nfp_cpp *cpp;
	uint32_t update = 0;
	uint32_t cap_extend;
	uint32_t intr_vector;
	uint32_t ctrl_extend = 0;
	struct nfp_net_hw *net_hw;
	struct nfp_pf_dev *pf_dev;
	struct rte_eth_rxmode *rxmode;
	struct nfp_app_fw_nic *app_fw_nic;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;

	net_hw = dev->data->dev_private;
	pf_dev = net_hw->pf_dev;
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);
	hw = &net_hw->super;

	/* Disabling queues just in case... */
	nfp_net_disable_queues(dev);

	/* Enabling the required queues in the device */
	nfp_net_enable_queues(dev);

	/* Configure the port speed and the auto-negotiation mode. */
	ret = nfp_net_speed_configure(dev, net_hw);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to set the speed and auto-negotiation mode.");
		return ret;
	}

	/* Check and configure queue intr-vector mapping */
	if (dev->data->dev_conf.intr_conf.rxq != 0) {
		if (app_fw_nic->multiport) {
			PMD_INIT_LOG(ERR, "PMD rx interrupt is not supported "
					"with NFP multiport PF");
				return -EINVAL;
		}

		if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_UIO) {
			/*
			 * Better not to share LSC with RX interrupts.
			 * Unregistering LSC interrupt handler.
			 */
			rte_intr_callback_unregister(intr_handle,
					nfp_net_dev_interrupt_handler, (void *)dev);

			if (dev->data->nb_rx_queues > 1) {
				PMD_INIT_LOG(ERR, "PMD rx interrupt only "
						"supports 1 queue with UIO");
				return -EIO;
			}
		}

		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector) != 0)
			return -1;

		nfp_configure_rx_interrupt(dev, intr_handle);
		update = NFP_NET_CFG_UPDATE_MSIX;
	}

	/* Checking MTU set */
	if (dev->data->mtu > net_hw->flbufsz) {
		PMD_INIT_LOG(ERR, "MTU (%u) can't be larger than the current NFP_FRAME_SIZE (%u)",
				dev->data->mtu, net_hw->flbufsz);
		return -ERANGE;
	}

	rte_intr_enable(intr_handle);

	/* Build the control word from the offloads the app configured. */
	new_ctrl = nfp_check_offloads(dev);

	/* Writing configuration parameters in the device */
	nfp_net_params_setup(net_hw);

	/* Enable RSS when requested in the Rx multi-queue mode. */
	rxmode = &dev->data->dev_conf.rxmode;
	if ((rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) != 0) {
		nfp_net_rss_config_default(dev);
		update |= NFP_NET_CFG_UPDATE_RSS;
		new_ctrl |= nfp_net_cfg_ctrl_rss(hw->cap);
	}

	/* Enable device */
	new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;

	update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;

	/* Enable vxlan */
	if ((hw->cap & NFP_NET_CFG_CTRL_VXLAN) != 0) {
		new_ctrl |= NFP_NET_CFG_CTRL_VXLAN;
		update |= NFP_NET_CFG_UPDATE_VXLAN;
	}

	if ((hw->cap & NFP_NET_CFG_CTRL_RINGCFG) != 0)
		new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;

	/* Push the new control word to the firmware. */
	if (nfp_reconfig(hw, new_ctrl, update) != 0)
		return -EIO;

	hw->ctrl = new_ctrl;

	/* Enable packet type offload by extend ctrl word1. */
	cap_extend = hw->cap_ext;
	if ((cap_extend & NFP_NET_CFG_CTRL_PKT_TYPE) != 0)
		ctrl_extend = NFP_NET_CFG_CTRL_PKT_TYPE;

	if ((cap_extend & NFP_NET_CFG_CTRL_IPSEC) != 0)
		ctrl_extend |= NFP_NET_CFG_CTRL_IPSEC |
				NFP_NET_CFG_CTRL_IPSEC_SM_LOOKUP |
				NFP_NET_CFG_CTRL_IPSEC_LM_LOOKUP;

	/* Enable flow steer by extend ctrl word1. */
	if ((cap_extend & NFP_NET_CFG_CTRL_FLOW_STEER) != 0)
		ctrl_extend |= NFP_NET_CFG_CTRL_FLOW_STEER;

	/* The extended control word needs its own reconfig cycle. */
	update = NFP_NET_CFG_UPDATE_GEN;
	if (nfp_ext_reconfig(hw, ctrl_extend, update) != 0)
		return -EIO;

	hw->ctrl_ext = ctrl_extend;

	/*
	 * Allocating rte mbufs for configured rx queues.
	 * This requires queues being enabled before.
	 */
	if (nfp_net_rx_freelist_setup(dev) != 0) {
		ret = -ENOMEM;
		goto error;
	}

	/* Secondary processes carry their own CPP handle in process_private. */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		cpp = net_hw->cpp;
	else
		cpp = ((struct nfp_pf_dev *)(dev->process_private))->cpp;

	/* Configure the physical port up */
	nfp_eth_set_configured(cpp, net_hw->nfp_idx, 1);

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;

error:
	/*
	 * An error returned by this function should mean the app
	 * exiting and then the system releasing all the memory
	 * allocated even memory coming from hugepages.
	 *
	 * The device could be enabled at this point with some queues
	 * ready for getting packets. This is true if the call to
	 * nfp_net_rx_freelist_setup() succeeds for some queues but
	 * fails for subsequent queues.
	 *
	 * This should make the app exiting but better if we tell the
	 * device first.
	 */
	nfp_net_disable_queues(dev);

	return ret;
}
430 
431 /* Set the link up. */
432 static int
433 nfp_net_set_link_up(struct rte_eth_dev *dev)
434 {
435 	struct nfp_cpp *cpp;
436 	struct nfp_net_hw *hw;
437 
438 	hw = dev->data->dev_private;
439 
440 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
441 		cpp = hw->cpp;
442 	else
443 		cpp = ((struct nfp_pf_dev *)(dev->process_private))->cpp;
444 
445 	return nfp_eth_set_configured(cpp, hw->nfp_idx, 1);
446 }
447 
448 /* Set the link down. */
449 static int
450 nfp_net_set_link_down(struct rte_eth_dev *dev)
451 {
452 	struct nfp_cpp *cpp;
453 	struct nfp_net_hw *hw;
454 
455 	hw = dev->data->dev_private;
456 
457 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
458 		cpp = hw->cpp;
459 	else
460 		cpp = ((struct nfp_pf_dev *)(dev->process_private))->cpp;
461 
462 	return nfp_eth_set_configured(cpp, hw->nfp_idx, 0);
463 }
464 
465 static uint8_t
466 nfp_function_id_get(const struct nfp_pf_dev *pf_dev,
467 		uint8_t phy_port)
468 {
469 	if (pf_dev->multi_pf.enabled)
470 		return pf_dev->multi_pf.function_id;
471 
472 	return phy_port;
473 }
474 
475 static void
476 nfp_net_beat_timer(void *arg)
477 {
478 	uint64_t cur_sec;
479 	struct nfp_multi_pf *multi_pf = arg;
480 
481 	cur_sec = rte_rdtsc();
482 	nn_writeq(cur_sec, multi_pf->beat_addr + NFP_BEAT_OFFSET(multi_pf->function_id));
483 
484 	/* Beat once per second. */
485 	if (rte_eal_alarm_set(1000 * 1000, nfp_net_beat_timer,
486 			(void *)multi_pf) < 0) {
487 		PMD_DRV_LOG(ERR, "Error setting alarm");
488 	}
489 }
490 
491 static int
492 nfp_net_keepalive_init(struct nfp_cpp *cpp,
493 		struct nfp_multi_pf *multi_pf)
494 {
495 	uint8_t *base;
496 	uint64_t addr;
497 	uint32_t size;
498 	uint32_t cpp_id;
499 	struct nfp_resource *res;
500 
501 	res = nfp_resource_acquire(cpp, NFP_RESOURCE_KEEPALIVE);
502 	if (res == NULL)
503 		return -EIO;
504 
505 	cpp_id = nfp_resource_cpp_id(res);
506 	addr = nfp_resource_address(res);
507 	size = nfp_resource_size(res);
508 
509 	nfp_resource_release(res);
510 
511 	/* Allocate a fixed area for keepalive. */
512 	base = nfp_cpp_map_area(cpp, cpp_id, addr, size, &multi_pf->beat_area);
513 	if (base == NULL) {
514 		PMD_DRV_LOG(ERR, "Failed to map area for keepalive.");
515 		return -EIO;
516 	}
517 
518 	multi_pf->beat_addr = base;
519 
520 	return 0;
521 }
522 
/* Release the CPP area mapped by nfp_net_keepalive_init(). */
static void
nfp_net_keepalive_uninit(struct nfp_multi_pf *multi_pf)
{
	nfp_cpp_area_release_free(multi_pf->beat_area);
}
528 
529 static int
530 nfp_net_keepalive_start(struct nfp_multi_pf *multi_pf)
531 {
532 	if (rte_eal_alarm_set(1000 * 1000, nfp_net_beat_timer,
533 			(void *)multi_pf) < 0) {
534 		PMD_DRV_LOG(ERR, "Error setting alarm");
535 		return -EIO;
536 	}
537 
538 	return 0;
539 }
540 
/* Zero the heartbeat word of the given PF function id. */
static void
nfp_net_keepalive_clear(uint8_t *beat_addr,
		uint8_t function_id)
{
	nn_writeq(0, beat_addr + NFP_BEAT_OFFSET(function_id));
}
547 
548 static void
549 nfp_net_keepalive_clear_others(const struct nfp_dev_info *dev_info,
550 		struct nfp_multi_pf *multi_pf)
551 {
552 	uint8_t port_num;
553 
554 	for (port_num = 0; port_num < dev_info->pf_num_per_unit; port_num++) {
555 		if (port_num == multi_pf->function_id)
556 			continue;
557 
558 		nfp_net_keepalive_clear(multi_pf->beat_addr, port_num);
559 	}
560 }
561 
/* Stop the heartbeat by cancelling the self re-arming alarm. */
static void
nfp_net_keepalive_stop(struct nfp_multi_pf *multi_pf)
{
	/* Cancel keepalive for multiple PF setup */
	rte_eal_alarm_cancel(nfp_net_beat_timer, (void *)multi_pf);
}
568 
/* Release the per-port resources acquired by nfp_net_init(). */
static void
nfp_net_uninit(struct rte_eth_dev *eth_dev)
{
	struct nfp_net_hw *net_hw;

	net_hw = eth_dev->data->dev_private;

	/* Flow-steering private data only exists when the FW advertises it. */
	if ((net_hw->super.cap_ext & NFP_NET_CFG_CTRL_FLOW_STEER) != 0)
		nfp_net_flow_priv_uninit(net_hw->pf_dev, net_hw->idx);

	rte_free(net_hw->eth_xstats_base);
	nfp_ipsec_uninit(eth_dev);
	/* Only the port that mapped the MAC stats area has a non-NULL handle. */
	if (net_hw->mac_stats_area != NULL)
		nfp_cpp_area_release_free(net_hw->mac_stats_area);
}
584 
585 static void
586 nfp_cleanup_port_app_fw_nic(struct nfp_pf_dev *pf_dev,
587 		uint8_t id)
588 {
589 	struct rte_eth_dev *eth_dev;
590 	struct nfp_app_fw_nic *app_fw_nic;
591 
592 	app_fw_nic = pf_dev->app_fw_priv;
593 	if (app_fw_nic->ports[id] != NULL) {
594 		eth_dev = app_fw_nic->ports[id]->eth_dev;
595 		if (eth_dev != NULL)
596 			nfp_net_uninit(eth_dev);
597 
598 		app_fw_nic->ports[id] = NULL;
599 	}
600 }
601 
/* Release the CoreNIC app resources: ctrl area mapping and priv struct. */
static void
nfp_uninit_app_fw_nic(struct nfp_pf_dev *pf_dev)
{
	nfp_cpp_area_release_free(pf_dev->ctrl_area);
	rte_free(pf_dev->app_fw_priv);
}
608 
/*
 * Tear down all PF-wide resources; called once the last physical port
 * has been closed.
 */
void
nfp_pf_uninit(struct nfp_pf_dev *pf_dev)
{
	nfp_cpp_area_release_free(pf_dev->qc_area);
	free(pf_dev->sym_tbl);
	if (pf_dev->multi_pf.enabled) {
		/* Stop the heartbeat alarm before clearing and unmapping it. */
		nfp_net_keepalive_stop(&pf_dev->multi_pf);
		nfp_net_keepalive_clear(pf_dev->multi_pf.beat_addr, pf_dev->multi_pf.function_id);
		nfp_net_keepalive_uninit(&pf_dev->multi_pf);
	}
	free(pf_dev->nfp_eth_table);
	free(pf_dev->hwinfo);
	/* Free the CPP handle last of the hardware handles. */
	nfp_cpp_free(pf_dev->cpp);
	nfp_sync_free(pf_dev->sync);
	rte_free(pf_dev);
}
625 
/* Release the secondary process's private copies of the PF handles. */
static int
nfp_pf_secondary_uninit(struct nfp_pf_dev *pf_dev)
{
	free(pf_dev->sym_tbl);
	nfp_cpp_free(pf_dev->cpp);
	nfp_sync_free(pf_dev->sync);
	rte_free(pf_dev);

	return 0;
}
636 
/*
 * Reset and stop device. The device can not be restarted.
 *
 * Per-port resources are always released; the PF-wide resources are
 * only released when this was the last port still in use.
 */
static int
nfp_net_close(struct rte_eth_dev *dev)
{
	uint8_t i;
	uint8_t id;
	struct nfp_net_hw *hw;
	struct nfp_pf_dev *pf_dev;
	struct rte_pci_device *pci_dev;
	struct nfp_app_fw_nic *app_fw_nic;

	/*
	 * In secondary process, a released eth device can be found by its name
	 * in shared memory.
	 * If the state of the eth device is RTE_ETH_DEV_UNUSED, it means the
	 * eth device has been released.
	 */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		if (dev->state == RTE_ETH_DEV_UNUSED)
			return 0;

		nfp_pf_secondary_uninit(dev->process_private);
		return 0;
	}

	hw = dev->data->dev_private;
	pf_dev = hw->pf_dev;
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);

	/*
	 * We assume that the DPDK application is stopping all the
	 * threads/queues before calling the device close function.
	 */
	nfp_net_disable_queues(dev);

	/* Clear queues */
	nfp_net_close_tx_queue(dev);
	nfp_net_close_rx_queue(dev);

	/* Cancel possible impending LSC work here before releasing the port */
	rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler, (void *)dev);

	/* Only free PF resources after all physical ports have been closed */
	/* Mark this port as unused and free device priv resources */
	nn_cfg_writeb(&hw->super, NFP_NET_CFG_LSC, 0xff);

	/* Only the CoreNIC firmware application is handled here. */
	if (pf_dev->app_fw_id != NFP_APP_FW_CORE_NIC)
		return -EINVAL;

	nfp_cleanup_port_app_fw_nic(pf_dev, hw->idx);

	for (i = 0; i < app_fw_nic->total_phyports; i++) {
		id = nfp_function_id_get(pf_dev, i);

		/* Check to see if ports are still in use */
		if (app_fw_nic->ports[id] != NULL)
			return 0;
	}

	/* All ports are gone: release the PF-wide resources below. */

	/* Enable in nfp_net_start() */
	rte_intr_disable(pci_dev->intr_handle);

	/* Register in nfp_net_init() */
	rte_intr_callback_unregister(pci_dev->intr_handle,
			nfp_net_dev_interrupt_handler, (void *)dev);

	nfp_uninit_app_fw_nic(pf_dev);
	nfp_pf_uninit(pf_dev);

	return 0;
}
709 
710 static int
711 nfp_net_find_vxlan_idx(struct nfp_net_hw *hw,
712 		uint16_t port,
713 		uint32_t *idx)
714 {
715 	uint32_t i;
716 	int free_idx = -1;
717 
718 	for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i++) {
719 		if (hw->vxlan_ports[i] == port) {
720 			free_idx = i;
721 			break;
722 		}
723 
724 		if (hw->vxlan_usecnt[i] == 0) {
725 			free_idx = i;
726 			break;
727 		}
728 	}
729 
730 	if (free_idx == -1)
731 		return -EINVAL;
732 
733 	*idx = free_idx;
734 
735 	return 0;
736 }
737 
738 static int
739 nfp_udp_tunnel_port_add(struct rte_eth_dev *dev,
740 		struct rte_eth_udp_tunnel *tunnel_udp)
741 {
742 	int ret;
743 	uint32_t idx;
744 	uint16_t vxlan_port;
745 	struct nfp_net_hw *hw;
746 	enum rte_eth_tunnel_type tnl_type;
747 
748 	hw = dev->data->dev_private;
749 	vxlan_port = tunnel_udp->udp_port;
750 	tnl_type   = tunnel_udp->prot_type;
751 
752 	if (tnl_type != RTE_ETH_TUNNEL_TYPE_VXLAN) {
753 		PMD_DRV_LOG(ERR, "Not VXLAN tunnel");
754 		return -ENOTSUP;
755 	}
756 
757 	ret = nfp_net_find_vxlan_idx(hw, vxlan_port, &idx);
758 	if (ret != 0) {
759 		PMD_DRV_LOG(ERR, "Failed find valid vxlan idx");
760 		return -EINVAL;
761 	}
762 
763 	if (hw->vxlan_usecnt[idx] == 0) {
764 		ret = nfp_net_set_vxlan_port(hw, idx, vxlan_port);
765 		if (ret != 0) {
766 			PMD_DRV_LOG(ERR, "Failed set vxlan port");
767 			return -EINVAL;
768 		}
769 	}
770 
771 	hw->vxlan_usecnt[idx]++;
772 
773 	return 0;
774 }
775 
776 static int
777 nfp_udp_tunnel_port_del(struct rte_eth_dev *dev,
778 		struct rte_eth_udp_tunnel *tunnel_udp)
779 {
780 	int ret;
781 	uint32_t idx;
782 	uint16_t vxlan_port;
783 	struct nfp_net_hw *hw;
784 	enum rte_eth_tunnel_type tnl_type;
785 
786 	hw = dev->data->dev_private;
787 	vxlan_port = tunnel_udp->udp_port;
788 	tnl_type   = tunnel_udp->prot_type;
789 
790 	if (tnl_type != RTE_ETH_TUNNEL_TYPE_VXLAN) {
791 		PMD_DRV_LOG(ERR, "Not VXLAN tunnel");
792 		return -ENOTSUP;
793 	}
794 
795 	ret = nfp_net_find_vxlan_idx(hw, vxlan_port, &idx);
796 	if (ret != 0 || hw->vxlan_usecnt[idx] == 0) {
797 		PMD_DRV_LOG(ERR, "Failed find valid vxlan idx");
798 		return -EINVAL;
799 	}
800 
801 	hw->vxlan_usecnt[idx]--;
802 
803 	if (hw->vxlan_usecnt[idx] == 0) {
804 		ret = nfp_net_set_vxlan_port(hw, idx, 0);
805 		if (ret != 0) {
806 			PMD_DRV_LOG(ERR, "Failed set vxlan port");
807 			return -EINVAL;
808 		}
809 	}
810 
811 	return 0;
812 }
813 
/* Initialise and register driver with DPDK Application */
static const struct eth_dev_ops nfp_net_eth_dev_ops = {
	/* Device lifecycle */
	.dev_configure          = nfp_net_configure,
	.dev_start              = nfp_net_start,
	.dev_stop               = nfp_net_stop,
	.dev_set_link_up        = nfp_net_set_link_up,
	.dev_set_link_down      = nfp_net_set_link_down,
	.dev_close              = nfp_net_close,
	/* Rx mode */
	.promiscuous_enable     = nfp_net_promisc_enable,
	.promiscuous_disable    = nfp_net_promisc_disable,
	.allmulticast_enable    = nfp_net_allmulticast_enable,
	.allmulticast_disable   = nfp_net_allmulticast_disable,
	.link_update            = nfp_net_link_update,
	/* Statistics */
	.stats_get              = nfp_net_stats_get,
	.stats_reset            = nfp_net_stats_reset,
	.xstats_get             = nfp_net_xstats_get,
	.xstats_reset           = nfp_net_xstats_reset,
	.xstats_get_names       = nfp_net_xstats_get_names,
	.xstats_get_by_id       = nfp_net_xstats_get_by_id,
	.xstats_get_names_by_id = nfp_net_xstats_get_names_by_id,
	/* Device info and configuration */
	.dev_infos_get          = nfp_net_infos_get,
	.dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
	.mtu_set                = nfp_net_dev_mtu_set,
	.mac_addr_set           = nfp_net_set_mac_addr,
	.vlan_offload_set       = nfp_net_vlan_offload_set,
	/* RSS */
	.reta_update            = nfp_net_reta_update,
	.reta_query             = nfp_net_reta_query,
	.rss_hash_update        = nfp_net_rss_hash_update,
	.rss_hash_conf_get      = nfp_net_rss_hash_conf_get,
	/* Queue management */
	.rx_queue_setup         = nfp_net_rx_queue_setup,
	.rx_queue_release       = nfp_net_rx_queue_release,
	.tx_queue_setup         = nfp_net_tx_queue_setup,
	.tx_queue_release       = nfp_net_tx_queue_release,
	.rx_queue_intr_enable   = nfp_rx_queue_intr_enable,
	.rx_queue_intr_disable  = nfp_rx_queue_intr_disable,
	/* Tunnels, flow control, flow steering, FEC */
	.udp_tunnel_port_add    = nfp_udp_tunnel_port_add,
	.udp_tunnel_port_del    = nfp_udp_tunnel_port_del,
	.fw_version_get         = nfp_net_firmware_version_get,
	.flow_ctrl_get          = nfp_net_flow_ctrl_get,
	.flow_ctrl_set          = nfp_net_flow_ctrl_set,
	.flow_ops_get           = nfp_net_flow_ops_get,
	.fec_get_capability     = nfp_net_fec_get_capability,
	.fec_get                = nfp_net_fec_get,
	.fec_set                = nfp_net_fec_set,
};
859 
860 static inline void
861 nfp_net_ethdev_ops_mount(struct nfp_net_hw *hw,
862 		struct rte_eth_dev *eth_dev)
863 {
864 	if (hw->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3)
865 		eth_dev->tx_pkt_burst = nfp_net_nfd3_xmit_pkts;
866 	else
867 		eth_dev->tx_pkt_burst = nfp_net_nfdk_xmit_pkts;
868 
869 	eth_dev->dev_ops = &nfp_net_eth_dev_ops;
870 	eth_dev->rx_queue_count = nfp_net_rx_queue_count;
871 	eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
872 }
873 
874 static int
875 nfp_net_init(struct rte_eth_dev *eth_dev)
876 {
877 	int err;
878 	uint16_t port;
879 	uint64_t rx_base;
880 	uint64_t tx_base;
881 	struct nfp_hw *hw;
882 	struct nfp_net_hw *net_hw;
883 	struct nfp_pf_dev *pf_dev;
884 	struct rte_pci_device *pci_dev;
885 	struct nfp_app_fw_nic *app_fw_nic;
886 
887 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
888 	net_hw = eth_dev->data->dev_private;
889 
890 	/* Use backpointer here to the PF of this eth_dev */
891 	pf_dev = net_hw->pf_dev;
892 
893 	/* Use backpointer to the CoreNIC app struct */
894 	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);
895 
896 	port = ((struct nfp_net_hw *)eth_dev->data->dev_private)->idx;
897 	if (port > 7) {
898 		PMD_DRV_LOG(ERR, "Port value is wrong");
899 		return -ENODEV;
900 	}
901 
902 	hw = &net_hw->super;
903 
904 	PMD_INIT_LOG(DEBUG, "Working with physical port number: %hu, "
905 			"NFP internal port number: %d", port, net_hw->nfp_idx);
906 
907 	rte_eth_copy_pci_info(eth_dev, pci_dev);
908 
909 	if (port == 0 || pf_dev->multi_pf.enabled) {
910 		uint32_t min_size;
911 
912 		hw->ctrl_bar = pf_dev->ctrl_bar;
913 		min_size = NFP_MAC_STATS_SIZE * net_hw->pf_dev->nfp_eth_table->max_index;
914 		net_hw->mac_stats_bar = nfp_rtsym_map(net_hw->pf_dev->sym_tbl, "_mac_stats",
915 				min_size, &net_hw->mac_stats_area);
916 		if (net_hw->mac_stats_bar == NULL) {
917 			PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for _mac_stats_bar");
918 			return -EIO;
919 		}
920 
921 		net_hw->mac_stats = net_hw->mac_stats_bar;
922 	} else {
923 		/* Use port offset in pf ctrl_bar for this ports control bar */
924 		hw->ctrl_bar = pf_dev->ctrl_bar + (port * NFP_NET_CFG_BAR_SZ);
925 		net_hw->mac_stats = app_fw_nic->ports[0]->mac_stats_bar +
926 				(net_hw->nfp_idx * NFP_MAC_STATS_SIZE);
927 	}
928 
929 	PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar);
930 	PMD_INIT_LOG(DEBUG, "MAC stats: %p", net_hw->mac_stats);
931 
932 	err = nfp_net_common_init(pci_dev, net_hw);
933 	if (err != 0)
934 		goto free_area;
935 
936 	err = nfp_net_tlv_caps_parse(eth_dev);
937 	if (err != 0) {
938 		PMD_INIT_LOG(ERR, "Failed to parser TLV caps");
939 		return err;
940 		goto free_area;
941 	}
942 
943 	err = nfp_ipsec_init(eth_dev);
944 	if (err != 0) {
945 		PMD_INIT_LOG(ERR, "Failed to init IPsec module");
946 		goto free_area;
947 	}
948 
949 	nfp_net_ethdev_ops_mount(net_hw, eth_dev);
950 
951 	net_hw->eth_xstats_base = rte_malloc("rte_eth_xstat", sizeof(struct rte_eth_xstat) *
952 			nfp_net_xstats_size(eth_dev), 0);
953 	if (net_hw->eth_xstats_base == NULL) {
954 		PMD_INIT_LOG(ERR, "no memory for xstats base values on device %s!",
955 				pci_dev->device.name);
956 		err = -ENOMEM;
957 		goto ipsec_exit;
958 	}
959 
960 	/* Work out where in the BAR the queues start. */
961 	tx_base = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
962 	rx_base = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
963 
964 	net_hw->tx_bar = pf_dev->qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
965 	net_hw->rx_bar = pf_dev->qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ;
966 	eth_dev->data->dev_private = net_hw;
967 
968 	PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
969 			hw->ctrl_bar, net_hw->tx_bar, net_hw->rx_bar);
970 
971 	nfp_net_cfg_queue_setup(net_hw);
972 	net_hw->mtu = RTE_ETHER_MTU;
973 
974 	/* VLAN insertion is incompatible with LSOv2 */
975 	if ((hw->cap & NFP_NET_CFG_CTRL_LSO2) != 0)
976 		hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN;
977 
978 	nfp_net_log_device_information(net_hw);
979 
980 	/* Initializing spinlock for reconfigs */
981 	rte_spinlock_init(&hw->reconfig_lock);
982 
983 	/* Allocating memory for mac addr */
984 	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", RTE_ETHER_ADDR_LEN, 0);
985 	if (eth_dev->data->mac_addrs == NULL) {
986 		PMD_INIT_LOG(ERR, "Failed to space for MAC address");
987 		err = -ENOMEM;
988 		goto xstats_free;
989 	}
990 
991 	nfp_net_pf_read_mac(app_fw_nic, port);
992 	nfp_write_mac(hw, &hw->mac_addr.addr_bytes[0]);
993 
994 	if (rte_is_valid_assigned_ether_addr(&hw->mac_addr) == 0) {
995 		PMD_INIT_LOG(INFO, "Using random mac address for port %d", port);
996 		/* Using random mac addresses for VFs */
997 		rte_eth_random_addr(&hw->mac_addr.addr_bytes[0]);
998 		nfp_write_mac(hw, &hw->mac_addr.addr_bytes[0]);
999 	}
1000 
1001 	/* Copying mac address to DPDK eth_dev struct */
1002 	rte_ether_addr_copy(&hw->mac_addr, eth_dev->data->mac_addrs);
1003 
1004 	if ((hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR) == 0)
1005 		eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR;
1006 
1007 	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
1008 
1009 	PMD_INIT_LOG(INFO, "port %d VendorID=%#x DeviceID=%#x "
1010 			"mac=" RTE_ETHER_ADDR_PRT_FMT,
1011 			eth_dev->data->port_id, pci_dev->id.vendor_id,
1012 			pci_dev->id.device_id,
1013 			RTE_ETHER_ADDR_BYTES(&hw->mac_addr));
1014 
1015 	/* Registering LSC interrupt handler */
1016 	rte_intr_callback_register(pci_dev->intr_handle,
1017 			nfp_net_dev_interrupt_handler, (void *)eth_dev);
1018 	/* Telling the firmware about the LSC interrupt entry */
1019 	nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
1020 	/* Unmask the LSC interrupt */
1021 	nfp_net_irq_unmask(eth_dev);
1022 	/* Recording current stats counters values */
1023 	nfp_net_stats_reset(eth_dev);
1024 
1025 	if ((hw->cap_ext & NFP_NET_CFG_CTRL_FLOW_STEER) != 0) {
1026 		err = nfp_net_flow_priv_init(pf_dev, port);
1027 		if (err != 0) {
1028 			PMD_INIT_LOG(ERR, "Init net flow priv failed");
1029 			goto xstats_free;
1030 		}
1031 	}
1032 
1033 	return 0;
1034 
1035 xstats_free:
1036 	rte_free(net_hw->eth_xstats_base);
1037 ipsec_exit:
1038 	nfp_ipsec_uninit(eth_dev);
1039 free_area:
1040 	if (net_hw->mac_stats_area != NULL)
1041 		nfp_cpp_area_release_free(net_hw->mac_stats_area);
1042 
1043 	return err;
1044 }
1045 
1046 #define DEFAULT_FW_PATH       "/lib/firmware/netronome"
1047 
1048 static int
1049 nfp_fw_get_name(struct rte_pci_device *dev,
1050 		struct nfp_nsp *nsp,
1051 		char *card,
1052 		char *fw_name,
1053 		size_t fw_size)
1054 {
1055 	char serial[40];
1056 	uint16_t interface;
1057 	uint32_t cpp_serial_len;
1058 	const uint8_t *cpp_serial;
1059 	struct nfp_cpp *cpp = nfp_nsp_cpp(nsp);
1060 
1061 	cpp_serial_len = nfp_cpp_serial(cpp, &cpp_serial);
1062 	if (cpp_serial_len != NFP_SERIAL_LEN)
1063 		return -ERANGE;
1064 
1065 	interface = nfp_cpp_interface(cpp);
1066 
1067 	/* Looking for firmware file in order of priority */
1068 
1069 	/* First try to find a firmware image specific for this device */
1070 	snprintf(serial, sizeof(serial),
1071 			"serial-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x",
1072 			cpp_serial[0], cpp_serial[1], cpp_serial[2], cpp_serial[3],
1073 			cpp_serial[4], cpp_serial[5], interface >> 8, interface & 0xff);
1074 	snprintf(fw_name, fw_size, "%s/%s.nffw", DEFAULT_FW_PATH, serial);
1075 
1076 	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
1077 	if (access(fw_name, F_OK) == 0)
1078 		return 0;
1079 
1080 	/* Then try the PCI name */
1081 	snprintf(fw_name, fw_size, "%s/pci-%s.nffw", DEFAULT_FW_PATH,
1082 			dev->name);
1083 
1084 	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
1085 	if (access(fw_name, F_OK) == 0)
1086 		return 0;
1087 
1088 	/* Finally try the card type and media */
1089 	snprintf(fw_name, fw_size, "%s/%s", DEFAULT_FW_PATH, card);
1090 	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
1091 	if (access(fw_name, F_OK) == 0)
1092 		return 0;
1093 
1094 	return -ENOENT;
1095 }
1096 
1097 static int
1098 nfp_fw_upload(struct nfp_nsp *nsp,
1099 		char *fw_name)
1100 {
1101 	int err;
1102 	void *fw_buf;
1103 	size_t fsize;
1104 
1105 	err = rte_firmware_read(fw_name, &fw_buf, &fsize);
1106 	if (err != 0) {
1107 		PMD_DRV_LOG(ERR, "firmware %s not found!", fw_name);
1108 		return -ENOENT;
1109 	}
1110 
1111 	PMD_DRV_LOG(INFO, "Firmware file found at %s with size: %zu",
1112 			fw_name, fsize);
1113 	PMD_DRV_LOG(INFO, "Uploading the firmware ...");
1114 	if (nfp_nsp_load_fw(nsp, fw_buf, fsize) < 0) {
1115 		free(fw_buf);
1116 		PMD_DRV_LOG(ERR, "Firmware load failed.");
1117 		return -EIO;
1118 	}
1119 
1120 	PMD_DRV_LOG(INFO, "Done");
1121 
1122 	free(fw_buf);
1123 
1124 	return 0;
1125 }
1126 
1127 static void
1128 nfp_fw_unload(struct nfp_cpp *cpp)
1129 {
1130 	struct nfp_nsp *nsp;
1131 
1132 	nsp = nfp_nsp_open(cpp);
1133 	if (nsp == NULL)
1134 		return;
1135 
1136 	nfp_nsp_device_soft_reset(nsp);
1137 	nfp_nsp_close(nsp);
1138 }
1139 
1140 static int
1141 nfp_fw_check_change(struct nfp_cpp *cpp,
1142 		char *fw_name,
1143 		bool *fw_changed)
1144 {
1145 	int ret;
1146 	struct nfp_net_hw hw;
1147 	uint32_t new_version = 0;
1148 	uint32_t old_version = 0;
1149 
1150 	ret = nfp_elf_get_fw_version(&new_version, fw_name);
1151 	if (ret != 0)
1152 		return ret;
1153 
1154 	hw.cpp = cpp;
1155 	nfp_net_get_fw_version(&hw, &old_version);
1156 
1157 	if (new_version != old_version) {
1158 		PMD_DRV_LOG(INFO, "FW version is changed, new %u, old %u",
1159 				new_version, old_version);
1160 		*fw_changed = true;
1161 	} else {
1162 		PMD_DRV_LOG(INFO, "FW version is not changed and is %u", new_version);
1163 		*fw_changed = false;
1164 	}
1165 
1166 	return 0;
1167 }
1168 
1169 static int
1170 nfp_fw_reload(struct nfp_nsp *nsp,
1171 		char *fw_name)
1172 {
1173 	int err;
1174 
1175 	nfp_nsp_device_soft_reset(nsp);
1176 	err = nfp_fw_upload(nsp, fw_name);
1177 	if (err != 0)
1178 		PMD_DRV_LOG(ERR, "NFP firmware load failed");
1179 
1180 	return err;
1181 }
1182 
/*
 * Decide whether the firmware load can be skipped in multi-PF mode by
 * watching the per-PF heartbeat counters in device memory.
 *
 * A counter stuck at 0 means that PF slot was never used; a counter
 * that advances over the ~3 second observation window means that PF is
 * alive. Returns true when the load should be skipped (no slot ever
 * used, or another PF is alive), false otherwise. When a live sibling
 * PF is found, a requested forced reload is cancelled via *reload_fw,
 * since reloading would pull the firmware out from under it.
 */
static bool
nfp_fw_skip_load(const struct nfp_dev_info *dev_info,
		struct nfp_multi_pf *multi_pf,
		bool *reload_fw)
{
	uint8_t i;
	uint64_t tmp_beat;
	uint32_t port_num;
	uint8_t in_use = 0;
	/* VLAs sized by the per-unit PF count (small, device-defined). */
	uint64_t beat[dev_info->pf_num_per_unit];
	uint32_t offset[dev_info->pf_num_per_unit];
	uint8_t abnormal = dev_info->pf_num_per_unit;

	/* Take an initial snapshot of every PF's beat counter. */
	for (port_num = 0; port_num < dev_info->pf_num_per_unit; port_num++) {
		offset[port_num] = NFP_BEAT_OFFSET(port_num);
		beat[port_num] = nn_readq(multi_pf->beat_addr + offset[port_num]);
		if (beat[port_num] == 0)
			abnormal--;
	}

	/* All beat slots are zero: nothing ever ran, safe to skip the load. */
	if (abnormal == 0)
		return true;

	/* Poll for up to 3 seconds to see which PFs are still beating. */
	for (i = 0; i < 3; i++) {
		sleep(1);
		for (port_num = 0; port_num < dev_info->pf_num_per_unit; port_num++) {
			/* Our own heartbeat is irrelevant here. */
			if (port_num == multi_pf->function_id)
				continue;

			/* Slot already accounted for (unused or seen alive). */
			if (beat[port_num] == 0)
				continue;

			tmp_beat = nn_readq(multi_pf->beat_addr + offset[port_num]);
			if (tmp_beat != beat[port_num]) {
				/* Sibling PF is alive; a forced reload must not proceed. */
				in_use++;
				abnormal--;
				beat[port_num] = 0;
				if (*reload_fw) {
					*reload_fw = false;
					PMD_DRV_LOG(ERR, "The param %s does not work",
							NFP_PF_FORCE_RELOAD_FW);
				}
			}
		}

		/* Every non-zero slot turned out to be alive: skip the load. */
		if (abnormal == 0)
			return true;
	}

	/*
	 * NOTE(review): the condition tests in_use but the message prints
	 * 'abnormal' and talks about abnormal exit — the wording looks
	 * inconsistent with the check; confirm the intended message upstream.
	 */
	if (in_use != 0) {
		PMD_DRV_LOG(WARNING, "Abnormal %u != 0, the nic has port which is exit abnormally.",
				abnormal);
		return true;
	}

	return false;
}
/*
 * Firmware load policy for single-PF mode: reload only when the image on
 * disk differs from the running firmware, or when a reload is forced.
 *
 * Return 0 on success (including the nothing-to-do case), negative
 * errno otherwise.
 */
static int
nfp_fw_reload_for_single_pf(struct nfp_nsp *nsp,
		char *fw_name,
		struct nfp_cpp *cpp,
		bool force_reload_fw)
{
	int err;
	bool fw_changed = true;

	/* Only compare versions when firmware runs and no reload is forced. */
	if (nfp_nsp_fw_loaded(nsp) && !force_reload_fw) {
		err = nfp_fw_check_change(cpp, fw_name, &fw_changed);
		if (err != 0)
			return err;
	}

	if (!fw_changed)
		return 0;

	return nfp_fw_reload(nsp, fw_name);
}
1264 
/*
 * Firmware load policy for multi-PF mode.
 *
 * This PF's keepalive heartbeat is started first so siblings can see us.
 * The firmware is then reloaded only when it changed (or a reload was
 * forced) AND the heartbeat check says no sibling PF is alive. On the
 * skip path the keepalive deliberately keeps running and 0 is returned.
 *
 * Return 0 on success, negative errno otherwise (keepalive is torn
 * down on the error paths).
 */
static int
nfp_fw_reload_for_multi_pf(struct nfp_nsp *nsp,
		char *fw_name,
		struct nfp_cpp *cpp,
		const struct nfp_dev_info *dev_info,
		struct nfp_multi_pf *multi_pf,
		bool force_reload_fw)
{
	int err;
	bool fw_changed = true;
	bool skip_load_fw = false;
	bool reload_fw = force_reload_fw;

	err = nfp_net_keepalive_init(cpp, multi_pf);
	if (err != 0) {
		PMD_DRV_LOG(ERR, "NFP init beat failed");
		return err;
	}

	err = nfp_net_keepalive_start(multi_pf);
	if (err != 0) {
		PMD_DRV_LOG(ERR, "NFP write beat failed");
		goto keepalive_uninit;
	}

	/* Only compare versions when firmware runs and no reload is forced. */
	if (nfp_nsp_fw_loaded(nsp) && !reload_fw) {
		err = nfp_fw_check_change(cpp, fw_name, &fw_changed);
		if (err != 0)
			goto keepalive_stop;
	}

	/* The heartbeat check may veto a forced reload (clears reload_fw). */
	if (!fw_changed || reload_fw)
		skip_load_fw = nfp_fw_skip_load(dev_info, multi_pf, &reload_fw);

	/* Nothing to do: keepalive stays running for the sibling PFs. */
	if (skip_load_fw && !reload_fw)
		return 0;

	err = nfp_fw_reload(nsp, fw_name);
	if (err != 0)
		goto keepalive_stop;

	/* Fresh firmware: stale heartbeats of other PFs must be wiped. */
	nfp_net_keepalive_clear_others(dev_info, multi_pf);

	return 0;

keepalive_stop:
	nfp_net_keepalive_stop(multi_pf);
keepalive_uninit:
	nfp_net_keepalive_uninit(multi_pf);

	return err;
}
1317 
1318 static int
1319 nfp_fw_setup(struct rte_pci_device *dev,
1320 		struct nfp_cpp *cpp,
1321 		struct nfp_eth_table *nfp_eth_table,
1322 		struct nfp_hwinfo *hwinfo,
1323 		const struct nfp_dev_info *dev_info,
1324 		struct nfp_multi_pf *multi_pf,
1325 		bool force_reload_fw)
1326 {
1327 	int err;
1328 	char fw_name[125];
1329 	char card_desc[100];
1330 	struct nfp_nsp *nsp;
1331 	const char *nfp_fw_model;
1332 
1333 	nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "nffw.partno");
1334 	if (nfp_fw_model == NULL)
1335 		nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "assembly.partno");
1336 
1337 	if (nfp_fw_model != NULL) {
1338 		PMD_DRV_LOG(INFO, "firmware model found: %s", nfp_fw_model);
1339 	} else {
1340 		PMD_DRV_LOG(ERR, "firmware model NOT found");
1341 		return -EIO;
1342 	}
1343 
1344 	if (nfp_eth_table->count == 0 || nfp_eth_table->count > 8) {
1345 		PMD_DRV_LOG(ERR, "NFP ethernet table reports wrong ports: %u",
1346 				nfp_eth_table->count);
1347 		return -EIO;
1348 	}
1349 
1350 	PMD_DRV_LOG(INFO, "NFP ethernet port table reports %u ports",
1351 			nfp_eth_table->count);
1352 
1353 	PMD_DRV_LOG(INFO, "Port speed: %u", nfp_eth_table->ports[0].speed);
1354 
1355 	snprintf(card_desc, sizeof(card_desc), "nic_%s_%dx%d.nffw",
1356 			nfp_fw_model, nfp_eth_table->count,
1357 			nfp_eth_table->ports[0].speed / 1000);
1358 
1359 	nsp = nfp_nsp_open(cpp);
1360 	if (nsp == NULL) {
1361 		PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle");
1362 		return -EIO;
1363 	}
1364 
1365 	err = nfp_fw_get_name(dev, nsp, card_desc, fw_name, sizeof(fw_name));
1366 	if (err != 0) {
1367 		PMD_DRV_LOG(ERR, "Can't find suitable firmware.");
1368 		nfp_nsp_close(nsp);
1369 		return err;
1370 	}
1371 
1372 	if (multi_pf->enabled)
1373 		err = nfp_fw_reload_for_multi_pf(nsp, fw_name, cpp, dev_info, multi_pf,
1374 				force_reload_fw);
1375 	else
1376 		err = nfp_fw_reload_for_single_pf(nsp, fw_name, cpp, force_reload_fw);
1377 
1378 	nfp_nsp_close(nsp);
1379 	return err;
1380 }
1381 
/*
 * The firmware exposes exactly one vNIC per PF when it runs in multi-PF
 * mode, so a single-vNIC report indicates multi-PF firmware.
 */
static inline bool
nfp_check_multi_pf_from_fw(uint32_t total_vnics)
{
	return total_vnics == 1;
}
1390 
1391 static inline bool
1392 nfp_check_multi_pf_from_nsp(struct rte_pci_device *pci_dev,
1393 		struct nfp_cpp *cpp)
1394 {
1395 	bool flag;
1396 	struct nfp_nsp *nsp;
1397 
1398 	nsp = nfp_nsp_open(cpp);
1399 	if (nsp == NULL) {
1400 		PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle");
1401 		return false;
1402 	}
1403 
1404 	flag = (nfp_nsp_get_abi_ver_major(nsp) > 0) &&
1405 			(pci_dev->id.device_id == PCI_DEVICE_ID_NFP3800_PF_NIC);
1406 
1407 	nfp_nsp_close(nsp);
1408 	return flag;
1409 }
1410 
/*
 * Tell the firmware that this PF runs in multi-PF mode.
 *
 * Temporarily maps this PF's control BAR, verifies that the firmware
 * advertises NFP_NET_CFG_CTRL_MULTI_PF in its extended capabilities and
 * then issues a reconfig carrying that flag. The mapping is released
 * before returning.
 *
 * Return 0 on success, -ENODEV when the BAR symbol is missing, -EINVAL
 * when the firmware lacks multi-PF support.
 */
static int
nfp_enable_multi_pf(struct nfp_pf_dev *pf_dev)
{
	int err = 0;
	uint64_t tx_base;
	uint8_t *ctrl_bar;
	struct nfp_hw *hw;
	uint32_t cap_extend;
	struct nfp_net_hw net_hw;
	struct nfp_cpp_area *area;
	char name[RTE_ETH_NAME_MAX_LEN];

	memset(&net_hw, 0, sizeof(struct nfp_net_hw));

	/* Map the symbol table */
	snprintf(name, sizeof(name), "_pf%u_net_bar0",
			pf_dev->multi_pf.function_id);
	ctrl_bar = nfp_rtsym_map(pf_dev->sym_tbl, name, NFP_NET_CFG_BAR_SZ,
			&area);
	if (ctrl_bar == NULL) {
		PMD_INIT_LOG(ERR, "Failed to find data vNIC memory symbol");
		return -ENODEV;
	}

	hw = &net_hw.super;
	hw->ctrl_bar = ctrl_bar;

	/* Multi-PF support is advertised in the extended capability word. */
	cap_extend = nn_cfg_readl(hw, NFP_NET_CFG_CAP_WORD1);
	if ((cap_extend & NFP_NET_CFG_CTRL_MULTI_PF) == 0) {
		PMD_INIT_LOG(ERR, "Loaded firmware doesn't support multiple PF");
		err = -EINVAL;
		goto end;
	}

	/* Set up queue pointers so the reconfig can be issued on this vNIC. */
	tx_base = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
	net_hw.tx_bar = pf_dev->qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
	nfp_net_cfg_queue_setup(&net_hw);
	rte_spinlock_init(&hw->reconfig_lock);
	nfp_ext_reconfig(&net_hw.super, NFP_NET_CFG_CTRL_MULTI_PF, NFP_NET_CFG_UPDATE_GEN);
end:
	nfp_cpp_area_release_free(area);
	return err;
}
1454 
/*
 * Initialize the coreNIC application firmware: create one ethdev per
 * physical port (or a single one in multi-PF mode), map the shared
 * control BAR and run per-port init via nfp_net_init().
 *
 * On failure every already-created port is uninitialized and released,
 * the control BAR area is freed and a negative errno is returned.
 */
static int
nfp_init_app_fw_nic(struct nfp_pf_dev *pf_dev,
		const struct nfp_dev_info *dev_info)
{
	uint8_t i;
	uint8_t id;
	int ret = 0;
	uint32_t total_vnics;
	struct nfp_net_hw *hw;
	unsigned int numa_node;
	struct rte_eth_dev *eth_dev;
	struct nfp_app_fw_nic *app_fw_nic;
	struct nfp_eth_table *nfp_eth_table;
	char bar_name[RTE_ETH_NAME_MAX_LEN];
	char port_name[RTE_ETH_NAME_MAX_LEN];
	char vnic_name[RTE_ETH_NAME_MAX_LEN];

	nfp_eth_table = pf_dev->nfp_eth_table;
	PMD_INIT_LOG(INFO, "Total physical ports: %d", nfp_eth_table->count);
	id = nfp_function_id_get(pf_dev, 0);

	/* Allocate memory for the CoreNIC app */
	app_fw_nic = rte_zmalloc("nfp_app_fw_nic", sizeof(*app_fw_nic), 0);
	if (app_fw_nic == NULL)
		return -ENOMEM;

	/* Point the app_fw_priv pointer in the PF to the coreNIC app */
	pf_dev->app_fw_priv = app_fw_nic;

	/* Read the number of vNIC's created for the PF */
	snprintf(vnic_name, sizeof(vnic_name), "nfd_cfg_pf%u_num_ports", id);
	total_vnics = nfp_rtsym_read_le(pf_dev->sym_tbl, vnic_name, &ret);
	if (ret != 0 || total_vnics == 0 || total_vnics > 8) {
		PMD_INIT_LOG(ERR, "%s symbol with wrong value", vnic_name);
		ret = -ENODEV;
		goto app_cleanup;
	}

	/* NSP-reported multi-PF mode and the firmware's vNIC count must agree. */
	if (pf_dev->multi_pf.enabled) {
		if (!nfp_check_multi_pf_from_fw(total_vnics)) {
			PMD_INIT_LOG(ERR, "NSP report multipf, but FW report not multipf");
			ret = -ENODEV;
			goto app_cleanup;
		}
	} else {
		/*
		 * For coreNIC the number of vNICs exposed should be the same as the
		 * number of physical ports.
		 */
		if (total_vnics != nfp_eth_table->count) {
			PMD_INIT_LOG(ERR, "Total physical ports do not match number of vNICs");
			ret = -ENODEV;
			goto app_cleanup;
		}
	}

	/* Populate coreNIC app properties */
	app_fw_nic->total_phyports = total_vnics;
	app_fw_nic->pf_dev = pf_dev;
	if (total_vnics > 1)
		app_fw_nic->multiport = true;

	/* Map the symbol table */
	snprintf(bar_name, sizeof(bar_name), "_pf%u_net_bar0", id);
	pf_dev->ctrl_bar = nfp_rtsym_map(pf_dev->sym_tbl, bar_name,
			app_fw_nic->total_phyports * NFP_NET_CFG_BAR_SZ,
			&pf_dev->ctrl_area);
	if (pf_dev->ctrl_bar == NULL) {
		PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for %s", bar_name);
		ret = -EIO;
		goto app_cleanup;
	}

	PMD_INIT_LOG(DEBUG, "ctrl bar: %p", pf_dev->ctrl_bar);

	/* Loop through all physical ports on PF */
	numa_node = rte_socket_id();
	for (i = 0; i < app_fw_nic->total_phyports; i++) {
		/* Multi-PF ports use the bare device name; otherwise append _port<i>. */
		if (pf_dev->multi_pf.enabled)
			snprintf(port_name, sizeof(port_name), "%s",
					pf_dev->pci_dev->device.name);
		else
			snprintf(port_name, sizeof(port_name), "%s_port%u",
					pf_dev->pci_dev->device.name, i);

		/* Allocate a eth_dev for this phyport */
		eth_dev = rte_eth_dev_allocate(port_name);
		if (eth_dev == NULL) {
			ret = -ENODEV;
			goto port_cleanup;
		}

		/* Allocate memory for this phyport */
		eth_dev->data->dev_private = rte_zmalloc_socket(port_name,
				sizeof(struct nfp_net_hw),
				RTE_CACHE_LINE_SIZE, numa_node);
		if (eth_dev->data->dev_private == NULL) {
			ret = -ENOMEM;
			rte_eth_dev_release_port(eth_dev);
			goto port_cleanup;
		}

		hw = eth_dev->data->dev_private;
		id = nfp_function_id_get(pf_dev, i);

		/* Add this device to the PF's array of physical ports */
		app_fw_nic->ports[id] = hw;

		hw->dev_info = dev_info;
		hw->pf_dev = pf_dev;
		hw->cpp = pf_dev->cpp;
		hw->eth_dev = eth_dev;
		hw->idx = id;
		hw->nfp_idx = nfp_eth_table->ports[id].index;

		eth_dev->device = &pf_dev->pci_dev->device;

		/*
		 * Ctrl/tx/rx BAR mappings and remaining init happens in
		 * @nfp_net_init()
		 */
		ret = nfp_net_init(eth_dev);
		if (ret != 0) {
			ret = -ENODEV;
			goto port_cleanup;
		}

		rte_eth_dev_probing_finish(eth_dev);

	} /* End loop, all ports on this PF */

	return 0;

port_cleanup:
	/* Unwind every port that was successfully created before the failure. */
	for (i = 0; i < app_fw_nic->total_phyports; i++) {
		id = nfp_function_id_get(pf_dev, i);
		hw = app_fw_nic->ports[id];

		if (hw != NULL && hw->eth_dev != NULL) {
			nfp_net_uninit(hw->eth_dev);
			rte_eth_dev_release_port(hw->eth_dev);
		}
	}
	nfp_cpp_area_release_free(pf_dev->ctrl_area);
app_cleanup:
	rte_free(app_fw_nic);

	return ret;
}
1604 
1605 static int
1606 nfp_net_hwinfo_set(uint8_t function_id,
1607 		struct nfp_rtsym_table *sym_tbl,
1608 		struct nfp_cpp *cpp)
1609 {
1610 	int ret = 0;
1611 	uint64_t app_cap;
1612 	uint8_t sp_indiff;
1613 	struct nfp_nsp *nsp;
1614 	char hw_info[RTE_ETH_NAME_MAX_LEN];
1615 	char app_cap_name[RTE_ETH_NAME_MAX_LEN];
1616 
1617 	/* Read the app capabilities of the firmware loaded */
1618 	snprintf(app_cap_name, sizeof(app_cap_name), "_pf%u_net_app_cap", function_id);
1619 	app_cap = nfp_rtsym_read_le(sym_tbl, app_cap_name, &ret);
1620 	if (ret != 0) {
1621 		PMD_INIT_LOG(ERR, "Couldn't read app_fw_cap from firmware.");
1622 		return ret;
1623 	}
1624 
1625 	/* Calculate the value of sp_indiff and write to hw_info */
1626 	sp_indiff = app_cap & NFP_NET_APP_CAP_SP_INDIFF;
1627 	snprintf(hw_info, sizeof(hw_info), "sp_indiff=%u", sp_indiff);
1628 
1629 	nsp = nfp_nsp_open(cpp);
1630 	if (nsp == NULL) {
1631 		PMD_INIT_LOG(ERR, "Couldn't get NSP.");
1632 		return -EIO;
1633 	}
1634 
1635 	ret = nfp_nsp_hwinfo_set(nsp, hw_info, sizeof(hw_info));
1636 	nfp_nsp_close(nsp);
1637 	if (ret != 0) {
1638 		PMD_INIT_LOG(ERR, "Failed to set parameter to hwinfo.");
1639 		return ret;
1640 	}
1641 
1642 	return 0;
1643 }
1644 
/*
 * Mapping from NFP media/link-mode identifiers to the corresponding
 * RTE_ETH_LINK_SPEED_* capability flags. Indexed by the NFP_MEDIA_*
 * bit position reported by the NSP media buffer; entries not listed
 * here default to 0 (no capability).
 */
const uint32_t nfp_eth_media_table[NFP_MEDIA_LINK_MODES_NUMBER] = {
	[NFP_MEDIA_W0_RJ45_10M]     = RTE_ETH_LINK_SPEED_10M,
	[NFP_MEDIA_W0_RJ45_10M_HD]  = RTE_ETH_LINK_SPEED_10M_HD,
	[NFP_MEDIA_W0_RJ45_100M]    = RTE_ETH_LINK_SPEED_100M,
	[NFP_MEDIA_W0_RJ45_100M_HD] = RTE_ETH_LINK_SPEED_100M_HD,
	[NFP_MEDIA_W0_RJ45_1G]      = RTE_ETH_LINK_SPEED_1G,
	[NFP_MEDIA_W0_RJ45_2P5G]    = RTE_ETH_LINK_SPEED_2_5G,
	[NFP_MEDIA_W0_RJ45_5G]      = RTE_ETH_LINK_SPEED_5G,
	[NFP_MEDIA_W0_RJ45_10G]     = RTE_ETH_LINK_SPEED_10G,
	[NFP_MEDIA_1000BASE_CX]     = RTE_ETH_LINK_SPEED_1G,
	[NFP_MEDIA_1000BASE_KX]     = RTE_ETH_LINK_SPEED_1G,
	[NFP_MEDIA_10GBASE_KX4]     = RTE_ETH_LINK_SPEED_10G,
	[NFP_MEDIA_10GBASE_KR]      = RTE_ETH_LINK_SPEED_10G,
	[NFP_MEDIA_10GBASE_CX4]     = RTE_ETH_LINK_SPEED_10G,
	[NFP_MEDIA_10GBASE_CR]      = RTE_ETH_LINK_SPEED_10G,
	[NFP_MEDIA_10GBASE_SR]      = RTE_ETH_LINK_SPEED_10G,
	[NFP_MEDIA_10GBASE_ER]      = RTE_ETH_LINK_SPEED_10G,
	[NFP_MEDIA_25GBASE_KR]      = RTE_ETH_LINK_SPEED_25G,
	[NFP_MEDIA_25GBASE_KR_S]    = RTE_ETH_LINK_SPEED_25G,
	[NFP_MEDIA_25GBASE_CR]      = RTE_ETH_LINK_SPEED_25G,
	[NFP_MEDIA_25GBASE_CR_S]    = RTE_ETH_LINK_SPEED_25G,
	[NFP_MEDIA_25GBASE_SR]      = RTE_ETH_LINK_SPEED_25G,
	[NFP_MEDIA_40GBASE_CR4]     = RTE_ETH_LINK_SPEED_40G,
	[NFP_MEDIA_40GBASE_KR4]     = RTE_ETH_LINK_SPEED_40G,
	[NFP_MEDIA_40GBASE_SR4]     = RTE_ETH_LINK_SPEED_40G,
	[NFP_MEDIA_40GBASE_LR4]     = RTE_ETH_LINK_SPEED_40G,
	[NFP_MEDIA_50GBASE_KR]      = RTE_ETH_LINK_SPEED_50G,
	[NFP_MEDIA_50GBASE_SR]      = RTE_ETH_LINK_SPEED_50G,
	[NFP_MEDIA_50GBASE_CR]      = RTE_ETH_LINK_SPEED_50G,
	[NFP_MEDIA_50GBASE_LR]      = RTE_ETH_LINK_SPEED_50G,
	[NFP_MEDIA_50GBASE_ER]      = RTE_ETH_LINK_SPEED_50G,
	[NFP_MEDIA_50GBASE_FR]      = RTE_ETH_LINK_SPEED_50G,
	[NFP_MEDIA_100GBASE_KR4]    = RTE_ETH_LINK_SPEED_100G,
	[NFP_MEDIA_100GBASE_SR4]    = RTE_ETH_LINK_SPEED_100G,
	[NFP_MEDIA_100GBASE_CR4]    = RTE_ETH_LINK_SPEED_100G,
	[NFP_MEDIA_100GBASE_KP4]    = RTE_ETH_LINK_SPEED_100G,
	[NFP_MEDIA_100GBASE_CR10]   = RTE_ETH_LINK_SPEED_100G,
	[NFP_MEDIA_10GBASE_LR]      = RTE_ETH_LINK_SPEED_10G,
	[NFP_MEDIA_25GBASE_LR]      = RTE_ETH_LINK_SPEED_25G,
	[NFP_MEDIA_25GBASE_ER]      = RTE_ETH_LINK_SPEED_25G
};
1686 
1687 static int
1688 nfp_net_speed_capa_get_real(struct nfp_eth_media_buf *media_buf,
1689 		struct nfp_pf_dev *pf_dev)
1690 {
1691 	uint32_t i;
1692 	uint32_t j;
1693 	uint32_t offset;
1694 	uint32_t speed_capa = 0;
1695 	uint64_t supported_modes;
1696 
1697 	for (i = 0; i < RTE_DIM(media_buf->supported_modes); i++) {
1698 		supported_modes = media_buf->supported_modes[i];
1699 		offset = i * UINT64_BIT;
1700 		for (j = 0; j < UINT64_BIT; j++) {
1701 			if (supported_modes == 0)
1702 				break;
1703 
1704 			if ((supported_modes & 1) != 0) {
1705 				if ((j + offset) >= NFP_MEDIA_LINK_MODES_NUMBER) {
1706 					PMD_DRV_LOG(ERR, "Invalid offset of media table.");
1707 					return -EINVAL;
1708 				}
1709 
1710 				speed_capa |= nfp_eth_media_table[j + offset];
1711 			}
1712 
1713 			supported_modes = supported_modes >> 1;
1714 		}
1715 	}
1716 
1717 	pf_dev->speed_capa = speed_capa;
1718 
1719 	return pf_dev->speed_capa == 0 ? -EINVAL : 0;
1720 }
1721 
1722 static int
1723 nfp_net_speed_capa_get(struct nfp_pf_dev *pf_dev,
1724 		uint32_t port_id)
1725 {
1726 	int ret;
1727 	struct nfp_nsp *nsp;
1728 	struct nfp_eth_media_buf media_buf;
1729 
1730 	media_buf.eth_index = pf_dev->nfp_eth_table->ports[port_id].eth_index;
1731 	pf_dev->speed_capa = 0;
1732 
1733 	nsp = nfp_nsp_open(pf_dev->cpp);
1734 	if (nsp == NULL) {
1735 		PMD_DRV_LOG(ERR, "Couldn't get NSP.");
1736 		return -EIO;
1737 	}
1738 
1739 	ret = nfp_nsp_read_media(nsp, &media_buf, sizeof(media_buf));
1740 	nfp_nsp_close(nsp);
1741 	if (ret != 0) {
1742 		PMD_DRV_LOG(ERR, "Failed to read media.");
1743 		return ret;
1744 	}
1745 
1746 	ret = nfp_net_speed_capa_get_real(&media_buf, pf_dev);
1747 	if (ret < 0) {
1748 		PMD_DRV_LOG(ERR, "Speed capability is invalid.");
1749 		return ret;
1750 	}
1751 
1752 	return 0;
1753 }
1754 
/*
 * Primary-process probe path for an NFP PF device.
 *
 * Brings the device up end-to-end: CPP handle, hwinfo, port table,
 * firmware load, symbol table, per-port speed capabilities, queue BAR
 * mapping, then app-specific init (coreNIC or Flower) and finally the
 * CPP bridge service. Failures unwind through the labelled cleanup
 * chain in reverse acquisition order and return a negative errno.
 */
static int
nfp_pf_init(struct rte_pci_device *pci_dev)
{
	void *sync;
	uint32_t i;
	uint32_t id;
	int ret = 0;
	uint64_t addr;
	uint32_t index;
	uint32_t cpp_id;
	uint8_t function_id;
	struct nfp_cpp *cpp;
	struct nfp_pf_dev *pf_dev;
	struct nfp_hwinfo *hwinfo;
	enum nfp_app_fw_id app_fw_id;
	char name[RTE_ETH_NAME_MAX_LEN];
	struct nfp_rtsym_table *sym_tbl;
	char app_name[RTE_ETH_NAME_MAX_LEN];
	struct nfp_eth_table *nfp_eth_table;
	const struct nfp_dev_info *dev_info;

	if (pci_dev == NULL)
		return -ENODEV;

	if (pci_dev->mem_resource[0].addr == NULL) {
		PMD_INIT_LOG(ERR, "The address of BAR0 is NULL.");
		return -ENODEV;
	}

	dev_info = nfp_dev_info_get(pci_dev->id.device_id);
	if (dev_info == NULL) {
		PMD_INIT_LOG(ERR, "Not supported device ID");
		return -ENODEV;
	}

	/* Allocate memory for the PF "device" */
	function_id = (pci_dev->addr.function) & 0x07;
	snprintf(name, sizeof(name), "nfp_pf%u", function_id);
	pf_dev = rte_zmalloc(name, sizeof(*pf_dev), 0);
	if (pf_dev == NULL) {
		PMD_INIT_LOG(ERR, "Can't allocate memory for the PF device");
		return -ENOMEM;
	}

	sync = nfp_sync_alloc();
	if (sync == NULL) {
		PMD_INIT_LOG(ERR, "Failed to alloc sync zone.");
		ret = -ENOMEM;
		goto pf_cleanup;
	}

	/*
	 * When device bound to UIO, the device could be used, by mistake,
	 * by two DPDK apps, and the UIO driver does not avoid it. This
	 * could lead to a serious problem when configuring the NFP CPP
	 * interface. Here we avoid this telling to the CPP init code to
	 * use a lock file if UIO is being used.
	 */
	if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO)
		cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, false);
	else
		cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, true);

	if (cpp == NULL) {
		PMD_INIT_LOG(ERR, "A CPP handle can not be obtained");
		ret = -EIO;
		goto sync_free;
	}

	hwinfo = nfp_hwinfo_read(cpp);
	if (hwinfo == NULL) {
		PMD_INIT_LOG(ERR, "Error reading hwinfo table");
		ret = -EIO;
		goto cpp_cleanup;
	}

	/* Read the number of physical ports from hardware */
	nfp_eth_table = nfp_eth_read_ports(cpp);
	if (nfp_eth_table == NULL) {
		PMD_INIT_LOG(ERR, "Error reading NFP ethernet table");
		ret = -EIO;
		goto hwinfo_cleanup;
	}

	/* Multi-PF mode must be known before firmware setup and port iteration. */
	pf_dev->multi_pf.enabled = nfp_check_multi_pf_from_nsp(pci_dev, cpp);
	pf_dev->multi_pf.function_id = function_id;

	/* Force the physical port down to clear the possible DMA error */
	for (i = 0; i < nfp_eth_table->count; i++) {
		id = nfp_function_id_get(pf_dev, i);
		index = nfp_eth_table->ports[id].index;
		nfp_eth_set_configured(cpp, index, 0);
	}

	nfp_devargs_parse(&pf_dev->devargs, pci_dev->device.devargs);

	if (nfp_fw_setup(pci_dev, cpp, nfp_eth_table, hwinfo,
			dev_info, &pf_dev->multi_pf, pf_dev->devargs.force_reload_fw) != 0) {
		PMD_INIT_LOG(ERR, "Error when uploading firmware");
		ret = -EIO;
		goto eth_table_cleanup;
	}

	/* Now the symbol table should be there */
	sym_tbl = nfp_rtsym_table_read(cpp);
	if (sym_tbl == NULL) {
		PMD_INIT_LOG(ERR, "Something is wrong with the firmware symbol table");
		ret = -EIO;
		goto fw_cleanup;
	}

	/* Read the app ID of the firmware loaded */
	snprintf(app_name, sizeof(app_name), "_pf%u_net_app_id", function_id);
	app_fw_id = nfp_rtsym_read_le(sym_tbl, app_name, &ret);
	if (ret != 0) {
		PMD_INIT_LOG(ERR, "Couldn't read %s from firmware", app_name);
		ret = -EIO;
		goto sym_tbl_cleanup;
	}

	/* Write sp_indiff to hw_info */
	ret = nfp_net_hwinfo_set(function_id, sym_tbl, cpp);
	if (ret != 0) {
		PMD_INIT_LOG(ERR, "Failed to set hwinfo.");
		ret = -EIO;
		goto sym_tbl_cleanup;
	}

	/* Populate the newly created PF device */
	pf_dev->app_fw_id = app_fw_id;
	pf_dev->cpp = cpp;
	pf_dev->hwinfo = hwinfo;
	pf_dev->sym_tbl = sym_tbl;
	pf_dev->pci_dev = pci_dev;
	pf_dev->nfp_eth_table = nfp_eth_table;
	pf_dev->sync = sync;

	/* Get the speed capability */
	for (i = 0; i < nfp_eth_table->count; i++) {
		id = nfp_function_id_get(pf_dev, i);
		ret = nfp_net_speed_capa_get(pf_dev, id);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Failed to get speed capability.");
			ret = -EIO;
			goto sym_tbl_cleanup;
		}
	}

	/* Configure access to tx/rx vNIC BARs */
	addr = nfp_qcp_queue_offset(dev_info, 0);
	cpp_id = NFP_CPP_ISLAND_ID(0, NFP_CPP_ACTION_RW, 0, 0);

	pf_dev->qc_bar = nfp_cpp_map_area(pf_dev->cpp, cpp_id,
			addr, dev_info->qc_area_sz, &pf_dev->qc_area);
	if (pf_dev->qc_bar == NULL) {
		PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for net.qc");
		ret = -EIO;
		goto sym_tbl_cleanup;
	}

	PMD_INIT_LOG(DEBUG, "qc_bar address: %p", pf_dev->qc_bar);

	/*
	 * PF initialization has been done at this point. Call app specific
	 * init code now.
	 */
	switch (pf_dev->app_fw_id) {
	case NFP_APP_FW_CORE_NIC:
		if (pf_dev->multi_pf.enabled) {
			ret = nfp_enable_multi_pf(pf_dev);
			if (ret != 0)
				goto hwqueues_cleanup;
		}

		PMD_INIT_LOG(INFO, "Initializing coreNIC");
		ret = nfp_init_app_fw_nic(pf_dev, dev_info);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Could not initialize coreNIC!");
			goto hwqueues_cleanup;
		}
		break;
	case NFP_APP_FW_FLOWER_NIC:
		PMD_INIT_LOG(INFO, "Initializing Flower");
		ret = nfp_init_app_fw_flower(pf_dev, dev_info);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Could not initialize Flower!");
			goto hwqueues_cleanup;
		}
		break;
	default:
		PMD_INIT_LOG(ERR, "Unsupported Firmware loaded");
		ret = -EINVAL;
		goto hwqueues_cleanup;
	}

	/* Register the CPP bridge service here for primary use */
	ret = nfp_enable_cpp_service(pf_dev);
	if (ret != 0)
		PMD_INIT_LOG(INFO, "Enable cpp service failed.");

	/* CPP service failure is tolerated; the probe still succeeds. */
	return 0;

hwqueues_cleanup:
	nfp_cpp_area_release_free(pf_dev->qc_area);
sym_tbl_cleanup:
	free(sym_tbl);
fw_cleanup:
	nfp_fw_unload(cpp);
	nfp_net_keepalive_stop(&pf_dev->multi_pf);
	nfp_net_keepalive_clear(pf_dev->multi_pf.beat_addr, pf_dev->multi_pf.function_id);
	nfp_net_keepalive_uninit(&pf_dev->multi_pf);
eth_table_cleanup:
	free(nfp_eth_table);
hwinfo_cleanup:
	free(hwinfo);
cpp_cleanup:
	nfp_cpp_free(cpp);
sync_free:
	nfp_sync_free(sync);
pf_cleanup:
	rte_free(pf_dev);

	return ret;
}
1979 
1980 static int
1981 nfp_secondary_init_app_fw_nic(struct nfp_pf_dev *pf_dev)
1982 {
1983 	uint32_t i;
1984 	int err = 0;
1985 	int ret = 0;
1986 	uint8_t function_id;
1987 	uint32_t total_vnics;
1988 	struct nfp_net_hw *hw;
1989 	char pf_name[RTE_ETH_NAME_MAX_LEN];
1990 
1991 	/* Read the number of vNIC's created for the PF */
1992 	function_id = (pf_dev->pci_dev->addr.function) & 0x07;
1993 	snprintf(pf_name, sizeof(pf_name), "nfd_cfg_pf%u_num_ports", function_id);
1994 	total_vnics = nfp_rtsym_read_le(pf_dev->sym_tbl, pf_name, &err);
1995 	if (err != 0 || total_vnics == 0 || total_vnics > 8) {
1996 		PMD_INIT_LOG(ERR, "%s symbol with wrong value", pf_name);
1997 		return -ENODEV;
1998 	}
1999 
2000 	for (i = 0; i < total_vnics; i++) {
2001 		struct rte_eth_dev *eth_dev;
2002 		char port_name[RTE_ETH_NAME_MAX_LEN];
2003 
2004 		if (nfp_check_multi_pf_from_fw(total_vnics))
2005 			snprintf(port_name, sizeof(port_name), "%s",
2006 					pf_dev->pci_dev->device.name);
2007 		else
2008 			snprintf(port_name, sizeof(port_name), "%s_port%u",
2009 					pf_dev->pci_dev->device.name, i);
2010 
2011 		PMD_INIT_LOG(DEBUG, "Secondary attaching to port %s", port_name);
2012 		eth_dev = rte_eth_dev_attach_secondary(port_name);
2013 		if (eth_dev == NULL) {
2014 			PMD_INIT_LOG(ERR, "Secondary process attach to port %s failed", port_name);
2015 			ret = -ENODEV;
2016 			break;
2017 		}
2018 
2019 		eth_dev->process_private = pf_dev;
2020 		hw = eth_dev->data->dev_private;
2021 		nfp_net_ethdev_ops_mount(hw, eth_dev);
2022 
2023 		rte_eth_dev_probing_finish(eth_dev);
2024 	}
2025 
2026 	return ret;
2027 }
2028 
2029 /*
2030  * When attaching to the NFP4000/6000 PF on a secondary process there
2031  * is no need to initialise the PF again. Only minimal work is required
2032  * here.
2033  */
/*
 * Secondary-process probe path for an NFP PF device.
 *
 * Only the minimal state is rebuilt: a CPP handle and the firmware
 * symbol table (the primary's PF object is not shared). The app type is
 * read from the symbol table and the matching secondary init routine is
 * invoked. Failures unwind via the labelled cleanup chain and return a
 * negative errno.
 */
static int
nfp_pf_secondary_init(struct rte_pci_device *pci_dev)
{
	void *sync;
	int ret = 0;
	struct nfp_cpp *cpp;
	uint8_t function_id;
	struct nfp_pf_dev *pf_dev;
	enum nfp_app_fw_id app_fw_id;
	char name[RTE_ETH_NAME_MAX_LEN];
	struct nfp_rtsym_table *sym_tbl;
	const struct nfp_dev_info *dev_info;
	char app_name[RTE_ETH_NAME_MAX_LEN];

	if (pci_dev == NULL)
		return -ENODEV;

	if (pci_dev->mem_resource[0].addr == NULL) {
		PMD_INIT_LOG(ERR, "The address of BAR0 is NULL.");
		return -ENODEV;
	}

	dev_info = nfp_dev_info_get(pci_dev->id.device_id);
	if (dev_info == NULL) {
		PMD_INIT_LOG(ERR, "Not supported device ID");
		return -ENODEV;
	}

	/* Allocate memory for the PF "device" */
	snprintf(name, sizeof(name), "nfp_pf%d", 0);
	pf_dev = rte_zmalloc(name, sizeof(*pf_dev), 0);
	if (pf_dev == NULL) {
		PMD_INIT_LOG(ERR, "Can't allocate memory for the PF device");
		return -ENOMEM;
	}

	sync = nfp_sync_alloc();
	if (sync == NULL) {
		PMD_INIT_LOG(ERR, "Failed to alloc sync zone.");
		ret = -ENOMEM;
		goto pf_cleanup;
	}

	/*
	 * When device bound to UIO, the device could be used, by mistake,
	 * by two DPDK apps, and the UIO driver does not avoid it. This
	 * could lead to a serious problem when configuring the NFP CPP
	 * interface. Here we avoid this telling to the CPP init code to
	 * use a lock file if UIO is being used.
	 */
	if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO)
		cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, false);
	else
		cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, true);

	if (cpp == NULL) {
		PMD_INIT_LOG(ERR, "A CPP handle can not be obtained");
		ret = -EIO;
		goto sync_free;
	}

	/*
	 * We don't have access to the PF created in the primary process
	 * here so we have to read the number of ports from firmware.
	 */
	sym_tbl = nfp_rtsym_table_read(cpp);
	if (sym_tbl == NULL) {
		PMD_INIT_LOG(ERR, "Something is wrong with the firmware symbol table");
		ret = -EIO;
		goto sync_free;
	}

	/* Read the app ID of the firmware loaded */
	function_id = pci_dev->addr.function & 0x7;
	snprintf(app_name, sizeof(app_name), "_pf%u_net_app_id", function_id);
	app_fw_id = nfp_rtsym_read_le(sym_tbl, app_name, &ret);
	if (ret != 0) {
		PMD_INIT_LOG(ERR, "Couldn't read %s from fw", app_name);
		ret = -EIO;
		goto sym_tbl_cleanup;
	}

	/* Populate the newly created PF device */
	pf_dev->app_fw_id = app_fw_id;
	pf_dev->cpp = cpp;
	pf_dev->sym_tbl = sym_tbl;
	pf_dev->pci_dev = pci_dev;
	pf_dev->sync = sync;

	/* Call app specific init code now */
	switch (app_fw_id) {
	case NFP_APP_FW_CORE_NIC:
		PMD_INIT_LOG(INFO, "Initializing coreNIC");
		ret = nfp_secondary_init_app_fw_nic(pf_dev);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Could not initialize coreNIC!");
			goto sym_tbl_cleanup;
		}
		break;
	case NFP_APP_FW_FLOWER_NIC:
		PMD_INIT_LOG(INFO, "Initializing Flower");
		ret = nfp_secondary_init_app_fw_flower(pf_dev);
		if (ret != 0) {
			PMD_INIT_LOG(ERR, "Could not initialize Flower!");
			goto sym_tbl_cleanup;
		}
		break;
	default:
		PMD_INIT_LOG(ERR, "Unsupported Firmware loaded");
		ret = -EINVAL;
		goto sym_tbl_cleanup;
	}

	return 0;

sym_tbl_cleanup:
	free(sym_tbl);
sync_free:
	nfp_sync_free(sync);
pf_cleanup:
	rte_free(pf_dev);

	return ret;
}
2158 
2159 static int
2160 nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2161 		struct rte_pci_device *dev)
2162 {
2163 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
2164 		return nfp_pf_init(dev);
2165 	else
2166 		return nfp_pf_secondary_init(dev);
2167 }
2168 
/*
 * PCI device ID table for the NFP PF driver.
 *
 * The NFP3800/NFP4000/NFP6000 PF NICs are listed twice: once under the
 * original Netronome vendor ID and once under the Corigine vendor ID.
 * The table is terminated by an all-zero entry (vendor_id == 0).
 */
static const struct rte_pci_id pci_id_nfp_pf_net_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
				PCI_DEVICE_ID_NFP3800_PF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
				PCI_DEVICE_ID_NFP4000_PF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
				PCI_DEVICE_ID_NFP6000_PF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE,
				PCI_DEVICE_ID_NFP3800_PF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE,
				PCI_DEVICE_ID_NFP4000_PF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE,
				PCI_DEVICE_ID_NFP6000_PF_NIC)
	},
	{
		/* Sentinel: marks the end of the ID table */
		.vendor_id = 0,
	},
};
2198 
2199 static int
2200 nfp_pci_uninit(struct rte_eth_dev *eth_dev)
2201 {
2202 	uint16_t port_id;
2203 	struct rte_pci_device *pci_dev;
2204 
2205 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2206 
2207 	/* Free up all physical ports under PF */
2208 	RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device)
2209 		rte_eth_dev_close(port_id);
2210 	/*
2211 	 * Ports can be closed and freed but hotplugging is not
2212 	 * currently supported.
2213 	 */
2214 	return -ENOTSUP;
2215 }
2216 
2217 static int
2218 eth_nfp_pci_remove(struct rte_pci_device *pci_dev)
2219 {
2220 	return rte_eth_dev_pci_generic_remove(pci_dev, nfp_pci_uninit);
2221 }
2222 
/*
 * NFP PF driver descriptor registered with the EAL PCI bus.
 *
 * RTE_PCI_DRV_NEED_MAPPING: EAL must map the device BARs before probe.
 * RTE_PCI_DRV_INTR_LSC: the driver supports link status change interrupts.
 */
static struct rte_pci_driver rte_nfp_net_pf_pmd = {
	.id_table = pci_id_nfp_pf_net_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = nfp_pf_pci_probe,
	.remove = eth_nfp_pci_remove,
};
2229 
/* Register the PF PMD and its PCI ID table with the EAL. */
RTE_PMD_REGISTER_PCI(NFP_PF_DRIVER_NAME, rte_nfp_net_pf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(NFP_PF_DRIVER_NAME, pci_id_nfp_pf_net_map);
/* Kernel modules the driver can bind through (any UIO variant or VFIO). */
RTE_PMD_REGISTER_KMOD_DEP(NFP_PF_DRIVER_NAME, "* igb_uio | uio_pci_generic | vfio");
/* Advertise the supported devargs: force_reload_fw=<0|1>. */
RTE_PMD_REGISTER_PARAM_STRING(NFP_PF_DRIVER_NAME, NFP_PF_FORCE_RELOAD_FW "=<0|1>");
2234