xref: /dpdk/drivers/net/nfp/nfp_ethdev.c (revision 4dcbf32ffefd84dbb5924de3b2c6dd517f7809c8)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2014-2021 Netronome Systems, Inc.
3  * All rights reserved.
4  *
5  * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
6  */
7 
8 #include <unistd.h>
9 
10 #include <eal_firmware.h>
11 #include <rte_alarm.h>
12 #include <rte_kvargs.h>
13 #include <rte_pci.h>
14 
15 #include "flower/nfp_flower.h"
16 #include "nfd3/nfp_nfd3.h"
17 #include "nfdk/nfp_nfdk.h"
18 #include "nfpcore/nfp_cpp.h"
19 #include "nfpcore/nfp_elf.h"
20 #include "nfpcore/nfp_hwinfo.h"
21 #include "nfpcore/nfp_rtsym.h"
22 #include "nfpcore/nfp_nsp.h"
23 #include "nfpcore/nfp6000_pcie.h"
24 #include "nfpcore/nfp_resource.h"
25 #include "nfpcore/nfp_sync.h"
26 
27 #include "nfp_cpp_bridge.h"
28 #include "nfp_ipsec.h"
29 #include "nfp_logs.h"
30 #include "nfp_net_flow.h"
31 #include "nfp_rxtx_vec.h"
32 
33 /* 64-bit per app capabilities */
34 #define NFP_NET_APP_CAP_SP_INDIFF       RTE_BIT64(0) /* Indifferent to port speed */
35 
36 #define NFP_PF_DRIVER_NAME net_nfp_pf
37 #define NFP_PF_FORCE_RELOAD_FW   "force_reload_fw"
38 #define NFP_CPP_SERVICE_ENABLE   "cpp_service_enable"
39 #define NFP_QUEUE_PER_VF     1
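/*
 * Illustrative devargs usage (assuming the standard DPDK devargs syntax):
 *   -a <PCI_BDF>,force_reload_fw=1,cpp_service_enable=1
 * Both keys only accept the values 0 and 1.
 */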
40 
41 struct nfp_net_init {
42 	/** Sequential physical port number, only valid for CoreNIC firmware */
43 	uint8_t idx;
44 
45 	/** Internal port number as seen from NFP */
46 	uint8_t nfp_idx;
47 
48 	struct nfp_net_hw_priv *hw_priv;
49 };
50 
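/* Parse a devarg value as a base-10 unsigned integer. */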
51 static int
52 nfp_devarg_handle_int(const char *key,
53 		const char *value,
54 		void *extra_args)
55 {
56 	char *end_ptr;
57 	uint64_t *num = extra_args;
58 
59 	if (value == NULL)
60 		return -EPERM;
61 
62 	*num = strtoul(value, &end_ptr, 10);
63 	if (*num == ULONG_MAX) {
64 		PMD_DRV_LOG(ERR, "%s: value '%s' is out of range.", key, value);
65 		return -ERANGE;
66 	} else if (value == end_ptr) {
67 		return -EPERM;
68 	}
69 
70 	return 0;
71 }
72 
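/* Parse a boolean (0/1) devarg; *value_ret is left untouched when the key is absent. */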
73 static int
74 nfp_devarg_parse_bool_para(struct rte_kvargs *kvlist,
75 		const char *key_match,
76 		bool *value_ret)
77 {
78 	int ret;
79 	uint32_t count;
80 	uint64_t value;
81 
82 	count = rte_kvargs_count(kvlist, key_match);
83 	if (count == 0)
84 		return 0;
85 
86 	if (count > 1) {
87 		PMD_DRV_LOG(ERR, "Too many bool arguments: %s.", key_match);
88 		return -EINVAL;
89 	}
90 
91 	ret = rte_kvargs_process(kvlist, key_match, &nfp_devarg_handle_int, &value);
92 	if (ret != 0)
93 		return -EINVAL;
94 
95 	if (value == 1) {
96 		*value_ret = true;
97 	} else if (value == 0) {
98 		*value_ret = false;
99 	} else {
100 		PMD_DRV_LOG(ERR, "Invalid value for the param, the correct format is %s=0/1.",
101 				key_match);
102 		return -EINVAL;
103 	}
104 
105 	return 0;
106 }
107 
108 static int
109 nfp_devargs_parse(struct nfp_devargs *nfp_devargs_param,
110 		const struct rte_devargs *devargs)
111 {
112 	int ret;
113 	struct rte_kvargs *kvlist;
114 
115 	if (devargs == NULL)
116 		return 0;
117 
118 	kvlist = rte_kvargs_parse(devargs->args, NULL);
119 	if (kvlist == NULL)
120 		return -EINVAL;
121 
122 	ret = nfp_devarg_parse_bool_para(kvlist, NFP_PF_FORCE_RELOAD_FW,
123 			&nfp_devargs_param->force_reload_fw);
124 	if (ret != 0)
125 		goto exit;
126 
127 	ret = nfp_devarg_parse_bool_para(kvlist, NFP_CPP_SERVICE_ENABLE,
128 			&nfp_devargs_param->cpp_service_enable);
129 	if (ret != 0)
130 		goto exit;
131 
132 exit:
133 	rte_kvargs_free(kvlist);
134 
135 	return ret;
136 }
137 
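/* Read the MAC address of the physical port from the NSP eth table. */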
138 static void
139 nfp_net_pf_read_mac(struct nfp_app_fw_nic *app_fw_nic,
140 		uint16_t port,
141 		struct nfp_net_hw_priv *hw_priv)
142 {
143 	struct nfp_net_hw *hw;
144 	struct nfp_eth_table *nfp_eth_table;
145 
146 	/* Grab a pointer to the correct physical port */
147 	hw = app_fw_nic->ports[port];
148 
149 	nfp_eth_table = hw_priv->pf_dev->nfp_eth_table;
150 
151 	rte_ether_addr_copy(&nfp_eth_table->ports[port].mac_addr, &hw->super.mac_addr);
152 }
153 
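/* Map a single RTE_ETH_LINK_SPEED_* bit to its RTE_ETH_SPEED_NUM_* value. */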
154 static uint32_t
155 nfp_net_speed_bitmap2speed(uint32_t speeds_bitmap)
156 {
157 	switch (speeds_bitmap) {
158 	case RTE_ETH_LINK_SPEED_10M_HD:
159 		return RTE_ETH_SPEED_NUM_10M;
160 	case RTE_ETH_LINK_SPEED_10M:
161 		return RTE_ETH_SPEED_NUM_10M;
162 	case RTE_ETH_LINK_SPEED_100M_HD:
163 		return RTE_ETH_SPEED_NUM_100M;
164 	case RTE_ETH_LINK_SPEED_100M:
165 		return RTE_ETH_SPEED_NUM_100M;
166 	case RTE_ETH_LINK_SPEED_1G:
167 		return RTE_ETH_SPEED_NUM_1G;
168 	case RTE_ETH_LINK_SPEED_2_5G:
169 		return RTE_ETH_SPEED_NUM_2_5G;
170 	case RTE_ETH_LINK_SPEED_5G:
171 		return RTE_ETH_SPEED_NUM_5G;
172 	case RTE_ETH_LINK_SPEED_10G:
173 		return RTE_ETH_SPEED_NUM_10G;
174 	case RTE_ETH_LINK_SPEED_20G:
175 		return RTE_ETH_SPEED_NUM_20G;
176 	case RTE_ETH_LINK_SPEED_25G:
177 		return RTE_ETH_SPEED_NUM_25G;
178 	case RTE_ETH_LINK_SPEED_40G:
179 		return RTE_ETH_SPEED_NUM_40G;
180 	case RTE_ETH_LINK_SPEED_50G:
181 		return RTE_ETH_SPEED_NUM_50G;
182 	case RTE_ETH_LINK_SPEED_56G:
183 		return RTE_ETH_SPEED_NUM_56G;
184 	case RTE_ETH_LINK_SPEED_100G:
185 		return RTE_ETH_SPEED_NUM_100G;
186 	case RTE_ETH_LINK_SPEED_200G:
187 		return RTE_ETH_SPEED_NUM_200G;
188 	case RTE_ETH_LINK_SPEED_400G:
189 		return RTE_ETH_SPEED_NUM_400G;
190 	default:
191 		return RTE_ETH_SPEED_NUM_NONE;
192 	}
193 }
194 
195 static int
196 nfp_net_nfp4000_speed_configure_check(uint16_t port_id,
197 		uint32_t configure_speed,
198 		struct nfp_eth_table *nfp_eth_table)
199 {
200 	switch (port_id) {
201 	case 0:
202 		if (configure_speed == RTE_ETH_SPEED_NUM_25G &&
203 				nfp_eth_table->ports[1].speed == RTE_ETH_SPEED_NUM_10G) {
204 			PMD_DRV_LOG(ERR, "The speed configuration is not supported for NFP4000.");
205 			return -ENOTSUP;
206 		}
207 		break;
208 	case 1:
209 		if (configure_speed == RTE_ETH_SPEED_NUM_10G &&
210 				nfp_eth_table->ports[0].speed == RTE_ETH_SPEED_NUM_25G) {
211 			PMD_DRV_LOG(ERR, "The speed configuration is not supported for NFP4000.");
212 			return -ENOTSUP;
213 		}
214 		break;
215 	default:
216 		PMD_DRV_LOG(ERR, "The port id is invalid.");
217 		return -EINVAL;
218 	}
219 
220 	return 0;
221 }
222 
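/* Enable autonegotiation on the physical port through the NSP. */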
223 static int
224 nfp_net_speed_autoneg_set(struct nfp_net_hw_priv *hw_priv,
225 		struct nfp_eth_table_port *eth_port)
226 {
227 	int ret;
228 	struct nfp_nsp *nsp;
229 
230 	nsp = nfp_eth_config_start(hw_priv->pf_dev->cpp, eth_port->index);
231 	if (nsp == NULL) {
232 		PMD_DRV_LOG(ERR, "Could not get NSP.");
233 		return -EIO;
234 	}
235 
236 	ret = nfp_eth_set_aneg(nsp, NFP_ANEG_AUTO);
237 	if (ret != 0) {
238 		PMD_DRV_LOG(ERR, "Failed to set ANEG enable.");
239 		nfp_eth_config_cleanup_end(nsp);
240 		return ret;
241 	}
242 
243 	return nfp_eth_config_commit_end(nsp);
244 }
245 
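/* Disable autonegotiation and commit a fixed speed through the NSP. */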
246 static int
247 nfp_net_speed_fixed_set(struct nfp_net_hw_priv *hw_priv,
248 		struct nfp_eth_table_port *eth_port,
249 		uint32_t configure_speed)
250 {
251 	int ret;
252 	struct nfp_nsp *nsp;
253 
254 	nsp = nfp_eth_config_start(hw_priv->pf_dev->cpp, eth_port->index);
255 	if (nsp == NULL) {
256 		PMD_DRV_LOG(ERR, "Could not get NSP.");
257 		return -EIO;
258 	}
259 
260 	ret = nfp_eth_set_aneg(nsp, NFP_ANEG_DISABLED);
261 	if (ret != 0) {
262 		PMD_DRV_LOG(ERR, "Failed to set ANEG disable.");
263 		goto config_cleanup;
264 	}
265 
266 	ret = nfp_eth_set_speed(nsp, configure_speed);
267 	if (ret != 0) {
268 		PMD_DRV_LOG(ERR, "Failed to set speed.");
269 		goto config_cleanup;
270 	}
271 
272 	return nfp_eth_config_commit_end(nsp);
273 
274 config_cleanup:
275 	nfp_eth_config_cleanup_end(nsp);
276 
277 	return ret;
278 }
279 
280 static int
281 nfp_net_speed_configure(struct rte_eth_dev *dev)
282 {
283 	int ret;
284 	uint8_t idx;
285 	uint32_t speed_capa;
286 	uint32_t link_speeds;
287 	uint32_t configure_speed;
288 	struct nfp_eth_table_port *eth_port;
289 	struct nfp_eth_table *nfp_eth_table;
290 	struct nfp_net_hw *net_hw = dev->data->dev_private;
291 	struct nfp_net_hw_priv *hw_priv = dev->process_private;
292 
293 	idx = nfp_net_get_idx(dev);
294 	nfp_eth_table = hw_priv->pf_dev->nfp_eth_table;
295 	eth_port = &nfp_eth_table->ports[idx];
296 
297 	speed_capa = hw_priv->pf_dev->speed_capa;
298 	if (speed_capa == 0) {
299 		PMD_DRV_LOG(ERR, "The speed capability is invalid.");
300 		return -EINVAL;
301 	}
302 
303 	link_speeds = dev->data->dev_conf.link_speeds;
304 	configure_speed = nfp_net_speed_bitmap2speed(speed_capa & link_speeds);
305 	if (configure_speed == RTE_ETH_SPEED_NUM_NONE &&
306 			link_speeds != RTE_ETH_LINK_SPEED_AUTONEG) {
307 		PMD_DRV_LOG(ERR, "Configured speed is invalid.");
308 		return -EINVAL;
309 	}
310 
311 	/* NFP4000 does not allow port 0 at 25Gbps while port 1 is at 10Gbps. */
312 	if (net_hw->device_id == PCI_DEVICE_ID_NFP4000_PF_NIC) {
313 		ret = nfp_net_nfp4000_speed_configure_check(idx,
314 				configure_speed, nfp_eth_table);
315 		if (ret != 0) {
316 			PMD_DRV_LOG(ERR, "Failed to configure speed for NFP4000.");
317 			return ret;
318 		}
319 	}
320 
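	/*
	 * Note: RTE_ETH_LINK_SPEED_AUTONEG and RTE_ETH_SPEED_NUM_NONE are
	 * both 0, so this branch is taken when no fixed speed was selected.
	 */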
321 	if (configure_speed == RTE_ETH_LINK_SPEED_AUTONEG) {
322 		if (!eth_port->supp_aneg)
323 			return 0;
324 
325 		if (eth_port->aneg == NFP_ANEG_AUTO)
326 			return 0;
327 
328 		ret = nfp_net_speed_autoneg_set(hw_priv, eth_port);
329 		if (ret != 0) {
330 			PMD_DRV_LOG(ERR, "Failed to set speed autoneg.");
331 			return ret;
332 		}
333 	} else {
334 		if (eth_port->aneg == NFP_ANEG_DISABLED && configure_speed == eth_port->speed)
335 			return 0;
336 
337 		ret = nfp_net_speed_fixed_set(hw_priv, eth_port, configure_speed);
338 		if (ret != 0) {
339 			PMD_DRV_LOG(ERR, "Failed to set speed fixed.");
340 			return ret;
341 		}
342 	}
343 
344 	hw_priv->pf_dev->speed_updated = true;
345 
346 	return 0;
347 }
348 
349 static int
350 nfp_net_start(struct rte_eth_dev *dev)
351 {
352 	int ret;
353 	uint16_t i;
354 	struct nfp_hw *hw;
355 	uint32_t new_ctrl;
356 	uint32_t update = 0;
357 	uint32_t cap_extend;
358 	uint32_t intr_vector;
359 	uint32_t ctrl_extend = 0;
360 	struct nfp_net_hw *net_hw;
361 	struct nfp_pf_dev *pf_dev;
362 	struct rte_eth_rxmode *rxmode;
363 	struct rte_eth_txmode *txmode;
364 	struct nfp_net_hw_priv *hw_priv;
365 	struct nfp_app_fw_nic *app_fw_nic;
366 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
367 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
368 
369 	net_hw = dev->data->dev_private;
370 	hw_priv = dev->process_private;
371 	pf_dev = hw_priv->pf_dev;
372 	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);
373 	hw = &net_hw->super;
374 
375 	/* Disabling queues just in case... */
376 	nfp_net_disable_queues(dev);
377 
378 	/* Enabling the required queues in the device */
379 	nfp_net_enable_queues(dev);
380 
381 	/* Configure the port speed and the auto-negotiation mode. */
382 	ret = nfp_net_speed_configure(dev);
383 	if (ret < 0) {
384 		PMD_DRV_LOG(ERR, "Failed to set the speed and auto-negotiation mode.");
385 		return ret;
386 	}
387 
388 	/* Check and configure queue intr-vector mapping */
389 	if (dev->data->dev_conf.intr_conf.rxq != 0) {
390 		if (app_fw_nic->multiport) {
391 			PMD_INIT_LOG(ERR, "PMD rx interrupt is not supported "
392 					"with NFP multiport PF.");
393 			return -EINVAL;
394 		}
395 
396 		if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_UIO) {
397 			/*
398 			 * Better not to share LSC with RX interrupts.
399 			 * Unregistering LSC interrupt handler.
400 			 */
401 			rte_intr_callback_unregister(intr_handle,
402 					nfp_net_dev_interrupt_handler, (void *)dev);
403 
404 			if (dev->data->nb_rx_queues > 1) {
405 				PMD_INIT_LOG(ERR, "PMD rx interrupt only "
406 						"supports 1 queue with UIO.");
407 				return -EIO;
408 			}
409 		}
410 
411 		intr_vector = dev->data->nb_rx_queues;
412 		if (rte_intr_efd_enable(intr_handle, intr_vector) != 0)
413 			return -1;
414 
415 		nfp_configure_rx_interrupt(dev, intr_handle);
416 		update = NFP_NET_CFG_UPDATE_MSIX;
417 	}
418 
419 	/* Checking MTU set */
420 	if (dev->data->mtu > net_hw->flbufsz) {
421 		PMD_INIT_LOG(ERR, "MTU (%u) cannot be larger than the current NFP_FRAME_SIZE (%u).",
422 				dev->data->mtu, net_hw->flbufsz);
423 		return -ERANGE;
424 	}
425 
426 	rte_intr_enable(intr_handle);
427 
428 	new_ctrl = nfp_check_offloads(dev);
429 
430 	/* Writing configuration parameters in the device */
431 	nfp_net_params_setup(net_hw);
432 
433 	rxmode = &dev->data->dev_conf.rxmode;
434 	if ((rxmode->offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH) != 0) {
435 		nfp_net_rss_config_default(dev);
436 		update |= NFP_NET_CFG_UPDATE_RSS;
437 		new_ctrl |= nfp_net_cfg_ctrl_rss(hw->cap);
438 	}
439 
440 	/* Enable device */
441 	new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
442 
443 	update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;
444 
445 	txmode = &dev->data->dev_conf.txmode;
446 
447 	if ((hw->cap & NFP_NET_CFG_CTRL_RINGCFG) != 0)
448 		new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;
449 
450 	if ((hw->cap & NFP_NET_CFG_CTRL_TXRWB) != 0)
451 		new_ctrl |= NFP_NET_CFG_CTRL_TXRWB;
452 
453 	if (nfp_reconfig(hw, new_ctrl, update) != 0)
454 		return -EIO;
455 
456 	hw->ctrl = new_ctrl;
457 
458 	/* Enable packet type offload by extend ctrl word1. */
459 	cap_extend = hw->cap_ext;
460 	if ((cap_extend & NFP_NET_CFG_CTRL_PKT_TYPE) != 0)
461 		ctrl_extend = NFP_NET_CFG_CTRL_PKT_TYPE;
462 
463 	if ((rxmode->offloads & RTE_ETH_RX_OFFLOAD_SECURITY) != 0 ||
464 			(txmode->offloads & RTE_ETH_TX_OFFLOAD_SECURITY) != 0) {
465 		if ((cap_extend & NFP_NET_CFG_CTRL_IPSEC) != 0)
466 			ctrl_extend |= NFP_NET_CFG_CTRL_IPSEC |
467 					NFP_NET_CFG_CTRL_IPSEC_SM_LOOKUP |
468 					NFP_NET_CFG_CTRL_IPSEC_LM_LOOKUP;
469 	}
470 
471 	/* Enable flow steer by extend ctrl word1. */
472 	if ((cap_extend & NFP_NET_CFG_CTRL_FLOW_STEER) != 0)
473 		ctrl_extend |= NFP_NET_CFG_CTRL_FLOW_STEER;
474 
475 	update = NFP_NET_CFG_UPDATE_GEN;
476 	if (nfp_ext_reconfig(hw, ctrl_extend, update) != 0)
477 		return -EIO;
478 
479 	hw->ctrl_ext = ctrl_extend;
480 
481 	/*
482 	 * Allocating rte mbufs for configured rx queues.
483 	 * This requires queues being enabled before.
484 	 */
485 	if (nfp_net_rx_freelist_setup(dev) != 0) {
486 		ret = -ENOMEM;
487 		goto error;
488 	}
489 
490 	/* Configure the physical port up */
491 	ret = nfp_eth_set_configured(pf_dev->cpp, net_hw->nfp_idx, 1);
492 	if (ret < 0)
493 		goto error;
494 
495 	for (i = 0; i < dev->data->nb_rx_queues; i++)
496 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
497 	for (i = 0; i < dev->data->nb_tx_queues; i++)
498 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
499 
500 	return 0;
501 
502 error:
503 	/*
504 	 * An error returned by this function should mean the app exits,
505 	 * and then the system releases all the allocated memory, even the
506 	 * memory coming from hugepages.
507 	 *
508 	 * The device could be enabled at this point with some queues
509 	 * ready for getting packets. This is true if the call to
510 	 * nfp_net_rx_freelist_setup() succeeds for some queues but
511 	 * fails for subsequent queues.
512 	 *
513 	 * This should make the app exit, but better if we tell the
514 	 * device first.
515 	 */
516 	nfp_net_disable_queues(dev);
517 
518 	return ret;
519 }
520 
521 /* Set the link up. */
522 static int
523 nfp_net_set_link_up(struct rte_eth_dev *dev)
524 {
525 	int ret;
526 	struct nfp_net_hw *hw;
527 	struct nfp_net_hw_priv *hw_priv;
528 
529 	hw = dev->data->dev_private;
530 	hw_priv = dev->process_private;
531 
532 	ret = nfp_eth_set_configured(hw_priv->pf_dev->cpp, hw->nfp_idx, 1);
533 	if (ret < 0)
534 		return ret;
535 
536 	return 0;
537 }
538 
539 /* Set the link down. */
540 static int
541 nfp_net_set_link_down(struct rte_eth_dev *dev)
542 {
543 	int ret;
544 	struct nfp_net_hw *hw;
545 	struct nfp_net_hw_priv *hw_priv;
546 
547 	hw = dev->data->dev_private;
548 	hw_priv = dev->process_private;
549 
550 	ret = nfp_eth_set_configured(hw_priv->pf_dev->cpp, hw->nfp_idx, 0);
551 	if (ret < 0)
552 		return ret;
553 
554 	return 0;
555 }
556 
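/* Alarm callback: write the keepalive beat and re-arm itself once per second. */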
557 static void
558 nfp_net_beat_timer(void *arg)
559 {
560 	uint64_t cur_cycles;
561 	struct nfp_multi_pf *multi_pf = arg;
562 
563 	cur_cycles = rte_rdtsc();
564 	nn_writeq(cur_cycles, multi_pf->beat_addr + NFP_BEAT_OFFSET(multi_pf->function_id));
565 
566 	/* Beat once per second. */
567 	if (rte_eal_alarm_set(1000 * 1000, nfp_net_beat_timer,
568 			(void *)multi_pf) < 0) {
569 		PMD_DRV_LOG(ERR, "Error setting alarm.");
570 	}
571 }
572 
573 static int
574 nfp_net_keepalive_init(struct nfp_cpp *cpp,
575 		struct nfp_multi_pf *multi_pf)
576 {
577 	uint8_t *base;
578 	uint64_t addr;
579 	uint32_t size;
580 	uint32_t cpp_id;
581 	struct nfp_resource *res;
582 
583 	res = nfp_resource_acquire(cpp, NFP_RESOURCE_KEEPALIVE);
584 	if (res == NULL)
585 		return -EIO;
586 
587 	cpp_id = nfp_resource_cpp_id(res);
588 	addr = nfp_resource_address(res);
589 	size = nfp_resource_size(res);
590 
591 	nfp_resource_release(res);
592 
593 	/* Allocate a fixed area for keepalive. */
594 	base = nfp_cpp_map_area(cpp, cpp_id, addr, size, &multi_pf->beat_area);
595 	if (base == NULL) {
596 		PMD_DRV_LOG(ERR, "Failed to map area for keepalive.");
597 		return -EIO;
598 	}
599 
600 	multi_pf->beat_addr = base;
601 
602 	return 0;
603 }
604 
605 static void
606 nfp_net_keepalive_uninit(struct nfp_multi_pf *multi_pf)
607 {
608 	nfp_cpp_area_release_free(multi_pf->beat_area);
609 }
610 
611 static int
612 nfp_net_keepalive_start(struct nfp_multi_pf *multi_pf)
613 {
614 	if (rte_eal_alarm_set(1000 * 1000, nfp_net_beat_timer,
615 			(void *)multi_pf) < 0) {
616 		PMD_DRV_LOG(ERR, "Error setting alarm.");
617 		return -EIO;
618 	}
619 
620 	return 0;
621 }
622 
623 static void
624 nfp_net_keepalive_clear(uint8_t *beat_addr,
625 		uint8_t function_id)
626 {
627 	nn_writeq(0, beat_addr + NFP_BEAT_OFFSET(function_id));
628 }
629 
630 static void
631 nfp_net_keepalive_clear_others(const struct nfp_dev_info *dev_info,
632 		struct nfp_multi_pf *multi_pf)
633 {
634 	uint8_t port_num;
635 
636 	for (port_num = 0; port_num < dev_info->pf_num_per_unit; port_num++) {
637 		if (port_num == multi_pf->function_id)
638 			continue;
639 
640 		nfp_net_keepalive_clear(multi_pf->beat_addr, port_num);
641 	}
642 }
643 
644 static void
645 nfp_net_keepalive_stop(struct nfp_multi_pf *multi_pf)
646 {
647 	/* Cancel keepalive for multiple PF setup */
648 	rte_eal_alarm_cancel(nfp_net_beat_timer, (void *)multi_pf);
649 }
650 
651 static int
652 nfp_net_uninit(struct rte_eth_dev *eth_dev)
653 {
654 	struct nfp_net_hw *net_hw;
655 	struct nfp_net_hw_priv *hw_priv;
656 
657 	net_hw = eth_dev->data->dev_private;
658 	hw_priv = eth_dev->process_private;
659 
660 	if ((net_hw->super.cap_ext & NFP_NET_CFG_CTRL_FLOW_STEER) != 0)
661 		nfp_net_flow_priv_uninit(hw_priv->pf_dev, net_hw->idx);
662 
663 	rte_free(net_hw->eth_xstats_base);
664 	if ((net_hw->super.cap & NFP_NET_CFG_CTRL_TXRWB) != 0)
665 		nfp_net_txrwb_free(eth_dev);
666 	nfp_ipsec_uninit(eth_dev);
667 
668 	return 0;
669 }
670 
671 static void
672 nfp_cleanup_port_app_fw_nic(struct nfp_pf_dev *pf_dev,
673 		uint8_t id,
674 		struct rte_eth_dev *eth_dev)
675 {
676 	struct nfp_app_fw_nic *app_fw_nic;
677 
678 	app_fw_nic = pf_dev->app_fw_priv;
679 	if (app_fw_nic->ports[id] != NULL) {
680 		nfp_net_uninit(eth_dev);
681 		app_fw_nic->ports[id] = NULL;
682 	}
683 }
684 
685 static void
686 nfp_uninit_app_fw_nic(struct nfp_pf_dev *pf_dev)
687 {
688 	nfp_cpp_area_release_free(pf_dev->ctrl_area);
689 	rte_free(pf_dev->app_fw_priv);
690 }
691 
692 static void
693 nfp_net_vf_config_uninit(struct nfp_pf_dev *pf_dev)
694 {
695 	if (pf_dev->sriov_vf == 0)
696 		return;
697 
698 	nfp_cpp_area_release_free(pf_dev->vf_cfg_tbl_area);
699 	nfp_cpp_area_release_free(pf_dev->vf_area);
700 }
701 
702 void
703 nfp_pf_uninit(struct nfp_net_hw_priv *hw_priv)
704 {
705 	struct nfp_pf_dev *pf_dev = hw_priv->pf_dev;
706 
707 	if (pf_dev->devargs.cpp_service_enable)
708 		nfp_disable_cpp_service(pf_dev);
709 	nfp_net_vf_config_uninit(pf_dev);
710 	nfp_cpp_area_release_free(pf_dev->mac_stats_area);
711 	nfp_cpp_area_release_free(pf_dev->qc_area);
712 	free(pf_dev->sym_tbl);
713 	if (pf_dev->multi_pf.enabled) {
714 		nfp_net_keepalive_stop(&pf_dev->multi_pf);
715 		nfp_net_keepalive_clear(pf_dev->multi_pf.beat_addr, pf_dev->multi_pf.function_id);
716 		nfp_net_keepalive_uninit(&pf_dev->multi_pf);
717 	}
718 	free(pf_dev->nfp_eth_table);
719 	free(pf_dev->hwinfo);
720 	nfp_cpp_free(pf_dev->cpp);
721 	nfp_sync_free(pf_dev->sync);
722 	rte_free(pf_dev);
723 	rte_free(hw_priv);
724 }
725 
726 static int
727 nfp_pf_secondary_uninit(struct nfp_net_hw_priv *hw_priv)
728 {
729 	struct nfp_pf_dev *pf_dev = hw_priv->pf_dev;
730 
731 	free(pf_dev->sym_tbl);
732 	nfp_cpp_free(pf_dev->cpp);
733 	nfp_sync_free(pf_dev->sync);
734 	rte_free(pf_dev);
735 	rte_free(hw_priv);
736 
737 	return 0;
738 }
739 
740 /* Reset and stop the device. The device cannot be restarted. */
741 static int
742 nfp_net_close(struct rte_eth_dev *dev)
743 {
744 	uint8_t i;
745 	uint8_t id;
746 	struct nfp_net_hw *hw;
747 	struct nfp_pf_dev *pf_dev;
748 	struct rte_pci_device *pci_dev;
749 	struct nfp_net_hw_priv *hw_priv;
750 	struct nfp_app_fw_nic *app_fw_nic;
751 
752 	hw_priv = dev->process_private;
753 
754 	/*
755 	 * In secondary process, a released eth device can be found by its name
756 	 * in shared memory.
757 	 * If the state of the eth device is RTE_ETH_DEV_UNUSED, it means the
758 	 * eth device has been released.
759 	 */
760 	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
761 		if (dev->state == RTE_ETH_DEV_UNUSED)
762 			return 0;
763 
764 		nfp_pf_secondary_uninit(hw_priv);
765 		return 0;
766 	}
767 
768 	hw = dev->data->dev_private;
769 	pf_dev = hw_priv->pf_dev;
770 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
771 	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);
772 
773 	/*
774 	 * We assume that the DPDK application is stopping all the
775 	 * threads/queues before calling the device close function.
776 	 */
777 	nfp_net_disable_queues(dev);
778 
779 	/* Clear queues */
780 	nfp_net_close_tx_queue(dev);
781 	nfp_net_close_rx_queue(dev);
782 
783 	/* Cancel possible impending LSC work here before releasing the port */
784 	rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler, (void *)dev);
785 
786 	/* Mark this port as unused and free its device private resources. */
787 	/* PF-wide resources are only freed after all physical ports have been closed. */
788 	nn_cfg_writeb(&hw->super, NFP_NET_CFG_LSC, 0xff);
789 
790 	if (pf_dev->app_fw_id != NFP_APP_FW_CORE_NIC)
791 		return -EINVAL;
792 
793 	nfp_cleanup_port_app_fw_nic(pf_dev, hw->idx, dev);
794 
795 	for (i = 0; i < pf_dev->total_phyports; i++) {
796 		id = nfp_function_id_get(pf_dev, i);
797 
798 		/* Check to see if ports are still in use */
799 		if (app_fw_nic->ports[id] != NULL)
800 			return 0;
801 	}
802 
803 	/* Pairs with rte_intr_enable() in nfp_net_start(). */
804 	rte_intr_disable(pci_dev->intr_handle);
805 
806 	/* Pairs with the handler registered in nfp_net_init(). */
807 	rte_intr_callback_unregister(pci_dev->intr_handle,
808 			nfp_net_dev_interrupt_handler, (void *)dev);
809 
810 	nfp_uninit_app_fw_nic(pf_dev);
811 	nfp_pf_uninit(hw_priv);
812 
813 	return 0;
814 }
815 
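/* Find the VXLAN table index matching @port, or else the first unused slot. */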
816 static int
817 nfp_net_find_vxlan_idx(struct nfp_net_hw *hw,
818 		uint16_t port,
819 		uint32_t *idx)
820 {
821 	uint32_t i;
822 	int free_idx = -1;
823 
824 	for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i++) {
825 		if (hw->vxlan_ports[i] == port) {
826 			free_idx = i;
827 			break;
828 		}
829 
830 		if (hw->vxlan_usecnt[i] == 0) {
831 			free_idx = i;
832 			break;
833 		}
834 	}
835 
836 	if (free_idx == -1)
837 		return -EINVAL;
838 
839 	*idx = free_idx;
840 
841 	return 0;
842 }
843 
844 static int
845 nfp_udp_tunnel_port_add(struct rte_eth_dev *dev,
846 		struct rte_eth_udp_tunnel *tunnel_udp)
847 {
848 	int ret;
849 	uint32_t idx;
850 	uint32_t ctrl;
851 	struct nfp_hw *hw;
852 	uint16_t vxlan_port;
853 	struct nfp_net_hw *net_hw;
854 	enum rte_eth_tunnel_type tnl_type;
855 
856 	net_hw = dev->data->dev_private;
857 	vxlan_port = tunnel_udp->udp_port;
858 	tnl_type   = tunnel_udp->prot_type;
859 
860 	if (tnl_type != RTE_ETH_TUNNEL_TYPE_VXLAN) {
861 		PMD_DRV_LOG(ERR, "Not VXLAN tunnel.");
862 		return -ENOTSUP;
863 	}
864 
865 	ret = nfp_net_find_vxlan_idx(net_hw, vxlan_port, &idx);
866 	if (ret != 0) {
867 		PMD_DRV_LOG(ERR, "Failed to find a valid VXLAN idx.");
868 		return -EINVAL;
869 	}
870 
871 	if (net_hw->vxlan_usecnt[idx] == 0) {
872 		hw = &net_hw->super;
873 		ctrl = hw->ctrl | NFP_NET_CFG_CTRL_VXLAN;
874 
875 		ret = nfp_net_set_vxlan_port(net_hw, idx, vxlan_port, ctrl);
876 		if (ret != 0) {
877 			PMD_DRV_LOG(ERR, "Failed to set the VXLAN port.");
878 			return -EINVAL;
879 		}
880 
881 		hw->ctrl = ctrl;
882 	}
883 
884 	net_hw->vxlan_usecnt[idx]++;
885 
886 	return 0;
887 }
888 
889 static int
890 nfp_udp_tunnel_port_del(struct rte_eth_dev *dev,
891 		struct rte_eth_udp_tunnel *tunnel_udp)
892 {
893 	int ret;
894 	uint32_t idx;
895 	uint32_t ctrl;
896 	struct nfp_hw *hw;
897 	uint16_t vxlan_port;
898 	struct nfp_net_hw *net_hw;
899 	enum rte_eth_tunnel_type tnl_type;
900 
901 	net_hw = dev->data->dev_private;
902 	vxlan_port = tunnel_udp->udp_port;
903 	tnl_type   = tunnel_udp->prot_type;
904 
905 	if (tnl_type != RTE_ETH_TUNNEL_TYPE_VXLAN) {
906 		PMD_DRV_LOG(ERR, "Not VXLAN tunnel.");
907 		return -ENOTSUP;
908 	}
909 
910 	ret = nfp_net_find_vxlan_idx(net_hw, vxlan_port, &idx);
911 	if (ret != 0 || net_hw->vxlan_usecnt[idx] == 0) {
912 		PMD_DRV_LOG(ERR, "Failed to find a valid VXLAN idx.");
913 		return -EINVAL;
914 	}
915 
916 	net_hw->vxlan_usecnt[idx]--;
917 
918 	if (net_hw->vxlan_usecnt[idx] == 0) {
919 		hw = &net_hw->super;
920 		ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_VXLAN;
921 
922 		ret = nfp_net_set_vxlan_port(net_hw, idx, 0, ctrl);
923 		if (ret != 0) {
924 			PMD_DRV_LOG(ERR, "Failed to set the VXLAN port.");
925 			return -EINVAL;
926 		}
927 
928 		hw->ctrl = ctrl;
929 	}
930 
931 	return 0;
932 }
933 
934 /* The ethdev ops registered with the DPDK framework. */
935 static const struct eth_dev_ops nfp_net_eth_dev_ops = {
936 	.dev_configure          = nfp_net_configure,
937 	.dev_start              = nfp_net_start,
938 	.dev_stop               = nfp_net_stop,
939 	.dev_set_link_up        = nfp_net_set_link_up,
940 	.dev_set_link_down      = nfp_net_set_link_down,
941 	.dev_close              = nfp_net_close,
942 	.promiscuous_enable     = nfp_net_promisc_enable,
943 	.promiscuous_disable    = nfp_net_promisc_disable,
944 	.allmulticast_enable    = nfp_net_allmulticast_enable,
945 	.allmulticast_disable   = nfp_net_allmulticast_disable,
946 	.link_update            = nfp_net_link_update,
947 	.stats_get              = nfp_net_stats_get,
948 	.stats_reset            = nfp_net_stats_reset,
949 	.xstats_get             = nfp_net_xstats_get,
950 	.xstats_reset           = nfp_net_xstats_reset,
951 	.xstats_get_names       = nfp_net_xstats_get_names,
952 	.xstats_get_by_id       = nfp_net_xstats_get_by_id,
953 	.xstats_get_names_by_id = nfp_net_xstats_get_names_by_id,
954 	.dev_infos_get          = nfp_net_infos_get,
955 	.dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
956 	.dev_ptypes_set         = nfp_net_ptypes_set,
957 	.mtu_set                = nfp_net_dev_mtu_set,
958 	.mac_addr_set           = nfp_net_set_mac_addr,
959 	.vlan_offload_set       = nfp_net_vlan_offload_set,
960 	.reta_update            = nfp_net_reta_update,
961 	.reta_query             = nfp_net_reta_query,
962 	.rss_hash_update        = nfp_net_rss_hash_update,
963 	.rss_hash_conf_get      = nfp_net_rss_hash_conf_get,
964 	.rx_queue_setup         = nfp_net_rx_queue_setup,
965 	.rx_queue_release       = nfp_net_rx_queue_release,
966 	.rxq_info_get           = nfp_net_rx_queue_info_get,
967 	.tx_queue_setup         = nfp_net_tx_queue_setup,
968 	.tx_queue_release       = nfp_net_tx_queue_release,
969 	.txq_info_get           = nfp_net_tx_queue_info_get,
970 	.rx_queue_intr_enable   = nfp_rx_queue_intr_enable,
971 	.rx_queue_intr_disable  = nfp_rx_queue_intr_disable,
972 	.udp_tunnel_port_add    = nfp_udp_tunnel_port_add,
973 	.udp_tunnel_port_del    = nfp_udp_tunnel_port_del,
974 	.fw_version_get         = nfp_net_firmware_version_get,
975 	.flow_ctrl_get          = nfp_net_flow_ctrl_get,
976 	.flow_ctrl_set          = nfp_net_flow_ctrl_set,
977 	.flow_ops_get           = nfp_net_flow_ops_get,
978 	.fec_get_capability     = nfp_net_fec_get_capability,
979 	.fec_get                = nfp_net_fec_get,
980 	.fec_set                = nfp_net_fec_set,
981 	.get_eeprom_length      = nfp_net_get_eeprom_len,
982 	.get_eeprom             = nfp_net_get_eeprom,
983 	.set_eeprom             = nfp_net_set_eeprom,
984 	.get_module_info        = nfp_net_get_module_info,
985 	.get_module_eeprom      = nfp_net_get_module_eeprom,
986 	.dev_led_on             = nfp_net_led_on,
987 	.dev_led_off            = nfp_net_led_off,
988 	.rx_burst_mode_get      = nfp_net_rx_burst_mode_get,
989 	.tx_burst_mode_get      = nfp_net_tx_burst_mode_get,
990 };
991 
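/* Mount the Rx/Tx datapath functions matching the firmware (NFD3 or NFDk). */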
992 static inline void
993 nfp_net_ethdev_ops_mount(struct nfp_pf_dev *pf_dev,
994 		struct rte_eth_dev *eth_dev)
995 {
996 	if (pf_dev->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3)
997 		eth_dev->tx_pkt_burst = nfp_net_nfd3_xmit_pkts;
998 	else
999 		nfp_net_nfdk_xmit_pkts_set(eth_dev);
1000 
1001 	eth_dev->dev_ops = &nfp_net_eth_dev_ops;
1002 	eth_dev->rx_queue_count = nfp_net_rx_queue_count;
1003 	nfp_net_recv_pkts_set(eth_dev);
1004 }
1005 
1006 static int
1007 nfp_net_init(struct rte_eth_dev *eth_dev,
1008 		void *para)
1009 {
1010 	int err;
1011 	uint16_t port;
1012 	uint64_t rx_base;
1013 	uint64_t tx_base;
1014 	struct nfp_hw *hw;
1015 	struct nfp_net_hw *net_hw;
1016 	struct nfp_pf_dev *pf_dev;
1017 	struct nfp_net_init *hw_init;
1018 	struct rte_pci_device *pci_dev;
1019 	struct nfp_net_hw_priv *hw_priv;
1020 	struct nfp_app_fw_nic *app_fw_nic;
1021 
1022 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1023 	net_hw = eth_dev->data->dev_private;
1024 
1025 	hw_init = para;
1026 	net_hw->idx      = hw_init->idx;
1027 	net_hw->nfp_idx  = hw_init->nfp_idx;
1028 	eth_dev->process_private = hw_init->hw_priv;
1029 
1030 	/* Use backpointer here to the PF of this eth_dev */
1031 	hw_priv = eth_dev->process_private;
1032 	pf_dev = hw_priv->pf_dev;
1033 
1034 	/* Use backpointer to the CoreNIC app struct */
1035 	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);
1036 
1037 	/* Add this device to the PF's array of physical ports */
1038 	app_fw_nic->ports[net_hw->idx] = net_hw;
1039 
1040 	port = net_hw->idx;
1041 	if (port > 7) {
1042 		PMD_DRV_LOG(ERR, "Port value %hu is out of range.", port);
1043 		return -ENODEV;
1044 	}
1045 
1046 	hw = &net_hw->super;
1047 
1048 	PMD_INIT_LOG(DEBUG, "Working with physical port number: %hu, "
1049 			"NFP internal port number: %d.", port, net_hw->nfp_idx);
1050 
1051 	rte_eth_copy_pci_info(eth_dev, pci_dev);
1052 
1053 	if (pf_dev->multi_pf.enabled)
1054 		hw->ctrl_bar = pf_dev->ctrl_bar;
1055 	else
1056 		hw->ctrl_bar = pf_dev->ctrl_bar + (port * pf_dev->ctrl_bar_size);
1057 
1058 	net_hw->mac_stats = pf_dev->mac_stats_bar +
1059 				(net_hw->nfp_idx * NFP_MAC_STATS_SIZE);
1060 
1061 	PMD_INIT_LOG(DEBUG, "Ctrl bar: %p.", hw->ctrl_bar);
1062 	PMD_INIT_LOG(DEBUG, "MAC stats: %p.", net_hw->mac_stats);
1063 
1064 	err = nfp_net_common_init(pf_dev, net_hw);
1065 	if (err != 0)
1066 		return err;
1067 
1068 	err = nfp_net_tlv_caps_parse(eth_dev);
1069 	if (err != 0) {
1070 		PMD_INIT_LOG(ERR, "Failed to parse TLV caps.");
1071 		return err;
1072 	}
1073 
1074 	err = nfp_ipsec_init(eth_dev);
1075 	if (err != 0) {
1076 		PMD_INIT_LOG(ERR, "Failed to init IPsec module.");
1077 		return err;
1078 	}
1079 
1080 	nfp_net_ethdev_ops_mount(pf_dev, eth_dev);
1081 
1082 	net_hw->eth_xstats_base = rte_malloc("rte_eth_xstat", sizeof(struct rte_eth_xstat) *
1083 			nfp_net_xstats_size(eth_dev), 0);
1084 	if (net_hw->eth_xstats_base == NULL) {
1085 		PMD_INIT_LOG(ERR, "No memory for xstats base values on device %s!",
1086 				pci_dev->device.name);
1087 		err = -ENOMEM;
1088 		goto ipsec_exit;
1089 	}
1090 
1091 	/* Work out where in the BAR the queues start. */
1092 	tx_base = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
1093 	rx_base = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
1094 
1095 	net_hw->tx_bar = pf_dev->qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
1096 	net_hw->rx_bar = pf_dev->qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ;
1097 
1098 	PMD_INIT_LOG(DEBUG, "The ctrl_bar: %p, tx_bar: %p, rx_bar: %p.",
1099 			hw->ctrl_bar, net_hw->tx_bar, net_hw->rx_bar);
1100 
1101 	nfp_net_cfg_queue_setup(net_hw);
1102 	net_hw->mtu = RTE_ETHER_MTU;
1103 
1104 	/* VLAN insertion is incompatible with LSOv2 */
1105 	if ((hw->cap & NFP_NET_CFG_CTRL_LSO2) != 0)
1106 		hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN;
1107 
1108 	nfp_net_log_device_information(net_hw, pf_dev);
1109 
1110 	/* Initializing spinlock for reconfigs */
1111 	rte_spinlock_init(&hw->reconfig_lock);
1112 
1113 	if (port == 0 || pf_dev->multi_pf.enabled) {
1114 		err = nfp_net_vf_config_app_init(net_hw, pf_dev);
1115 		if (err != 0) {
1116 			PMD_INIT_LOG(ERR, "Failed to init sriov module.");
1117 			goto xstats_free;
1118 		}
1119 	}
1120 
1121 	/* Allocating memory for mac addr */
1122 	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", RTE_ETHER_ADDR_LEN, 0);
1123 	if (eth_dev->data->mac_addrs == NULL) {
1124 		PMD_INIT_LOG(ERR, "Failed to allocate space for the MAC address.");
1125 		err = -ENOMEM;
1126 		goto xstats_free;
1127 	}
1128 
1129 	if ((hw->cap & NFP_NET_CFG_CTRL_TXRWB) != 0) {
1130 		err = nfp_net_txrwb_alloc(eth_dev);
1131 		if (err != 0)
1132 			goto xstats_free;
1133 	}
1134 
1135 	nfp_net_pf_read_mac(app_fw_nic, port, hw_priv);
1136 	nfp_write_mac(hw, &hw->mac_addr.addr_bytes[0]);
1137 
1138 	if (rte_is_valid_assigned_ether_addr(&hw->mac_addr) == 0) {
1139 		PMD_INIT_LOG(INFO, "Using a random MAC address for port %d.", port);
1140 		/* Generate a random MAC address to replace the invalid one */
1141 		rte_eth_random_addr(&hw->mac_addr.addr_bytes[0]);
1142 		nfp_write_mac(hw, &hw->mac_addr.addr_bytes[0]);
1143 	}
1144 
1145 	/* Copying mac address to DPDK eth_dev struct */
1146 	rte_ether_addr_copy(&hw->mac_addr, eth_dev->data->mac_addrs);
1147 
1148 	if ((hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR) == 0)
1149 		eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR;
1150 
1151 	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
1152 
1153 	PMD_INIT_LOG(INFO, "Port %d VendorID=%#x DeviceID=%#x "
1154 			"mac=" RTE_ETHER_ADDR_PRT_FMT,
1155 			eth_dev->data->port_id, pci_dev->id.vendor_id,
1156 			pci_dev->id.device_id,
1157 			RTE_ETHER_ADDR_BYTES(&hw->mac_addr));
1158 
1159 	/* Registering LSC interrupt handler */
1160 	rte_intr_callback_register(pci_dev->intr_handle,
1161 			nfp_net_dev_interrupt_handler, (void *)eth_dev);
1162 	/* Telling the firmware about the LSC interrupt entry */
1163 	nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
1164 	/* Unmask the LSC interrupt */
1165 	nfp_net_irq_unmask(eth_dev);
1166 	/* Recording current stats counters values */
1167 	nfp_net_stats_reset(eth_dev);
1168 
1169 	if ((hw->cap_ext & NFP_NET_CFG_CTRL_FLOW_STEER) != 0) {
1170 		err = nfp_net_flow_priv_init(pf_dev, port);
1171 		if (err != 0) {
1172 			PMD_INIT_LOG(ERR, "Init net flow priv failed.");
1173 			goto txrwb_free;
1174 		}
1175 	}
1176 
1177 	return 0;
1178 
1179 txrwb_free:
1180 	if ((hw->cap & NFP_NET_CFG_CTRL_TXRWB) != 0)
1181 		nfp_net_txrwb_free(eth_dev);
1182 xstats_free:
1183 	rte_free(net_hw->eth_xstats_base);
1184 ipsec_exit:
1185 	nfp_ipsec_uninit(eth_dev);
1186 
1187 	return err;
1188 }
1189 
1190 static int
1191 nfp_net_device_activate(struct nfp_pf_dev *pf_dev)
1192 {
1193 	int ret;
1194 	struct nfp_nsp *nsp;
1195 	struct nfp_multi_pf *multi_pf;
1196 
1197 	multi_pf = &pf_dev->multi_pf;
1198 	if (multi_pf->enabled && multi_pf->function_id != 0) {
1199 		nsp = nfp_nsp_open(pf_dev->cpp);
1200 		if (nsp == NULL) {
1201 			PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle.");
1202 			return -EIO;
1203 		}
1204 
1205 		ret = nfp_nsp_device_activate(nsp);
1206 		nfp_nsp_close(nsp);
1207 		if (ret != 0 && ret != -EOPNOTSUPP)
1208 			return ret;
1209 	}
1210 
1211 	return 0;
1212 }
1213 
1214 #define DEFAULT_FW_PATH       "/lib/firmware/netronome"
1215 
1216 static int
1217 nfp_fw_get_name(struct nfp_pf_dev *pf_dev,
1218 		char *fw_name,
1219 		size_t fw_size)
1220 {
1221 	char serial[40];
1222 	uint16_t interface;
1223 	char card_desc[100];
1224 	uint32_t cpp_serial_len;
1225 	const char *nfp_fw_model;
1226 	const uint8_t *cpp_serial;
1227 
1228 	cpp_serial_len = nfp_cpp_serial(pf_dev->cpp, &cpp_serial);
1229 	if (cpp_serial_len != NFP_SERIAL_LEN)
1230 		return -ERANGE;
1231 
1232 	interface = nfp_cpp_interface(pf_dev->cpp);
1233 
1234 	/* Looking for firmware file in order of priority */
1235 
1236 	/* First try to find a firmware image specific for this device */
1237 	snprintf(serial, sizeof(serial),
1238 			"serial-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x",
1239 			cpp_serial[0], cpp_serial[1], cpp_serial[2], cpp_serial[3],
1240 			cpp_serial[4], cpp_serial[5], interface >> 8, interface & 0xff);
1241 	snprintf(fw_name, fw_size, "%s/%s.nffw", DEFAULT_FW_PATH, serial);
1242 
1243 	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s.", fw_name);
1244 	if (access(fw_name, F_OK) == 0)
1245 		return 0;
1246 
1247 	/* Then try the PCI name */
1248 	snprintf(fw_name, fw_size, "%s/pci-%s.nffw", DEFAULT_FW_PATH,
1249 			pf_dev->pci_dev->name);
1250 
1251 	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s.", fw_name);
1252 	if (access(fw_name, F_OK) == 0)
1253 		return 0;
1254 
1255 	nfp_fw_model = nfp_hwinfo_lookup(pf_dev->hwinfo, "nffw.partno");
1256 	if (nfp_fw_model == NULL) {
1257 		nfp_fw_model = nfp_hwinfo_lookup(pf_dev->hwinfo, "assembly.partno");
1258 		if (nfp_fw_model == NULL) {
1259 			PMD_DRV_LOG(ERR, "Firmware model NOT found.");
1260 			return -EIO;
1261 		}
1262 	}
1263 
1264 	/* And then try the model name */
1265 	snprintf(card_desc, sizeof(card_desc), "%s.nffw", nfp_fw_model);
1266 	snprintf(fw_name, fw_size, "%s/%s", DEFAULT_FW_PATH, card_desc);
1267 	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s.", fw_name);
1268 	if (access(fw_name, F_OK) == 0)
1269 		return 0;
1270 
1271 	/* Finally try the card type and media */
1272 	snprintf(card_desc, sizeof(card_desc), "nic_%s_%dx%d.nffw",
1273 			nfp_fw_model, pf_dev->nfp_eth_table->count,
1274 			pf_dev->nfp_eth_table->ports[0].speed / 1000);
1275 	snprintf(fw_name, fw_size, "%s/%s", DEFAULT_FW_PATH, card_desc);
1276 	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s.", fw_name);
1277 	if (access(fw_name, F_OK) == 0)
1278 		return 0;
1279 
1280 	return -ENOENT;
1281 }
1282 
1283 static int
1284 nfp_fw_upload(struct nfp_nsp *nsp,
1285 		char *fw_name)
1286 {
1287 	int err;
1288 	void *fw_buf;
1289 	size_t fsize;
1290 
1291 	err = rte_firmware_read(fw_name, &fw_buf, &fsize);
1292 	if (err != 0) {
1293 		PMD_DRV_LOG(ERR, "Firmware %s not found!", fw_name);
1294 		return -ENOENT;
1295 	}
1296 
1297 	PMD_DRV_LOG(INFO, "Firmware file found at %s with size: %zu.",
1298 			fw_name, fsize);
1299 	PMD_DRV_LOG(INFO, "Uploading the firmware ...");
1300 	if (nfp_nsp_load_fw(nsp, fw_buf, fsize) < 0) {
1301 		free(fw_buf);
1302 		PMD_DRV_LOG(ERR, "Firmware load failed.");
1303 		return -EIO;
1304 	}
1305 
1306 	PMD_DRV_LOG(INFO, "Done.");
1307 
1308 	free(fw_buf);
1309 
1310 	return 0;
1311 }
1312 
1313 static void
1314 nfp_fw_unload(struct nfp_cpp *cpp)
1315 {
1316 	int err;
1317 	struct nfp_nsp *nsp;
1318 
1319 	nsp = nfp_nsp_open(cpp);
1320 	if (nsp == NULL)
1321 		return;
1322 
1323 	err = nfp_nsp_device_soft_reset(nsp);
1324 	if (err != 0)
1325 		PMD_DRV_LOG(WARNING, "Failed to do a soft reset when unloading the NFP FW.");
1326 
1327 	nfp_nsp_close(nsp);
1328 }
1329 
1330 static int
1331 nfp_fw_check_change(struct nfp_cpp *cpp,
1332 		char *fw_name,
1333 		bool *fw_changed)
1334 {
1335 	int ret;
1336 	uint32_t new_version = 0;
1337 	uint32_t old_version = 0;
1338 
1339 	ret = nfp_elf_get_fw_version(&new_version, fw_name);
1340 	if (ret != 0)
1341 		return ret;
1342 
1343 	nfp_net_get_fw_version(cpp, &old_version);
1344 
1345 	if (new_version != old_version) {
1346 		PMD_DRV_LOG(INFO, "FW version has changed: new %u, old %u.",
1347 				new_version, old_version);
1348 		*fw_changed = true;
1349 	} else {
1350 		PMD_DRV_LOG(INFO, "FW version is unchanged: %u.", new_version);
1351 		*fw_changed = false;
1352 	}
1353 
1354 	return 0;
1355 }
1356 
1357 static void
1358 nfp_pcie_reg32_write_clear(struct rte_pci_device *pci_dev,
1359 		int position)
1360 {
1361 	int ret;
1362 	uint32_t capability;
1363 
1364 	ret = rte_pci_read_config(pci_dev, &capability, 4, position);
1365 	if (ret < 0)
1366 		capability = 0xffffffff;
1367 
1368 	(void)rte_pci_write_config(pci_dev, &capability, 4, position);
1369 }
1370 
1371 static void
1372 nfp_pcie_aer_clear(struct rte_pci_device *pci_dev)
1373 {
1374 	int pos;
1375 
1376 	pos = rte_pci_find_ext_capability(pci_dev, RTE_PCI_EXT_CAP_ID_ERR);
1377 	if (pos <= 0)
1378 		return;
1379 
1380 	nfp_pcie_reg32_write_clear(pci_dev, pos + RTE_PCI_ERR_UNCOR_STATUS);
1381 	nfp_pcie_reg32_write_clear(pci_dev, pos + RTE_PCI_ERR_COR_STATUS);
1382 }
1383 
1384 static int
1385 nfp_fw_reload(struct nfp_nsp *nsp,
1386 		char *fw_name,
1387 		struct rte_pci_device *pci_dev,
1388 		int reset)
1389 {
1390 	int err;
1391 	bool reset_flag;
1392 
1393 	reset_flag = (reset == NFP_NSP_DRV_RESET_ALWAYS) ||
1394 			(reset == NFP_NSP_DRV_RESET_DISK);
1395 
1396 	if (reset_flag) {
1397 		err = nfp_nsp_device_soft_reset(nsp);
1398 		if (err != 0) {
1399 			PMD_DRV_LOG(ERR, "NFP firmware soft reset failed.");
1400 			return err;
1401 		}
1402 	}
1403 
1404 	/*
1405 	 * Accessing device memory during soft reset may result in some
1406 	 * errors being recorded in the PCIe AER registers, which is normal.
1407 	 * Therefore, after the soft reset is completed, these errors
1408 	 * should be cleared.
1409 	 */
1410 	nfp_pcie_aer_clear(pci_dev);
1411 
1412 	err = nfp_fw_upload(nsp, fw_name);
1413 	if (err != 0) {
1414 		PMD_DRV_LOG(ERR, "NFP firmware load failed.");
1415 		return err;
1416 	}
1417 
1418 	return 0;
1419 }
1420 
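/*
 * Decide whether the firmware load can be skipped by sampling the other
 * PFs' keepalive beats: a beat that keeps advancing means another driver
 * instance is still using the firmware, while a non-zero beat that never
 * advances indicates a PF that exited abnormally.
 */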
1421 static bool
1422 nfp_fw_skip_load(const struct nfp_dev_info *dev_info,
1423 		struct nfp_multi_pf *multi_pf,
1424 		bool *reload_fw)
1425 {
1426 	uint8_t i;
1427 	uint64_t tmp_beat;
1428 	uint32_t port_num;
1429 	uint8_t in_use = 0;
1430 	uint64_t beat[dev_info->pf_num_per_unit];
1431 	uint32_t offset[dev_info->pf_num_per_unit];
1432 	uint8_t abnormal = dev_info->pf_num_per_unit;
1433 
1434 	sleep(1);
1435 	for (port_num = 0; port_num < dev_info->pf_num_per_unit; port_num++) {
1436 		if (port_num == multi_pf->function_id) {
1437 			abnormal--;
1438 			continue;
1439 		}
1440 
1441 		offset[port_num] = NFP_BEAT_OFFSET(port_num);
1442 		beat[port_num] = nn_readq(multi_pf->beat_addr + offset[port_num]);
1443 		if (beat[port_num] == 0)
1444 			abnormal--;
1445 	}
1446 
1447 	if (abnormal == 0)
1448 		return true;
1449 
1450 	for (i = 0; i < 3; i++) {
1451 		sleep(1);
1452 		for (port_num = 0; port_num < dev_info->pf_num_per_unit; port_num++) {
1453 			if (port_num == multi_pf->function_id)
1454 				continue;
1455 
1456 			if (beat[port_num] == 0)
1457 				continue;
1458 
1459 			tmp_beat = nn_readq(multi_pf->beat_addr + offset[port_num]);
1460 			if (tmp_beat != beat[port_num]) {
1461 				in_use++;
1462 				abnormal--;
1463 				beat[port_num] = 0;
1464 				if (*reload_fw) {
1465 					*reload_fw = false;
1466 					PMD_DRV_LOG(ERR, "The param %s is ignored, other PFs are still in use.",
1467 							NFP_PF_FORCE_RELOAD_FW);
1468 				}
1469 			}
1470 		}
1471 
1472 		if (abnormal == 0)
1473 			return true;
1474 	}
1475 
1476 	if (in_use != 0) {
1477 		PMD_DRV_LOG(WARNING, "Abnormal count %u != 0, the NIC has ports which exited abnormally.",
1478 				abnormal);
1479 		return true;
1480 	}
1481 
1482 	return false;
1483 }
1484 
1485 static int
1486 nfp_fw_reload_from_flash(struct nfp_nsp *nsp)
1487 {
1488 	int ret;
1489 
1490 	ret = nfp_nsp_load_stored_fw(nsp);
1491 	if (ret != 0) {
1492 		PMD_DRV_LOG(ERR, "Load firmware from flash failed.");
1493 		return -EACCES;
1494 	}
1495 
1496 	return 0;
1497 }
1498 
1499 static int
1500 nfp_fw_reload_for_single_pf_from_disk(struct nfp_nsp *nsp,
1501 		char *fw_name,
1502 		struct nfp_pf_dev *pf_dev,
1503 		int reset)
1504 {
1505 	int ret;
1506 	bool fw_changed = true;
1507 
1508 	if (nfp_nsp_has_fw_loaded(nsp) && nfp_nsp_fw_loaded(nsp) &&
1509 			!pf_dev->devargs.force_reload_fw) {
1510 		ret = nfp_fw_check_change(pf_dev->cpp, fw_name, &fw_changed);
1511 		if (ret != 0)
1512 			return ret;
1513 	}
1514 
1515 	if (!fw_changed)
1516 		return 0;
1517 
1518 	ret = nfp_fw_reload(nsp, fw_name, pf_dev->pci_dev, reset);
1519 	if (ret != 0)
1520 		return ret;
1521 
1522 	return 0;
1523 }
1524 
1525 static int
1526 nfp_fw_reload_for_single_pf(struct nfp_nsp *nsp,
1527 		char *fw_name,
1528 		struct nfp_pf_dev *pf_dev,
1529 		int reset,
1530 		int policy)
1531 {
1532 	int ret;
1533 
1534 	if (policy == NFP_NSP_APP_FW_LOAD_FLASH && nfp_nsp_has_stored_fw_load(nsp)) {
1535 		ret = nfp_fw_reload_from_flash(nsp);
1536 		if (ret != 0) {
1537 			PMD_DRV_LOG(ERR, "Load single PF firmware from flash failed.");
1538 			return ret;
1539 		}
1540 	} else if (fw_name[0] != 0) {
1541 		ret = nfp_fw_reload_for_single_pf_from_disk(nsp, fw_name, pf_dev, reset);
1542 		if (ret != 0) {
1543 			PMD_DRV_LOG(ERR, "Load single PF firmware from disk failed.");
1544 			return ret;
1545 		}
1546 	} else {
1547 		PMD_DRV_LOG(ERR, "Firmware not loaded, please update the flash or reconfigure the card.");
1548 		return -ENODATA;
1549 	}
1550 
1551 	return 0;
1552 }
1553 
1554 static int
1555 nfp_fw_reload_for_multi_pf_from_disk(struct nfp_nsp *nsp,
1556 		char *fw_name,
1557 		const struct nfp_dev_info *dev_info,
1558 		struct nfp_pf_dev *pf_dev,
1559 		int reset)
1560 {
1561 	int err;
1562 	bool fw_changed = true;
1563 	bool skip_load_fw = false;
1564 	bool reload_fw = pf_dev->devargs.force_reload_fw;
1565 
1566 	if (nfp_nsp_has_fw_loaded(nsp) && nfp_nsp_fw_loaded(nsp) && !reload_fw) {
1567 		err = nfp_fw_check_change(pf_dev->cpp, fw_name, &fw_changed);
1568 		if (err != 0)
1569 			return err;
1570 	}
1571 
1572 	if (!fw_changed || reload_fw)
1573 		skip_load_fw = nfp_fw_skip_load(dev_info, &pf_dev->multi_pf, &reload_fw);
1574 
1575 	if (skip_load_fw && !reload_fw)
1576 		return 0;
1577 
1578 	err = nfp_fw_reload(nsp, fw_name, pf_dev->pci_dev, reset);
1579 	if (err != 0)
1580 		return err;
1581 
1582 	return 0;
1583 }
1584 
1585 static int
1586 nfp_fw_reload_for_multi_pf(struct nfp_nsp *nsp,
1587 		char *fw_name,
1588 		const struct nfp_dev_info *dev_info,
1589 		struct nfp_pf_dev *pf_dev,
1590 		int reset,
1591 		int policy)
1592 {
1593 	int err;
1594 	struct nfp_multi_pf *multi_pf;
1595 
1596 	multi_pf = &pf_dev->multi_pf;
1597 
1598 	err = nfp_net_keepalive_init(pf_dev->cpp, multi_pf);
1599 	if (err != 0) {
1600 		PMD_DRV_LOG(ERR, "NFP init beat failed.");
1601 		return err;
1602 	}
1603 
1604 	err = nfp_net_keepalive_start(multi_pf);
1605 	if (err != 0) {
1606 		PMD_DRV_LOG(ERR, "NFP write beat failed.");
1607 		goto keepalive_uninit;
1608 	}
1609 
1610 	if (policy == NFP_NSP_APP_FW_LOAD_FLASH && nfp_nsp_has_stored_fw_load(nsp)) {
1611 		err = nfp_fw_reload_from_flash(nsp);
1612 		if (err != 0) {
1613 			PMD_DRV_LOG(ERR, "Load multi PF firmware from flash failed.");
1614 			goto keepalive_stop;
1615 		}
1616 	} else if (fw_name[0] != 0) {
1617 		err = nfp_fw_reload_for_multi_pf_from_disk(nsp, fw_name, dev_info,
1618 				pf_dev, reset);
1619 		if (err != 0) {
1620 			PMD_DRV_LOG(ERR, "Load multi PF firmware from disk failed.");
1621 			goto keepalive_stop;
1622 		}
1623 	} else {
1624 		PMD_DRV_LOG(ERR, "Firmware not loaded, please update the flash or reconfigure the card.");
1625 		err = -ENODATA;
1626 		goto keepalive_stop;
1627 	}
1628 
1629 	nfp_net_keepalive_clear_others(dev_info, multi_pf);
1630 
1631 	return 0;
1632 
1633 keepalive_stop:
1634 	nfp_net_keepalive_stop(multi_pf);
1635 keepalive_uninit:
1636 	nfp_net_keepalive_uninit(multi_pf);
1637 
1638 	return err;
1639 }
1640 
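/* The strtol() wrapper which only accepts a fully numeric string. */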
1641 static int
1642 nfp_strtol(const char *buf,
1643 		int base,
1644 		long *value)
1645 {
1646 	long val;
1647 	char *tmp;
1648 
1649 	if (value == NULL)
1650 		return -EINVAL;
1651 
1652 	val = strtol(buf, &tmp, base);
1653 	if (tmp == NULL || *tmp != 0)
1654 		return -EINVAL;
1655 
1656 	*value = val;
1657 
1658 	return 0;
1659 }
1660 
1661 static int
1662 nfp_fw_policy_value_get(struct nfp_nsp *nsp,
1663 		const char *key,
1664 		const char *default_val,
1665 		int max_val,
1666 		int *value)
1667 {
1668 	int ret;
1669 	long val;
1670 	char buf[64];
1671 
1672 	snprintf(buf, sizeof(buf), "%s", key);
1673 	ret = nfp_nsp_hwinfo_lookup_optional(nsp, buf, sizeof(buf), default_val);
1674 	if (ret != 0)
1675 		return ret;
1676 
1677 	ret = nfp_strtol(buf, 0, &val);
1678 	if (ret != 0 || val < 0 || val > max_val) {
1679 		PMD_DRV_LOG(WARNING, "Invalid value '%s' from '%s', ignoring.",
1680 				buf, key);
1681 		/* Fall back to the default value */
1682 		ret = nfp_strtol(default_val, 0, &val);
1683 		if (ret != 0)
1684 			return ret;
1685 	}
1686 
1687 	*value = val;
1688 
1689 	return 0;
1690 }
1691 
1692 static int
1693 nfp_fw_setup(struct nfp_pf_dev *pf_dev,
1694 		const struct nfp_dev_info *dev_info)
1695 {
1696 	int err;
1697 	int reset;
1698 	int policy;
1699 	char fw_name[125];
1700 	struct nfp_nsp *nsp;
1701 
1702 	nsp = nfp_nsp_open(pf_dev->cpp);
1703 	if (nsp == NULL) {
1704 		PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle.");
1705 		return -EIO;
1706 	}
1707 
1708 	err = nfp_fw_policy_value_get(nsp, "abi_drv_reset",
1709 			NFP_NSP_DRV_RESET_DEFAULT, NFP_NSP_DRV_RESET_NEVER,
1710 			&reset);
1711 	if (err != 0) {
1712 		PMD_DRV_LOG(ERR, "Get 'abi_drv_reset' from HWinfo failed.");
1713 		goto close_nsp;
1714 	}
1715 
1716 	err = nfp_fw_policy_value_get(nsp, "app_fw_from_flash",
1717 			NFP_NSP_APP_FW_LOAD_DEFAULT, NFP_NSP_APP_FW_LOAD_PREF,
1718 			&policy);
1719 	if (err != 0) {
1720 		PMD_DRV_LOG(ERR, "Get 'app_fw_from_flash' from HWinfo failed.");
1721 		goto close_nsp;
1722 	}
1723 
1724 	fw_name[0] = 0;
1725 	if (policy != NFP_NSP_APP_FW_LOAD_FLASH) {
1726 		err = nfp_fw_get_name(pf_dev, fw_name, sizeof(fw_name));
1727 		if (err != 0) {
1728 			PMD_DRV_LOG(ERR, "Cannot find a suitable firmware.");
1729 			goto close_nsp;
1730 		}
1731 	}
1732 
1733 	if (pf_dev->multi_pf.enabled)
1734 		err = nfp_fw_reload_for_multi_pf(nsp, fw_name, dev_info,
1735 				pf_dev, reset, policy);
1736 	else
1737 		err = nfp_fw_reload_for_single_pf(nsp, fw_name, pf_dev,
1738 				reset, policy);
1739 
1740 close_nsp:
1741 	nfp_nsp_close(nsp);
1742 	return err;
1743 }
1744 
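/* With multi-PF firmware, each PF exposes exactly one vNIC. */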
1745 static inline bool
1746 nfp_check_multi_pf_from_fw(uint32_t total_vnics)
1747 {
1748 	if (total_vnics == 1)
1749 		return true;
1750 
1751 	return false;
1752 }
1753 
1754 static inline bool
1755 nfp_check_multi_pf_from_nsp(struct rte_pci_device *pci_dev,
1756 		struct nfp_cpp *cpp)
1757 {
1758 	bool flag;
1759 	struct nfp_nsp *nsp;
1760 
1761 	nsp = nfp_nsp_open(cpp);
1762 	if (nsp == NULL) {
1763 		PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle.");
1764 		return false;
1765 	}
1766 
1767 	flag = (nfp_nsp_get_abi_ver_major(nsp) > 0) &&
1768 			(pci_dev->id.device_id == PCI_DEVICE_ID_NFP3800_PF_NIC);
1769 
1770 	nfp_nsp_close(nsp);
1771 	return flag;
1772 }
1773 
1774 static int
1775 nfp_enable_multi_pf(struct nfp_pf_dev *pf_dev)
1776 {
1777 	int err = 0;
1778 	uint64_t tx_base;
1779 	uint8_t *ctrl_bar;
1780 	struct nfp_hw *hw;
1781 	uint32_t cap_extend;
1782 	struct nfp_net_hw net_hw;
1783 	struct nfp_cpp_area *area;
1784 	char name[RTE_ETH_NAME_MAX_LEN];
1785 
1786 	memset(&net_hw, 0, sizeof(struct nfp_net_hw));
1787 
1788 	/* Map the symbol table */
1789 	pf_dev->ctrl_bar_size = NFP_NET_CFG_BAR_SZ_MIN;
1790 	snprintf(name, sizeof(name), "_pf%u_net_bar0",
1791 			pf_dev->multi_pf.function_id);
1792 	ctrl_bar = nfp_rtsym_map(pf_dev->sym_tbl, name, pf_dev->ctrl_bar_size,
1793 			&area);
1794 	if (ctrl_bar == NULL) {
1795 		PMD_INIT_LOG(ERR, "Failed to find data vNIC memory symbol.");
1796 		return -ENODEV;
1797 	}
1798 
1799 	hw = &net_hw.super;
1800 	hw->ctrl_bar = ctrl_bar;
1801 
1802 	/* Check the version from firmware */
1803 	if (!nfp_net_version_check(hw, pf_dev)) {
1804 		PMD_INIT_LOG(ERR, "Not the valid version.");
1805 		err = -EINVAL;
1806 		goto end;
1807 	}
1808 
1809 	/* Set the ctrl bar size */
1810 	nfp_net_ctrl_bar_size_set(pf_dev);
1811 
1812 	if (!pf_dev->multi_pf.enabled)
1813 		goto end;
1814 
1815 	cap_extend = nn_cfg_readl(hw, NFP_NET_CFG_CAP_WORD1);
1816 	if ((cap_extend & NFP_NET_CFG_CTRL_MULTI_PF) == 0) {
1817 		PMD_INIT_LOG(ERR, "Loaded firmware does not support multiple PF.");
1818 		err = -EINVAL;
1819 		goto end;
1820 	}
1821 
1822 	tx_base = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
1823 	net_hw.tx_bar = pf_dev->qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
1824 	nfp_net_cfg_queue_setup(&net_hw);
1825 	rte_spinlock_init(&hw->reconfig_lock);
1826 	err = nfp_ext_reconfig(&net_hw.super, NFP_NET_CFG_CTRL_MULTI_PF,
1827 			NFP_NET_CFG_UPDATE_GEN);
1828 	if (err != 0) {
1829 		PMD_INIT_LOG(ERR, "Configure multiple PF failed.");
1830 		goto end;
1831 	}
1832 
1833 end:
1834 	nfp_cpp_area_release_free(area);
1835 	return err;
1836 }
1837 
1838 static bool
1839 nfp_app_fw_nic_total_phyports_check(struct nfp_pf_dev *pf_dev)
1840 {
1841 	uint8_t total_phyports;
1842 
1843 	total_phyports = nfp_net_get_phyports_from_fw(pf_dev);
1844 
1845 	if (pf_dev->multi_pf.enabled) {
1846 		if (!nfp_check_multi_pf_from_fw(total_phyports)) {
1847 			PMD_INIT_LOG(ERR, "NSP reports multi-PF, but the FW does not.");
1848 			return false;
1849 		}
1850 	} else {
1851 		/*
1852 		 * For single PF the number of vNICs exposed should be the same as the
1853 		 * number of physical ports.
1854 		 */
1855 		if (total_phyports != pf_dev->nfp_eth_table->count) {
1856 			PMD_INIT_LOG(ERR, "Total physical ports do not match number of vNICs.");
1857 			return false;
1858 		}
1859 	}
1860 
1861 	return true;
1862 }
1863 
1864 static void
1865 nfp_port_name_generate(char *port_name,
1866 		size_t length,
1867 		int port_id,
1868 		struct nfp_pf_dev *pf_dev)
1869 {
1870 	const char *name = pf_dev->pci_dev->device.name;
1871 
1872 	if (pf_dev->multi_pf.enabled)
1873 		snprintf(port_name, length, "%s", name);
1874 	else
1875 		snprintf(port_name, length, "%s_port%u", name, port_id);
1876 }
1877 
1878 static int
1879 nfp_init_app_fw_nic(struct nfp_net_hw_priv *hw_priv)
1880 {
1881 	uint8_t i;
1882 	uint8_t id;
1883 	int ret = 0;
1884 	struct nfp_app_fw_nic *app_fw_nic;
1885 	struct nfp_eth_table *nfp_eth_table;
1886 	char bar_name[RTE_ETH_NAME_MAX_LEN];
1887 	char port_name[RTE_ETH_NAME_MAX_LEN];
1888 	struct nfp_pf_dev *pf_dev = hw_priv->pf_dev;
1889 	struct nfp_net_init hw_init = {
1890 		.hw_priv = hw_priv,
1891 	};
1892 
1893 	nfp_eth_table = pf_dev->nfp_eth_table;
1894 	PMD_INIT_LOG(INFO, "Total physical ports: %d.", nfp_eth_table->count);
1895 	id = nfp_function_id_get(pf_dev, 0);
1896 
1897 	/* Allocate memory for the CoreNIC app */
1898 	app_fw_nic = rte_zmalloc("nfp_app_fw_nic", sizeof(*app_fw_nic), 0);
1899 	if (app_fw_nic == NULL)
1900 		return -ENOMEM;
1901 
1902 	/* Point the app_fw_priv pointer in the PF to the coreNIC app */
1903 	pf_dev->app_fw_priv = app_fw_nic;
1904 
1905 	/* Check the number of vNIC's created for the PF */
1906 	if (!nfp_app_fw_nic_total_phyports_check(pf_dev)) {
1907 		ret = -ENODEV;
1908 		goto app_cleanup;
1909 	}
1910 
1911 	/* Populate coreNIC app properties */
1912 	if (pf_dev->total_phyports > 1)
1913 		app_fw_nic->multiport = true;
1914 
1915 	/* Map the symbol table */
1916 	snprintf(bar_name, sizeof(bar_name), "_pf%u_net_bar0", id);
1917 	pf_dev->ctrl_bar = nfp_rtsym_map(pf_dev->sym_tbl, bar_name,
1918 			pf_dev->total_phyports * pf_dev->ctrl_bar_size,
1919 			&pf_dev->ctrl_area);
1920 	if (pf_dev->ctrl_bar == NULL) {
1921 		PMD_INIT_LOG(ERR, "The nfp_rtsym_map fails for %s.", bar_name);
1922 		ret = -EIO;
1923 		goto app_cleanup;
1924 	}
1925 
1926 	PMD_INIT_LOG(DEBUG, "Ctrl bar: %p.", pf_dev->ctrl_bar);
1927 
1928 	/* Loop through all physical ports on PF */
1929 	for (i = 0; i < pf_dev->total_phyports; i++) {
1930 		nfp_port_name_generate(port_name, sizeof(port_name), i, pf_dev);
1931 
1932 		id = nfp_function_id_get(pf_dev, i);
1933 		hw_init.idx = id;
1934 		hw_init.nfp_idx = nfp_eth_table->ports[id].index;
1935 		ret = rte_eth_dev_create(&pf_dev->pci_dev->device, port_name,
1936 				sizeof(struct nfp_net_hw), NULL, NULL,
1937 				nfp_net_init, &hw_init);
1938 		if (ret != 0)
1939 			goto port_cleanup;
1940 
1941 	} /* End loop, all ports on this PF */
1942 
1943 	return 0;
1944 
1945 port_cleanup:
1946 	for (uint32_t j = 0; j < i; j++) {
1947 		struct rte_eth_dev *eth_dev;
1948 
1949 		nfp_port_name_generate(port_name, sizeof(port_name), j, pf_dev);
1950 		eth_dev = rte_eth_dev_get_by_name(port_name);
1951 		if (eth_dev != NULL)
1952 			rte_eth_dev_destroy(eth_dev, nfp_net_uninit);
1953 	}
1954 	nfp_cpp_area_release_free(pf_dev->ctrl_area);
1955 app_cleanup:
1956 	rte_free(app_fw_nic);
1957 
1958 	return ret;
1959 }
1960 
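/*
 * Publish the port speed indifference flag to the NSP hwinfo table.
 * For non-flower firmware the flag comes from the "_pf<N>_net_app_cap"
 * run-time symbol; flower firmware is always treated as indifferent.
 */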
1961 static int
1962 nfp_net_hwinfo_set(uint8_t function_id,
1963 		struct nfp_rtsym_table *sym_tbl,
1964 		struct nfp_cpp *cpp,
1965 		enum nfp_app_fw_id app_fw_id)
1966 {
1967 	int ret = 0;
1968 	uint64_t app_cap;
1969 	struct nfp_nsp *nsp;
1970 	uint8_t sp_indiff = 1;
1971 	char hw_info[RTE_ETH_NAME_MAX_LEN];
1972 	char app_cap_name[RTE_ETH_NAME_MAX_LEN];
1973 
1974 	if (app_fw_id != NFP_APP_FW_FLOWER_NIC) {
1975 		/* Read the app capabilities of the firmware loaded */
1976 		snprintf(app_cap_name, sizeof(app_cap_name), "_pf%u_net_app_cap", function_id);
1977 		app_cap = nfp_rtsym_read_le(sym_tbl, app_cap_name, &ret);
1978 		if (ret != 0) {
1979 			PMD_INIT_LOG(ERR, "Could not read app_fw_cap from firmware.");
1980 			return ret;
1981 		}
1982 
1983 		/* Calculate the value of sp_indiff and write to hw_info */
1984 		sp_indiff = app_cap & NFP_NET_APP_CAP_SP_INDIFF;
1985 	}
1986 
1987 	snprintf(hw_info, sizeof(hw_info), "sp_indiff=%u", sp_indiff);
1988 
1989 	nsp = nfp_nsp_open(cpp);
1990 	if (nsp == NULL) {
1991 		PMD_INIT_LOG(ERR, "Could not get NSP.");
1992 		return -EIO;
1993 	}
1994 
1995 	ret = nfp_nsp_hwinfo_set(nsp, hw_info, sizeof(hw_info));
1996 	nfp_nsp_close(nsp);
1997 	if (ret != 0) {
1998 		PMD_INIT_LOG(ERR, "Failed to set parameter to hwinfo.");
1999 		return ret;
2000 	}
2001 
2002 	return 0;
2003 }
2004 
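/* Map the NSP media link modes to RTE_ETH_LINK_SPEED_* capability flags. */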
2005 const uint32_t nfp_eth_media_table[NFP_MEDIA_LINK_MODES_NUMBER] = {
2006 	[NFP_MEDIA_W0_RJ45_10M]     = RTE_ETH_LINK_SPEED_10M,
2007 	[NFP_MEDIA_W0_RJ45_10M_HD]  = RTE_ETH_LINK_SPEED_10M_HD,
2008 	[NFP_MEDIA_W0_RJ45_100M]    = RTE_ETH_LINK_SPEED_100M,
2009 	[NFP_MEDIA_W0_RJ45_100M_HD] = RTE_ETH_LINK_SPEED_100M_HD,
2010 	[NFP_MEDIA_W0_RJ45_1G]      = RTE_ETH_LINK_SPEED_1G,
2011 	[NFP_MEDIA_W0_RJ45_2P5G]    = RTE_ETH_LINK_SPEED_2_5G,
2012 	[NFP_MEDIA_W0_RJ45_5G]      = RTE_ETH_LINK_SPEED_5G,
2013 	[NFP_MEDIA_W0_RJ45_10G]     = RTE_ETH_LINK_SPEED_10G,
2014 	[NFP_MEDIA_1000BASE_CX]     = RTE_ETH_LINK_SPEED_1G,
2015 	[NFP_MEDIA_1000BASE_KX]     = RTE_ETH_LINK_SPEED_1G,
2016 	[NFP_MEDIA_10GBASE_KX4]     = RTE_ETH_LINK_SPEED_10G,
2017 	[NFP_MEDIA_10GBASE_KR]      = RTE_ETH_LINK_SPEED_10G,
2018 	[NFP_MEDIA_10GBASE_CX4]     = RTE_ETH_LINK_SPEED_10G,
2019 	[NFP_MEDIA_10GBASE_CR]      = RTE_ETH_LINK_SPEED_10G,
2020 	[NFP_MEDIA_10GBASE_SR]      = RTE_ETH_LINK_SPEED_10G,
2021 	[NFP_MEDIA_10GBASE_ER]      = RTE_ETH_LINK_SPEED_10G,
2022 	[NFP_MEDIA_25GBASE_KR]      = RTE_ETH_LINK_SPEED_25G,
2023 	[NFP_MEDIA_25GBASE_KR_S]    = RTE_ETH_LINK_SPEED_25G,
2024 	[NFP_MEDIA_25GBASE_CR]      = RTE_ETH_LINK_SPEED_25G,
2025 	[NFP_MEDIA_25GBASE_CR_S]    = RTE_ETH_LINK_SPEED_25G,
2026 	[NFP_MEDIA_25GBASE_SR]      = RTE_ETH_LINK_SPEED_25G,
2027 	[NFP_MEDIA_40GBASE_CR4]     = RTE_ETH_LINK_SPEED_40G,
2028 	[NFP_MEDIA_40GBASE_KR4]     = RTE_ETH_LINK_SPEED_40G,
2029 	[NFP_MEDIA_40GBASE_SR4]     = RTE_ETH_LINK_SPEED_40G,
2030 	[NFP_MEDIA_40GBASE_LR4]     = RTE_ETH_LINK_SPEED_40G,
2031 	[NFP_MEDIA_50GBASE_KR]      = RTE_ETH_LINK_SPEED_50G,
2032 	[NFP_MEDIA_50GBASE_SR]      = RTE_ETH_LINK_SPEED_50G,
2033 	[NFP_MEDIA_50GBASE_CR]      = RTE_ETH_LINK_SPEED_50G,
2034 	[NFP_MEDIA_50GBASE_LR]      = RTE_ETH_LINK_SPEED_50G,
2035 	[NFP_MEDIA_50GBASE_ER]      = RTE_ETH_LINK_SPEED_50G,
2036 	[NFP_MEDIA_50GBASE_FR]      = RTE_ETH_LINK_SPEED_50G,
2037 	[NFP_MEDIA_100GBASE_KR4]    = RTE_ETH_LINK_SPEED_100G,
2038 	[NFP_MEDIA_100GBASE_SR4]    = RTE_ETH_LINK_SPEED_100G,
2039 	[NFP_MEDIA_100GBASE_CR4]    = RTE_ETH_LINK_SPEED_100G,
2040 	[NFP_MEDIA_100GBASE_KP4]    = RTE_ETH_LINK_SPEED_100G,
2041 	[NFP_MEDIA_100GBASE_CR10]   = RTE_ETH_LINK_SPEED_100G,
2042 	[NFP_MEDIA_10GBASE_LR]      = RTE_ETH_LINK_SPEED_10G,
2043 	[NFP_MEDIA_25GBASE_LR]      = RTE_ETH_LINK_SPEED_25G,
2044 	[NFP_MEDIA_25GBASE_ER]      = RTE_ETH_LINK_SPEED_25G
2045 };
2046 
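/*
 * Walk the supported_modes bitmap word by word: a set bit at position
 * (i * 64 + j) marks link mode (i * 64 + j) of nfp_eth_media_table as
 * supported. For example, a set NFP_MEDIA_25GBASE_CR bit ORs
 * RTE_ETH_LINK_SPEED_25G into the accumulated speed capabilities.
 */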
2047 static int
2048 nfp_net_speed_capa_get_real(struct nfp_eth_media_buf *media_buf,
2049 		struct nfp_pf_dev *pf_dev)
2050 {
2051 	uint32_t i;
2052 	uint32_t j;
2053 	uint32_t offset;
2054 	uint32_t speed_capa = 0;
2055 	uint64_t supported_modes;
2056 
2057 	for (i = 0; i < RTE_DIM(media_buf->supported_modes); i++) {
2058 		supported_modes = media_buf->supported_modes[i];
2059 		offset = i * UINT64_BIT;
2060 		for (j = 0; j < UINT64_BIT; j++) {
2061 			if (supported_modes == 0)
2062 				break;
2063 
2064 			if ((supported_modes & 1) != 0) {
2065 				if ((j + offset) >= NFP_MEDIA_LINK_MODES_NUMBER) {
2066 					PMD_DRV_LOG(ERR, "Invalid offset of media table.");
2067 					return -EINVAL;
2068 				}
2069 
2070 				speed_capa |= nfp_eth_media_table[j + offset];
2071 			}
2072 
2073 			supported_modes = supported_modes >> 1;
2074 		}
2075 	}
2076 
2077 	pf_dev->speed_capa = speed_capa;
2078 
2079 	return pf_dev->speed_capa == 0 ? -EINVAL : 0;
2080 }
2081 
2082 static int
2083 nfp_net_speed_cap_get_one(struct nfp_pf_dev *pf_dev,
2084 		uint32_t port_id)
2085 {
2086 	int ret;
2087 	struct nfp_nsp *nsp;
2088 	struct nfp_eth_media_buf media_buf;
2089 
2090 	media_buf.eth_index = pf_dev->nfp_eth_table->ports[port_id].eth_index;
2091 	pf_dev->speed_capa = 0;
2092 
2093 	nsp = nfp_nsp_open(pf_dev->cpp);
2094 	if (nsp == NULL) {
2095 		PMD_DRV_LOG(ERR, "Could not get NSP.");
2096 		return -EIO;
2097 	}
2098 
2099 	ret = nfp_nsp_read_media(nsp, &media_buf, sizeof(media_buf));
2100 	nfp_nsp_close(nsp);
2101 	if (ret != 0) {
2102 		PMD_DRV_LOG(ERR, "Failed to read media.");
2103 		return ret;
2104 	}
2105 
2106 	ret = nfp_net_speed_capa_get_real(&media_buf, pf_dev);
2107 	if (ret < 0) {
2108 		PMD_DRV_LOG(ERR, "Speed capability is invalid.");
2109 		return ret;
2110 	}
2111 
2112 	return 0;
2113 }
2114 
2115 static int
2116 nfp_net_speed_cap_get(struct nfp_pf_dev *pf_dev)
2117 {
2118 	int ret;
2119 	uint32_t i;
2120 	uint32_t id;
2121 	uint32_t count;
2122 
2123 	count = pf_dev->total_phyports;
2124 	for (i = 0; i < count; i++) {
2125 		id = nfp_function_id_get(pf_dev, i);
2126 		ret = nfp_net_speed_cap_get_one(pf_dev, id);
2127 		if (ret != 0) {
2128 			PMD_INIT_LOG(ERR, "Failed to get port %u speed capability.", id);
2129 			return ret;
2130 		}
2131 	}
2132 
2133 	return 0;
2134 }
2135 
2136 /* Force the physical port down to clear the possible DMA error */
2137 static int
2138 nfp_net_force_port_down(struct nfp_pf_dev *pf_dev)
2139 {
2140 	int ret;
2141 	uint32_t i;
2142 	uint32_t id;
2143 	uint32_t index;
2144 	uint32_t count;
2145 
2146 	count = pf_dev->total_phyports;
2147 	for (i = 0; i < count; i++) {
2148 		id = nfp_function_id_get(pf_dev, i);
2149 		index = pf_dev->nfp_eth_table->ports[id].index;
2150 		ret = nfp_eth_set_configured(pf_dev->cpp, index, 0);
2151 		if (ret < 0)
2152 			return ret;
2153 	}
2154 
2155 	return 0;
2156 }
2157 
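/* Dispatch to the app specific init code based on the firmware loaded. */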
2158 static int
2159 nfp_fw_app_primary_init(struct nfp_net_hw_priv *hw_priv)
2160 {
2161 	int ret;
2162 	struct nfp_pf_dev *pf_dev = hw_priv->pf_dev;
2163 
2164 	switch (pf_dev->app_fw_id) {
2165 	case NFP_APP_FW_CORE_NIC:
2166 		PMD_INIT_LOG(INFO, "Initializing coreNIC.");
2167 		ret = nfp_init_app_fw_nic(hw_priv);
2168 		if (ret != 0) {
2169 			PMD_INIT_LOG(ERR, "Could not initialize coreNIC!");
2170 			return ret;
2171 		}
2172 		break;
2173 	case NFP_APP_FW_FLOWER_NIC:
2174 		PMD_INIT_LOG(INFO, "Initializing Flower.");
2175 		ret = nfp_init_app_fw_flower(hw_priv);
2176 		if (ret != 0) {
2177 			PMD_INIT_LOG(ERR, "Could not initialize Flower!");
2178 			return ret;
2179 		}
2180 		break;
2181 	default:
2182 		PMD_INIT_LOG(ERR, "Unsupported firmware loaded.");
2183 		ret = -EINVAL;
2184 		return ret;
2185 	}
2186 
2187 	return 0;
2188 }
2189 
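/* Read the max VF count supported by firmware ("nfd_vf_cfg_max_vfs"). */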
2190 static int
2191 nfp_pf_get_max_vf(struct nfp_pf_dev *pf_dev)
2192 {
2193 	int ret;
2194 	uint32_t max_vfs;
2195 
2196 	max_vfs = nfp_rtsym_read_le(pf_dev->sym_tbl, "nfd_vf_cfg_max_vfs", &ret);
2197 	if (ret != 0)
2198 		return ret;
2199 
2200 	pf_dev->max_vfs = max_vfs;
2201 
2202 	return 0;
2203 }
2204 
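/*
 * Derive this PF's VF count and base ID from the PCI SR-IOV capability.
 * The first-VF offset read from config space is rebased by the PF's
 * function ID and the number of PFs per unit. Illustrative numbers: with
 * a read offset of 3, function_id 1 and pf_num_per_unit 2, vf_base_id
 * becomes 3 + 1 - 2 = 2.
 */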
2205 static int
2206 nfp_pf_get_sriov_vf(struct nfp_pf_dev *pf_dev,
2207 		const struct nfp_dev_info *dev_info)
2208 {
2209 	int ret;
2210 	off_t pos;
2211 	uint16_t offset;
2212 	uint16_t sriov_vf;
2213 
2214 	/* For 3800 single-PF and 4000 card */
2215 	if (!pf_dev->multi_pf.enabled) {
2216 		pf_dev->sriov_vf = pf_dev->max_vfs;
2217 		return 0;
2218 	}
2219 
2220 	pos = rte_pci_find_ext_capability(pf_dev->pci_dev, RTE_PCI_EXT_CAP_ID_SRIOV);
2221 	if (pos == 0) {
2222 		PMD_INIT_LOG(ERR, "Can not get the PCI SR-IOV capability.");
2223 		return -EIO;
2224 	}
2225 
2226 	/*
2227 	 * Management firmware ensures that sriov capability registers
2228 	 * are initialized correctly.
2229 	 */
2230 	ret = rte_pci_read_config(pf_dev->pci_dev, &sriov_vf, sizeof(sriov_vf),
2231 			pos + RTE_PCI_SRIOV_TOTAL_VF);
2232 	if (ret < 0) {
2233 		PMD_INIT_LOG(ERR, "Can not read the SR-IOV total VF count.");
2234 		return -EIO;
2235 	}
2236 
2237 	/* Offset of first VF is relative to its PF. */
2238 	ret = rte_pci_read_config(pf_dev->pci_dev, &offset, sizeof(offset),
2239 			pos + RTE_PCI_SRIOV_VF_OFFSET);
2240 	if (ret < 0) {
2241 		PMD_INIT_LOG(ERR, "Can not get the VF offset.");
2242 		return -EIO;
2243 	}
2244 
2245 	offset += pf_dev->multi_pf.function_id;
2246 	if (offset < dev_info->pf_num_per_unit)
2247 		return -ERANGE;
2248 
2249 	offset -= dev_info->pf_num_per_unit;
2250 	if (offset >= pf_dev->max_vfs || offset + sriov_vf > pf_dev->max_vfs) {
2251 		PMD_INIT_LOG(ERR, "The PCI allocated VFs exceed the max VFs.");
2252 		return -ERANGE;
2253 	}
2254 
2255 	pf_dev->vf_base_id = offset;
2256 	pf_dev->sriov_vf = sriov_vf;
2257 
2258 	return 0;
2259 }
2260 
2261 static int
2262 nfp_net_get_vf_info(struct nfp_pf_dev *pf_dev,
2263 		const struct nfp_dev_info *dev_info)
2264 {
2265 	int ret;
2266 
2267 	ret = nfp_pf_get_max_vf(pf_dev);
2268 	if (ret != 0) {
2269 		if (ret != -ENOENT) {
2270 			PMD_INIT_LOG(ERR, "Read max VFs failed.");
2271 			return ret;
2272 		}
2273 
2274 		PMD_INIT_LOG(WARNING, "The firmware does not support reading max VFs.");
2275 		return 0;
2276 	}
2277 
2278 	if (pf_dev->max_vfs == 0)
2279 		return 0;
2280 
2281 	ret = nfp_pf_get_sriov_vf(pf_dev, dev_info);
2282 	if (ret < 0)
2283 		return ret;
2284 
2285 	pf_dev->queue_per_vf = NFP_QUEUE_PER_VF;
2286 
2287 	return 0;
2288 }
2289 
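/*
 * Map this PF's slice of the VF control BAR ("_pf<N>_net_vf_bar", offset
 * vf_base_id * ctrl_bar_size) and the VF config table
 * ("_pf<N>_net_vf_cfg2", one entry per VF plus a mailbox). A no-op when
 * no VFs are assigned.
 */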
2290 static int
2291 nfp_net_vf_config_init(struct nfp_pf_dev *pf_dev)
2292 {
2293 	int ret = 0;
2294 	uint32_t min_size;
2295 	char vf_bar_name[RTE_ETH_NAME_MAX_LEN];
2296 	char vf_cfg_name[RTE_ETH_NAME_MAX_LEN];
2297 
2298 	if (pf_dev->sriov_vf == 0)
2299 		return 0;
2300 
2301 	min_size = pf_dev->ctrl_bar_size * pf_dev->sriov_vf;
2302 	snprintf(vf_bar_name, sizeof(vf_bar_name), "_pf%d_net_vf_bar",
2303 			pf_dev->multi_pf.function_id);
2304 	pf_dev->vf_bar = nfp_rtsym_map_offset(pf_dev->sym_tbl, vf_bar_name,
2305 			pf_dev->ctrl_bar_size * pf_dev->vf_base_id,
2306 			min_size, &pf_dev->vf_area);
2307 	if (pf_dev->vf_bar == NULL) {
2308 		PMD_INIT_LOG(ERR, "Failed to map the VF control bar.");
2309 		return -EIO;
2310 	}
2311 
2312 	min_size = NFP_NET_VF_CFG_SZ * pf_dev->sriov_vf + NFP_NET_VF_CFG_MB_SZ;
2313 	snprintf(vf_cfg_name, sizeof(vf_cfg_name), "_pf%d_net_vf_cfg2",
2314 			pf_dev->multi_pf.function_id);
2315 	pf_dev->vf_cfg_tbl_bar = nfp_rtsym_map(pf_dev->sym_tbl, vf_cfg_name,
2316 			min_size, &pf_dev->vf_cfg_tbl_area);
2317 	if (pf_dev->vf_cfg_tbl_bar == NULL) {
2318 		PMD_INIT_LOG(ERR, "Failed to map the VF config table.");
2319 		ret = -EIO;
2320 		goto vf_bar_cleanup;
2321 	}
2322 
2323 	return 0;
2324 
2325 vf_bar_cleanup:
2326 	nfp_cpp_area_release_free(pf_dev->vf_area);
2327 
2328 	return ret;
2329 }
2330 
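/*
 * Primary process PF init: acquire a CPP handle, read the hwinfo and NSP
 * port tables, set up firmware, map the queue controller and MAC stats
 * BARs, then hand over to the app specific init code.
 */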
2331 static int
2332 nfp_pf_init(struct rte_pci_device *pci_dev)
2333 {
2334 	void *sync;
2335 	int ret = 0;
2336 	uint64_t addr;
2337 	uint32_t cpp_id;
2338 	uint8_t function_id;
2339 	struct nfp_cpp *cpp;
2340 	struct nfp_pf_dev *pf_dev;
2341 	struct nfp_hwinfo *hwinfo;
2342 	enum nfp_app_fw_id app_fw_id;
2343 	char name[RTE_ETH_NAME_MAX_LEN];
2344 	struct nfp_rtsym_table *sym_tbl;
2345 	struct nfp_net_hw_priv *hw_priv;
2346 	char app_name[RTE_ETH_NAME_MAX_LEN];
2347 	struct nfp_eth_table *nfp_eth_table;
2348 	const struct nfp_dev_info *dev_info;
2349 
2350 	if (pci_dev == NULL)
2351 		return -ENODEV;
2352 
2353 	if (pci_dev->mem_resource[0].addr == NULL) {
2354 		PMD_INIT_LOG(ERR, "The address of BAR0 is NULL.");
2355 		return -ENODEV;
2356 	}
2357 
2358 	dev_info = nfp_dev_info_get(pci_dev->id.device_id);
2359 	if (dev_info == NULL) {
2360 		PMD_INIT_LOG(ERR, "Unsupported device ID.");
2361 		return -ENODEV;
2362 	}
2363 
2364 	hw_priv = rte_zmalloc(NULL, sizeof(*hw_priv), 0);
2365 	if (hw_priv == NULL) {
2366 		PMD_INIT_LOG(ERR, "Can not alloc memory for hw priv data.");
2367 		return -ENOMEM;
2368 	}
2369 
2370 	/* Allocate memory for the PF "device" */
2371 	function_id = (pci_dev->addr.function) & 0x07;
2372 	snprintf(name, sizeof(name), "nfp_pf%u", function_id);
2373 	pf_dev = rte_zmalloc(name, sizeof(*pf_dev), 0);
2374 	if (pf_dev == NULL) {
2375 		PMD_INIT_LOG(ERR, "Can not allocate memory for the PF device.");
2376 		ret = -ENOMEM;
2377 		goto hw_priv_free;
2378 	}
2379 
2380 	hw_priv->dev_info = dev_info;
2381 	hw_priv->pf_dev = pf_dev;
2382 
2383 	sync = nfp_sync_alloc();
2384 	if (sync == NULL) {
2385 		PMD_INIT_LOG(ERR, "Failed to alloc sync zone.");
2386 		ret = -ENOMEM;
2387 		goto pf_cleanup;
2388 	}
2389 
2390 	pf_dev->sync = sync;
2391 
2392 	/*
2393 	 * When the device is bound to UIO it could, by mistake, be used by
2394 	 * two DPDK apps at once, and the UIO driver does not prevent it.
2395 	 * This could lead to a serious problem when configuring the NFP CPP
2396 	 * interface. Here we avoid this by telling the CPP init code to
2397 	 * use a lock file if UIO is being used.
2398 	 */
2399 	if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO)
2400 		cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, false);
2401 	else
2402 		cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, true);
2403 
2404 	if (cpp == NULL) {
2405 		PMD_INIT_LOG(ERR, "A CPP handle can not be obtained.");
2406 		ret = -EIO;
2407 		goto sync_free;
2408 	}
2409 
2410 	pf_dev->cpp = cpp;
2411 	pf_dev->pci_dev = pci_dev;
2412 
2413 	hwinfo = nfp_hwinfo_read(cpp);
2414 	if (hwinfo == NULL) {
2415 		PMD_INIT_LOG(ERR, "Error reading hwinfo table.");
2416 		ret = -EIO;
2417 		goto cpp_cleanup;
2418 	}
2419 
2420 	pf_dev->hwinfo = hwinfo;
2421 
2422 	/* Read the number of physical ports from hardware */
2423 	nfp_eth_table = nfp_eth_read_ports(cpp);
2424 	if (nfp_eth_table == NULL) {
2425 		PMD_INIT_LOG(ERR, "Error reading NFP ethernet table.");
2426 		ret = -EIO;
2427 		goto hwinfo_cleanup;
2428 	}
2429 
2430 	if (nfp_eth_table->count == 0 || nfp_eth_table->count > 8) {
2431 		PMD_INIT_LOG(ERR, "NFP ethernet table reports wrong ports: %u.",
2432 				nfp_eth_table->count);
2433 		ret = -EIO;
2434 		goto eth_table_cleanup;
2435 	}
2436 
2437 	pf_dev->nfp_eth_table = nfp_eth_table;
2438 	pf_dev->multi_pf.enabled = nfp_check_multi_pf_from_nsp(pci_dev, cpp);
2439 	pf_dev->multi_pf.function_id = function_id;
2440 	pf_dev->total_phyports = nfp_net_get_phyports_from_nsp(pf_dev);
2441 
2442 	ret = nfp_net_force_port_down(pf_dev);
2443 	if (ret != 0) {
2444 		PMD_INIT_LOG(ERR, "Failed to force port down.");
2445 		ret = -EIO;
2446 		goto eth_table_cleanup;
2447 	}
2448 
2449 	ret = nfp_devargs_parse(&pf_dev->devargs, pci_dev->device.devargs);
2450 	if (ret != 0) {
2451 		PMD_INIT_LOG(ERR, "Error when parsing device args.");
2452 		ret = -EINVAL;
2453 		goto eth_table_cleanup;
2454 	}
2455 
2456 	ret = nfp_net_device_activate(pf_dev);
2457 	if (ret != 0) {
2458 		PMD_INIT_LOG(ERR, "Failed to activate the NFP device.");
2459 		ret = -EIO;
2460 		goto eth_table_cleanup;
2461 	}
2462 
2463 	ret = nfp_fw_setup(pf_dev, dev_info);
2464 	if (ret != 0) {
2465 		PMD_INIT_LOG(ERR, "Error when uploading firmware.");
2466 		ret = -EIO;
2467 		goto eth_table_cleanup;
2468 	}
2469 
2470 	/* Now the symbol table should be there */
2471 	sym_tbl = nfp_rtsym_table_read(cpp);
2472 	if (sym_tbl == NULL) {
2473 		PMD_INIT_LOG(ERR, "Something is wrong with the firmware symbol table.");
2474 		ret = -EIO;
2475 		goto fw_cleanup;
2476 	}
2477 
2478 	pf_dev->sym_tbl = sym_tbl;
2479 
2480 	/* Read the app ID of the firmware loaded */
2481 	snprintf(app_name, sizeof(app_name), "_pf%u_net_app_id", function_id);
2482 	app_fw_id = nfp_rtsym_read_le(sym_tbl, app_name, &ret);
2483 	if (ret != 0) {
2484 		PMD_INIT_LOG(ERR, "Could not read %s from firmware.", app_name);
2485 		ret = -EIO;
2486 		goto sym_tbl_cleanup;
2487 	}
2488 
2489 	pf_dev->app_fw_id = app_fw_id;
2490 
2491 	/* Write sp_indiff to hw_info */
2492 	ret = nfp_net_hwinfo_set(function_id, sym_tbl, cpp, app_fw_id);
2493 	if (ret != 0) {
2494 		PMD_INIT_LOG(ERR, "Failed to set hwinfo.");
2495 		ret = -EIO;
2496 		goto sym_tbl_cleanup;
2497 	}
2498 
2499 	ret = nfp_net_speed_cap_get(pf_dev);
2500 	if (ret != 0) {
2501 		PMD_INIT_LOG(ERR, "Failed to get speed capability.");
2502 		ret = -EIO;
2503 		goto sym_tbl_cleanup;
2504 	}
2505 
2506 	/* Get the VF info */
2507 	ret = nfp_net_get_vf_info(pf_dev, dev_info);
2508 	if (ret != 0) {
2509 		PMD_INIT_LOG(ERR, "Failed to get VF info.");
2510 		ret = -EIO;
2511 		goto sym_tbl_cleanup;
2512 	}
2513 
2514 	/* Configure access to tx/rx vNIC BARs */
2515 	addr = nfp_qcp_queue_offset(dev_info, 0);
2516 	cpp_id = NFP_CPP_ISLAND_ID(0, NFP_CPP_ACTION_RW, 0, 0);
2517 
2518 	pf_dev->qc_bar = nfp_cpp_map_area(pf_dev->cpp, cpp_id,
2519 			addr, dev_info->qc_area_sz, &pf_dev->qc_area);
2520 	if (pf_dev->qc_bar == NULL) {
2521 		PMD_INIT_LOG(ERR, "The nfp_cpp_map_area fails for net.qc.");
2522 		ret = -EIO;
2523 		goto sym_tbl_cleanup;
2524 	}
2525 
2526 	PMD_INIT_LOG(DEBUG, "The qc_bar address: %p.", pf_dev->qc_bar);
2527 
2528 	pf_dev->mac_stats_bar = nfp_rtsym_map(sym_tbl, "_mac_stats",
2529 			NFP_MAC_STATS_SIZE * nfp_eth_table->max_index,
2530 			&pf_dev->mac_stats_area);
2531 	if (pf_dev->mac_stats_bar == NULL) {
2532 		PMD_INIT_LOG(ERR, "The nfp_rtsym_map fails for _mac_stats.");
		ret = -EIO;
2533 		goto hwqueues_cleanup;
2534 	}
2535 
2536 	ret = nfp_enable_multi_pf(pf_dev);
2537 	if (ret != 0)
2538 		goto mac_stats_cleanup;
2539 
2540 	ret = nfp_net_vf_config_init(pf_dev);
2541 	if (ret != 0) {
2542 		PMD_INIT_LOG(ERR, "Failed to init VF config.");
2543 		goto vf_cfg_tbl_cleanup;
2544 	}
2545 
2546 	hw_priv->is_pf = true;
2547 
2548 	if (!nfp_net_recv_pkt_meta_check_register(hw_priv)) {
2549 		PMD_INIT_LOG(ERR, "Failed to register the PF meta check function.");
2550 		ret = -EIO;
2551 		goto vf_cfg_tbl_cleanup;
2552 	}
2553 
2554 	/*
2555 	 * PF initialization has been done at this point. Call app specific
2556 	 * init code now.
2557 	 */
2558 	ret = nfp_fw_app_primary_init(hw_priv);
2559 	if (ret != 0) {
2560 		PMD_INIT_LOG(ERR, "Failed to init hw app primary.");
2561 		goto vf_cfg_tbl_cleanup;
2562 	}
2563 
2564 	/* Register the CPP bridge service here for primary use */
2565 	if (pf_dev->devargs.cpp_service_enable) {
2566 		ret = nfp_enable_cpp_service(pf_dev);
2567 		if (ret != 0) {
2568 			PMD_INIT_LOG(ERR, "Enable CPP service failed.");
2569 			goto vf_cfg_tbl_cleanup;
2570 		}
2571 	}
2572 
2573 	return 0;
2574 
2575 vf_cfg_tbl_cleanup:
2576 	nfp_net_vf_config_uninit(pf_dev);
2577 mac_stats_cleanup:
2578 	nfp_cpp_area_release_free(pf_dev->mac_stats_area);
2579 hwqueues_cleanup:
2580 	nfp_cpp_area_release_free(pf_dev->qc_area);
2581 sym_tbl_cleanup:
2582 	free(sym_tbl);
2583 fw_cleanup:
2584 	nfp_fw_unload(cpp);
2585 	if (pf_dev->multi_pf.enabled) {
2586 		nfp_net_keepalive_stop(&pf_dev->multi_pf);
2587 		nfp_net_keepalive_clear(pf_dev->multi_pf.beat_addr, pf_dev->multi_pf.function_id);
2588 		nfp_net_keepalive_uninit(&pf_dev->multi_pf);
2589 	}
2590 eth_table_cleanup:
2591 	free(nfp_eth_table);
2592 hwinfo_cleanup:
2593 	free(hwinfo);
2594 cpp_cleanup:
2595 	nfp_cpp_free(cpp);
2596 sync_free:
2597 	nfp_sync_free(sync);
2598 pf_cleanup:
2599 	rte_free(pf_dev);
2600 hw_priv_free:
2601 	rte_free(hw_priv);
2602 
2603 	return ret;
2604 }
2605 
2606 static int
2607 nfp_secondary_net_init(struct rte_eth_dev *eth_dev,
2608 		void *para)
2609 {
2610 	struct nfp_net_hw_priv *hw_priv;
2611 
2612 	hw_priv = para;
2613 	nfp_net_ethdev_ops_mount(hw_priv->pf_dev, eth_dev);
2614 
2615 	eth_dev->process_private = para;
2616 
2617 	return 0;
2618 }
2619 
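/*
 * Secondary process coreNIC attach: the port count comes from firmware
 * and the ethdevs are looked up by the same names the primary process
 * created them with, with no per-port private data.
 */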
2620 static int
2621 nfp_secondary_init_app_fw_nic(struct nfp_net_hw_priv *hw_priv)
2622 {
2623 	uint32_t i;
2624 	int ret = 0;
2625 	uint32_t total_vnics;
2626 	char port_name[RTE_ETH_NAME_MAX_LEN];
2627 	struct nfp_pf_dev *pf_dev = hw_priv->pf_dev;
2628 
2629 	total_vnics = nfp_net_get_phyports_from_fw(pf_dev);
2630 
2631 	for (i = 0; i < total_vnics; i++) {
2632 		nfp_port_name_generate(port_name, sizeof(port_name), i, pf_dev);
2633 
2634 		PMD_INIT_LOG(DEBUG, "Secondary attaching to port %s.", port_name);
2635 		ret = rte_eth_dev_create(&pf_dev->pci_dev->device, port_name, 0,
2636 				NULL, NULL, nfp_secondary_net_init, hw_priv);
2637 		if (ret != 0) {
2638 			PMD_INIT_LOG(ERR, "Secondary process attach to port %s failed.", port_name);
2639 			goto port_cleanup;
2640 		}
2641 	}
2642 
2643 	return 0;
2644 
2645 port_cleanup:
2646 	for (uint32_t j = 0; j < i; j++) {
2647 		struct rte_eth_dev *eth_dev;
2648 
2649 		nfp_port_name_generate(port_name, sizeof(port_name), j, pf_dev);
2650 		eth_dev = rte_eth_dev_get_by_name(port_name);
2651 		if (eth_dev != NULL)
2652 			rte_eth_dev_destroy(eth_dev, NULL);
2653 	}
2654 
2655 	return ret;
2656 }
2657 
2658 static int
2659 nfp_fw_app_secondary_init(struct nfp_net_hw_priv *hw_priv)
2660 {
2661 	int ret;
2662 	struct nfp_pf_dev *pf_dev = hw_priv->pf_dev;
2663 
2664 	switch (pf_dev->app_fw_id) {
2665 	case NFP_APP_FW_CORE_NIC:
2666 		PMD_INIT_LOG(INFO, "Initializing coreNIC.");
2667 		ret = nfp_secondary_init_app_fw_nic(hw_priv);
2668 		if (ret != 0) {
2669 			PMD_INIT_LOG(ERR, "Could not initialize coreNIC!");
2670 			return ret;
2671 		}
2672 		break;
2673 	case NFP_APP_FW_FLOWER_NIC:
2674 		PMD_INIT_LOG(INFO, "Initializing Flower.");
2675 		ret = nfp_secondary_init_app_fw_flower(hw_priv);
2676 		if (ret != 0) {
2677 			PMD_INIT_LOG(ERR, "Could not initialize Flower!");
2678 			return ret;
2679 		}
2680 		break;
2681 	default:
2682 		PMD_INIT_LOG(ERR, "Unsupported firmware loaded.");
2683 		ret = -EINVAL;
2684 		return ret;
2685 	}
2686 
2687 	return 0;
2688 }
2689 
2690 /*
2691  * When attaching to the NFP4000/6000 PF in a secondary process there
2692  * is no need to initialize the PF again. Only minimal work is required
2693  * here.
2694  */
2695 static int
2696 nfp_pf_secondary_init(struct rte_pci_device *pci_dev)
2697 {
2698 	void *sync;
2699 	int ret = 0;
2700 	struct nfp_cpp *cpp;
2701 	uint8_t function_id;
2702 	struct nfp_pf_dev *pf_dev;
2703 	enum nfp_app_fw_id app_fw_id;
2704 	char name[RTE_ETH_NAME_MAX_LEN];
2705 	struct nfp_rtsym_table *sym_tbl;
2706 	struct nfp_net_hw_priv *hw_priv;
2707 	const struct nfp_dev_info *dev_info;
2708 	char app_name[RTE_ETH_NAME_MAX_LEN];
2709 
2710 	if (pci_dev == NULL)
2711 		return -ENODEV;
2712 
2713 	if (pci_dev->mem_resource[0].addr == NULL) {
2714 		PMD_INIT_LOG(ERR, "The address of BAR0 is NULL.");
2715 		return -ENODEV;
2716 	}
2717 
2718 	dev_info = nfp_dev_info_get(pci_dev->id.device_id);
2719 	if (dev_info == NULL) {
2720 		PMD_INIT_LOG(ERR, "Unsupported device ID.");
2721 		return -ENODEV;
2722 	}
2723 
2724 	hw_priv = rte_zmalloc(NULL, sizeof(*hw_priv), 0);
2725 	if (hw_priv == NULL) {
2726 		PMD_INIT_LOG(ERR, "Can not alloc memory for hw priv data.");
2727 		return -ENOMEM;
2728 	}
2729 
2730 	/* Allocate memory for the PF "device" */
2731 	function_id = pci_dev->addr.function & 0x7;
2732 	snprintf(name, sizeof(name), "nfp_pf%d", 0);
2733 	pf_dev = rte_zmalloc(name, sizeof(*pf_dev), 0);
2734 	if (pf_dev == NULL) {
2735 		PMD_INIT_LOG(ERR, "Can not allocate memory for the PF device.");
2736 		ret = -ENOMEM;
2737 		goto hw_priv_free;
2738 	}
2739 
2740 	hw_priv->pf_dev = pf_dev;
2741 	hw_priv->dev_info = dev_info;
2742 
2743 	sync = nfp_sync_alloc();
2744 	if (sync == NULL) {
2745 		PMD_INIT_LOG(ERR, "Failed to alloc sync zone.");
2746 		ret = -ENOMEM;
2747 		goto pf_cleanup;
2748 	}
2749 
2750 	pf_dev->sync = sync;
2751 
2752 	/*
2753 	 * When the device is bound to UIO it could, by mistake, be used by
2754 	 * two DPDK apps at once, and the UIO driver does not prevent it.
2755 	 * This could lead to a serious problem when configuring the NFP CPP
2756 	 * interface. Here we avoid this by telling the CPP init code to
2757 	 * use a lock file if UIO is being used.
2758 	 */
2759 	if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO)
2760 		cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, false);
2761 	else
2762 		cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, true);
2763 
2764 	if (cpp == NULL) {
2765 		PMD_INIT_LOG(ERR, "A CPP handle can not be obtained.");
2766 		ret = -EIO;
2767 		goto sync_free;
2768 	}
2769 
2770 	pf_dev->cpp = cpp;
2771 	pf_dev->pci_dev = pci_dev;
2772 
2773 	/*
2774 	 * We don't have access to the PF created in the primary process
2775 	 * here so we have to read the number of ports from firmware.
2776 	 */
2777 	sym_tbl = nfp_rtsym_table_read(cpp);
2778 	if (sym_tbl == NULL) {
2779 		PMD_INIT_LOG(ERR, "Something is wrong with the firmware symbol table.");
2780 		ret = -EIO;
2781 		goto cpp_cleanup;
2782 	}
2783 
2784 	pf_dev->sym_tbl = sym_tbl;
2785 
2786 	/* Read the number of physical ports from firmware */
2787 	pf_dev->multi_pf.function_id = function_id;
2788 	pf_dev->total_phyports = nfp_net_get_phyports_from_fw(pf_dev);
2789 	pf_dev->multi_pf.enabled = nfp_check_multi_pf_from_fw(pf_dev->total_phyports);
2790 
2791 	/* Read the app ID of the firmware loaded */
2792 	snprintf(app_name, sizeof(app_name), "_pf%u_net_app_id", function_id);
2793 	app_fw_id = nfp_rtsym_read_le(sym_tbl, app_name, &ret);
2794 	if (ret != 0) {
2795 		PMD_INIT_LOG(ERR, "Could not read %s from firmware.", app_name);
2796 		ret = -EIO;
2797 		goto sym_tbl_cleanup;
2798 	}
2799 
2800 	pf_dev->app_fw_id = app_fw_id;
2801 
2802 	hw_priv->is_pf = true;
2803 
2804 	/* Call app specific init code now */
2805 	ret = nfp_fw_app_secondary_init(hw_priv);
2806 	if (ret != 0) {
2807 		PMD_INIT_LOG(ERR, "Failed to init hw app secondary.");
2808 		goto sym_tbl_cleanup;
2809 	}
2810 
2811 	return 0;
2812 
2813 sym_tbl_cleanup:
2814 	free(sym_tbl);
2815 cpp_cleanup:
2816 	nfp_cpp_free(cpp);
2817 sync_free:
2818 	nfp_sync_free(sync);
2819 pf_cleanup:
2820 	rte_free(pf_dev);
2821 hw_priv_free:
2822 	rte_free(hw_priv);
2823 
2824 	return ret;
2825 }
2826 
2827 static int
2828 nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2829 		struct rte_pci_device *dev)
2830 {
2831 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
2832 		return nfp_pf_init(dev);
2833 	else
2834 		return nfp_pf_secondary_init(dev);
2835 }
2836 
2837 static const struct rte_pci_id pci_id_nfp_pf_net_map[] = {
2838 	{
2839 		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
2840 				PCI_DEVICE_ID_NFP3800_PF_NIC)
2841 	},
2842 	{
2843 		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
2844 				PCI_DEVICE_ID_NFP4000_PF_NIC)
2845 	},
2846 	{
2847 		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
2848 				PCI_DEVICE_ID_NFP6000_PF_NIC)
2849 	},
2850 	{
2851 		RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE,
2852 				PCI_DEVICE_ID_NFP3800_PF_NIC)
2853 	},
2854 	{
2855 		RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE,
2856 				PCI_DEVICE_ID_NFP4000_PF_NIC)
2857 	},
2858 	{
2859 		RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE,
2860 				PCI_DEVICE_ID_NFP6000_PF_NIC)
2861 	},
2862 	{
2863 		.vendor_id = 0,
2864 	},
2865 };
2866 
2867 static int
2868 nfp_pci_uninit(struct rte_eth_dev *eth_dev)
2869 {
2870 	uint16_t port_id;
2871 	struct rte_pci_device *pci_dev;
2872 
2873 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2874 
2875 	/* Free up all physical ports under PF */
2876 	RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device)
2877 		rte_eth_dev_close(port_id);
2878 	/*
2879 	 * Ports can be closed and freed but hotplugging is not
2880 	 * currently supported.
2881 	 */
2882 	return -ENOTSUP;
2883 }
2884 
2885 static int
2886 eth_nfp_pci_remove(struct rte_pci_device *pci_dev)
2887 {
2888 	return rte_eth_dev_pci_generic_remove(pci_dev, nfp_pci_uninit);
2889 }
2890 
2891 static struct rte_pci_driver rte_nfp_net_pf_pmd = {
2892 	.id_table = pci_id_nfp_pf_net_map,
2893 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
2894 	.probe = nfp_pf_pci_probe,
2895 	.remove = eth_nfp_pci_remove,
2896 };
2897 
2898 RTE_PMD_REGISTER_PCI(NFP_PF_DRIVER_NAME, rte_nfp_net_pf_pmd);
2899 RTE_PMD_REGISTER_PCI_TABLE(NFP_PF_DRIVER_NAME, pci_id_nfp_pf_net_map);
2900 RTE_PMD_REGISTER_KMOD_DEP(NFP_PF_DRIVER_NAME, "* igb_uio | uio_pci_generic | vfio");
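/*
 * Illustrative devargs usage (hypothetical PCI address):
 *   dpdk-testpmd -a 0000:01:00.0,force_reload_fw=1,cpp_service_enable=1 -- -i
 */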
2901 RTE_PMD_REGISTER_PARAM_STRING(NFP_PF_DRIVER_NAME,
2902 		NFP_PF_FORCE_RELOAD_FW "=<0|1>"
2903 		NFP_CPP_SERVICE_ENABLE "=<0|1>");
2904