xref: /dpdk/drivers/net/nfp/nfp_ethdev.c (revision b9a87346b05c562dd6005ee025eca67a1a80bea8)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2014-2021 Netronome Systems, Inc.
3  * All rights reserved.
4  *
5  * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
6  */
7 
8 #include <unistd.h>
9 
10 #include <eal_firmware.h>
11 #include <rte_alarm.h>
12 #include <rte_kvargs.h>
13 
14 #include "flower/nfp_flower.h"
15 #include "nfd3/nfp_nfd3.h"
16 #include "nfdk/nfp_nfdk.h"
17 #include "nfpcore/nfp_cpp.h"
18 #include "nfpcore/nfp_elf.h"
19 #include "nfpcore/nfp_hwinfo.h"
20 #include "nfpcore/nfp_rtsym.h"
21 #include "nfpcore/nfp_nsp.h"
22 #include "nfpcore/nfp6000_pcie.h"
23 #include "nfpcore/nfp_resource.h"
24 #include "nfpcore/nfp_sync.h"
25 
26 #include "nfp_cpp_bridge.h"
27 #include "nfp_ipsec.h"
28 #include "nfp_logs.h"
29 #include "nfp_net_flow.h"
30 
31 /* 64-bit per app capabilities */
32 #define NFP_NET_APP_CAP_SP_INDIFF       RTE_BIT64(0) /* Indifferent to port speed */
33 
34 #define NFP_PF_DRIVER_NAME net_nfp_pf
35 #define NFP_PF_FORCE_RELOAD_FW   "force_reload_fw"
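
/*
 * Devargs usage sketch (the PCI address below is hypothetical):
 *     dpdk-testpmd -a 0000:01:00.0,force_reload_fw=1 -- -i
 */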
36 
37 static int
38 nfp_devarg_handle_int(const char *key,
39 		const char *value,
40 		void *extra_args)
41 {
42 	char *end_ptr;
43 	uint64_t *num = extra_args;
44 
45 	if (value == NULL)
46 		return -EPERM;
47 
	errno = 0;
48 	*num = strtoul(value, &end_ptr, 10);
49 	if (*num == ULONG_MAX && errno == ERANGE) {
50 		PMD_DRV_LOG(ERR, "%s: '%s' is not a valid param", key, value);
51 		return -ERANGE;
52 	} else if (value == end_ptr) {
53 		return -EPERM;
54 	}
55 
56 	return 0;
57 }
58 
59 static void
60 nfp_devarg_parse_force_reload_fw(struct rte_kvargs *kvlist,
61 		bool *force_reload_fw)
62 {
63 	int ret;
64 	uint64_t value;
65 
67 	if (rte_kvargs_count(kvlist, NFP_PF_FORCE_RELOAD_FW) != 1)
68 		return;
69 
70 	ret = rte_kvargs_process(kvlist, NFP_PF_FORCE_RELOAD_FW, &nfp_devarg_handle_int, &value);
71 	if (ret != 0)
72 		return;
73 
74 	if (value == 1)
75 		*force_reload_fw = true;
76 	else if (value == 0)
77 		*force_reload_fw = false;
78 	else
79 		PMD_DRV_LOG(ERR, "Invalid value for %s, expected 0 or 1",
80 				NFP_PF_FORCE_RELOAD_FW);
81 }
82 
83 static void
84 nfp_devargs_parse(struct nfp_devargs *nfp_devargs_param,
85 		const struct rte_devargs *devargs)
86 {
87 	struct rte_kvargs *kvlist;
88 
89 	if (devargs == NULL)
90 		return;
91 
92 	kvlist = rte_kvargs_parse(devargs->args, NULL);
93 	if (kvlist == NULL)
94 		return;
95 
96 	nfp_devarg_parse_force_reload_fw(kvlist, &nfp_devargs_param->force_reload_fw);
97 
98 	rte_kvargs_free(kvlist);
99 }
100 
101 static void
102 nfp_net_pf_read_mac(struct nfp_app_fw_nic *app_fw_nic,
103 		uint16_t port)
104 {
105 	struct nfp_net_hw *hw;
106 	struct nfp_eth_table *nfp_eth_table;
107 
108 	/* Grab a pointer to the correct physical port */
109 	hw = app_fw_nic->ports[port];
110 
111 	nfp_eth_table = app_fw_nic->pf_dev->nfp_eth_table;
112 
113 	rte_ether_addr_copy(&nfp_eth_table->ports[port].mac_addr, &hw->super.mac_addr);
114 }
115 
116 static uint32_t
117 nfp_net_speed_bitmap2speed(uint32_t speeds_bitmap)
118 {
119 	switch (speeds_bitmap) {
120 	case RTE_ETH_LINK_SPEED_10M_HD:
121 		return RTE_ETH_SPEED_NUM_10M;
122 	case RTE_ETH_LINK_SPEED_10M:
123 		return RTE_ETH_SPEED_NUM_10M;
124 	case RTE_ETH_LINK_SPEED_100M_HD:
125 		return RTE_ETH_SPEED_NUM_100M;
126 	case RTE_ETH_LINK_SPEED_100M:
127 		return RTE_ETH_SPEED_NUM_100M;
128 	case RTE_ETH_LINK_SPEED_1G:
129 		return RTE_ETH_SPEED_NUM_1G;
130 	case RTE_ETH_LINK_SPEED_2_5G:
131 		return RTE_ETH_SPEED_NUM_2_5G;
132 	case RTE_ETH_LINK_SPEED_5G:
133 		return RTE_ETH_SPEED_NUM_5G;
134 	case RTE_ETH_LINK_SPEED_10G:
135 		return RTE_ETH_SPEED_NUM_10G;
136 	case RTE_ETH_LINK_SPEED_20G:
137 		return RTE_ETH_SPEED_NUM_20G;
138 	case RTE_ETH_LINK_SPEED_25G:
139 		return RTE_ETH_SPEED_NUM_25G;
140 	case RTE_ETH_LINK_SPEED_40G:
141 		return RTE_ETH_SPEED_NUM_40G;
142 	case RTE_ETH_LINK_SPEED_50G:
143 		return RTE_ETH_SPEED_NUM_50G;
144 	case RTE_ETH_LINK_SPEED_56G:
145 		return RTE_ETH_SPEED_NUM_56G;
146 	case RTE_ETH_LINK_SPEED_100G:
147 		return RTE_ETH_SPEED_NUM_100G;
148 	case RTE_ETH_LINK_SPEED_200G:
149 		return RTE_ETH_SPEED_NUM_200G;
150 	case RTE_ETH_LINK_SPEED_400G:
151 		return RTE_ETH_SPEED_NUM_400G;
152 	default:
153 		return RTE_ETH_SPEED_NUM_NONE;
154 	}
155 }
156 
157 static int
158 nfp_net_nfp4000_speed_configure_check(uint16_t port_id,
159 		uint32_t configure_speed,
160 		struct nfp_eth_table *nfp_eth_table)
161 {
162 	switch (port_id) {
163 	case 0:
164 		if (configure_speed == RTE_ETH_SPEED_NUM_25G &&
165 				nfp_eth_table->ports[1].speed == RTE_ETH_SPEED_NUM_10G) {
166 			PMD_DRV_LOG(ERR, "The speed configuration is not supported for NFP4000.");
167 			return -ENOTSUP;
168 		}
169 		break;
170 	case 1:
171 		if (configure_speed == RTE_ETH_SPEED_NUM_10G &&
172 				nfp_eth_table->ports[0].speed == RTE_ETH_SPEED_NUM_25G) {
173 			PMD_DRV_LOG(ERR, "The speed configuration is not supported for NFP4000.");
174 			return -ENOTSUP;
175 		}
176 		break;
177 	default:
178 		PMD_DRV_LOG(ERR, "The port id is invalid.");
179 		return -EINVAL;
180 	}
181 
182 	return 0;
183 }
184 
185 static int
186 nfp_net_speed_configure(struct rte_eth_dev *dev,
187 		struct nfp_net_hw *net_hw)
188 {
189 	int ret;
190 	uint32_t speed_capa;
191 	struct nfp_nsp *nsp;
192 	uint32_t link_speeds;
193 	uint32_t configure_speed;
194 	struct nfp_eth_table_port *eth_port;
195 	struct nfp_eth_table *nfp_eth_table;
196 
197 	nfp_eth_table = net_hw->pf_dev->nfp_eth_table;
198 	eth_port = &nfp_eth_table->ports[net_hw->idx];
199 
200 	speed_capa = net_hw->pf_dev->speed_capa;
201 	if (speed_capa == 0) {
202 		PMD_DRV_LOG(ERR, "Speed_capa is invalid.");
203 		return -EINVAL;
204 	}
205 
206 	link_speeds = dev->data->dev_conf.link_speeds;
207 	configure_speed = nfp_net_speed_bitmap2speed(speed_capa & link_speeds);
208 	if (configure_speed == RTE_ETH_SPEED_NUM_NONE &&
209 			link_speeds != RTE_ETH_LINK_SPEED_AUTONEG) {
210 		PMD_DRV_LOG(ERR, "Configured speed is invalid.");
211 		return -EINVAL;
212 	}
213 
214 	/* The NFP4000 does not allow port 0 at 25Gbps while port 1 is at 10Gbps. */
215 	if (net_hw->device_id == PCI_DEVICE_ID_NFP4000_PF_NIC) {
216 		ret = nfp_net_nfp4000_speed_configure_check(net_hw->idx,
217 				configure_speed, nfp_eth_table);
218 		if (ret != 0) {
219 			PMD_DRV_LOG(ERR, "Failed to configure speed for NFP4000.");
220 			return ret;
221 		}
222 	}
223 
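	/*
	 * The NSP ethernet config is transactional: changes staged after
	 * nfp_eth_config_start() only take effect on
	 * nfp_eth_config_commit_end(), and nfp_eth_config_cleanup_end()
	 * discards them on the error path.
	 */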
224 	nsp = nfp_eth_config_start(net_hw->cpp, eth_port->index);
225 	if (nsp == NULL) {
226 		PMD_DRV_LOG(ERR, "Couldn't get NSP.");
227 		return -EIO;
228 	}
229 
230 	if (link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
231 		if (eth_port->supp_aneg) {
232 			ret = nfp_eth_set_aneg(nsp, NFP_ANEG_AUTO);
233 			if (ret != 0) {
234 				PMD_DRV_LOG(ERR, "Failed to set ANEG enable.");
235 				goto config_cleanup;
236 			}
237 		}
238 	} else {
239 		ret = nfp_eth_set_aneg(nsp, NFP_ANEG_DISABLED);
240 		if (ret != 0) {
241 			PMD_DRV_LOG(ERR, "Failed to set ANEG disable.");
242 			goto config_cleanup;
243 		}
244 
245 		ret = nfp_eth_set_speed(nsp, configure_speed);
246 		if (ret != 0) {
247 			PMD_DRV_LOG(ERR, "Failed to set speed.");
248 			goto config_cleanup;
249 		}
250 	}
251 
252 	return nfp_eth_config_commit_end(nsp);
253 
254 config_cleanup:
255 	nfp_eth_config_cleanup_end(nsp);
256 
257 	return ret;
258 }
259 
260 static int
261 nfp_net_start(struct rte_eth_dev *dev)
262 {
263 	int ret;
264 	uint16_t i;
265 	struct nfp_hw *hw;
266 	uint32_t new_ctrl;
267 	struct nfp_cpp *cpp;
268 	uint32_t update = 0;
269 	uint32_t cap_extend;
270 	uint32_t intr_vector;
271 	uint32_t ctrl_extend = 0;
272 	struct nfp_net_hw *net_hw;
273 	struct nfp_pf_dev *pf_dev;
274 	struct rte_eth_rxmode *rxmode;
275 	struct nfp_app_fw_nic *app_fw_nic;
276 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
277 	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
278 
279 	net_hw = dev->data->dev_private;
280 	pf_dev = net_hw->pf_dev;
281 	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);
282 	hw = &net_hw->super;
283 
284 	/* Disabling queues just in case... */
285 	nfp_net_disable_queues(dev);
286 
287 	/* Enabling the required queues in the device */
288 	nfp_net_enable_queues(dev);
289 
290 	/* Configure the port speed and the auto-negotiation mode. */
291 	ret = nfp_net_speed_configure(dev, net_hw);
292 	if (ret < 0) {
293 		PMD_DRV_LOG(ERR, "Failed to set the speed and auto-negotiation mode.");
294 		return ret;
295 	}
296 
297 	/* Check and configure queue intr-vector mapping */
298 	if (dev->data->dev_conf.intr_conf.rxq != 0) {
299 		if (app_fw_nic->multiport) {
300 			PMD_INIT_LOG(ERR, "PMD rx interrupt is not supported "
301 					"with NFP multiport PF");
302 			return -EINVAL;
303 		}
304 
305 		if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_UIO) {
306 			/*
307 			 * Better not to share LSC with RX interrupts.
308 			 * Unregistering LSC interrupt handler.
309 			 */
310 			rte_intr_callback_unregister(intr_handle,
311 					nfp_net_dev_interrupt_handler, (void *)dev);
312 
313 			if (dev->data->nb_rx_queues > 1) {
314 				PMD_INIT_LOG(ERR, "PMD rx interrupt only "
315 						"supports 1 queue with UIO");
316 				return -EIO;
317 			}
318 		}
319 
320 		intr_vector = dev->data->nb_rx_queues;
321 		if (rte_intr_efd_enable(intr_handle, intr_vector) != 0)
322 			return -1;
323 
324 		nfp_configure_rx_interrupt(dev, intr_handle);
325 		update = NFP_NET_CFG_UPDATE_MSIX;
326 	}
327 
328 	/* Checking MTU set */
329 	if (dev->data->mtu > net_hw->flbufsz) {
330 		PMD_INIT_LOG(ERR, "MTU (%u) can't be larger than the current NFP_FRAME_SIZE (%u)",
331 				dev->data->mtu, net_hw->flbufsz);
332 		return -ERANGE;
333 	}
334 
335 	rte_intr_enable(intr_handle);
336 
337 	new_ctrl = nfp_check_offloads(dev);
338 
339 	/* Writing configuration parameters in the device */
340 	nfp_net_params_setup(net_hw);
341 
342 	rxmode = &dev->data->dev_conf.rxmode;
343 	if ((rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) != 0) {
344 		nfp_net_rss_config_default(dev);
345 		update |= NFP_NET_CFG_UPDATE_RSS;
346 		new_ctrl |= nfp_net_cfg_ctrl_rss(hw->cap);
347 	}
348 
349 	/* Enable device */
350 	new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
351 
352 	update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;
353 
354 	/* Enable vxlan */
355 	if ((hw->cap & NFP_NET_CFG_CTRL_VXLAN) != 0) {
356 		new_ctrl |= NFP_NET_CFG_CTRL_VXLAN;
357 		update |= NFP_NET_CFG_UPDATE_VXLAN;
358 	}
359 
360 	if ((hw->cap & NFP_NET_CFG_CTRL_RINGCFG) != 0)
361 		new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;
362 
363 	if ((hw->cap & NFP_NET_CFG_CTRL_TXRWB) != 0)
364 		new_ctrl |= NFP_NET_CFG_CTRL_TXRWB;
365 
366 	if (nfp_reconfig(hw, new_ctrl, update) != 0)
367 		return -EIO;
368 
369 	hw->ctrl = new_ctrl;
370 
371 	/* Enable packet type offload by extended ctrl word1. */
372 	cap_extend = hw->cap_ext;
373 	if ((cap_extend & NFP_NET_CFG_CTRL_PKT_TYPE) != 0)
374 		ctrl_extend = NFP_NET_CFG_CTRL_PKT_TYPE;
375 
376 	if ((cap_extend & NFP_NET_CFG_CTRL_IPSEC) != 0)
377 		ctrl_extend |= NFP_NET_CFG_CTRL_IPSEC |
378 				NFP_NET_CFG_CTRL_IPSEC_SM_LOOKUP |
379 				NFP_NET_CFG_CTRL_IPSEC_LM_LOOKUP;
380 
381 	/* Enable flow steer by extended ctrl word1. */
382 	if ((cap_extend & NFP_NET_CFG_CTRL_FLOW_STEER) != 0)
383 		ctrl_extend |= NFP_NET_CFG_CTRL_FLOW_STEER;
384 
385 	update = NFP_NET_CFG_UPDATE_GEN;
386 	if (nfp_ext_reconfig(hw, ctrl_extend, update) != 0)
387 		return -EIO;
388 
389 	hw->ctrl_ext = ctrl_extend;
390 
391 	/*
392 	 * Allocating rte mbufs for configured rx queues.
393 	 * This requires the queues to be enabled beforehand.
394 	 */
395 	if (nfp_net_rx_freelist_setup(dev) != 0) {
396 		ret = -ENOMEM;
397 		goto error;
398 	}
399 
400 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
401 		cpp = net_hw->cpp;
402 	else
403 		cpp = ((struct nfp_pf_dev *)(dev->process_private))->cpp;
404 
405 	/* Configure the physical port up */
406 	nfp_eth_set_configured(cpp, net_hw->nfp_idx, 1);
407 
408 	for (i = 0; i < dev->data->nb_rx_queues; i++)
409 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
410 	for (i = 0; i < dev->data->nb_tx_queues; i++)
411 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
412 
413 	return 0;
414 
415 error:
416 	/*
417 	 * An error returned by this function should mean the app is
418 	 * exiting, at which point the system releases all the memory
419 	 * allocated, even memory coming from hugepages.
420 	 *
421 	 * The device could be enabled at this point with some queues
422 	 * ready for getting packets. This is true if the call to
423 	 * nfp_net_rx_freelist_setup() succeeds for some queues but
424 	 * fails for subsequent queues.
425 	 *
426 	 * This should make the app exit, but it is better to tell the
427 	 * device first.
428 	 */
429 	nfp_net_disable_queues(dev);
430 
431 	return ret;
432 }
433 
434 /* Set the link up. */
435 static int
436 nfp_net_set_link_up(struct rte_eth_dev *dev)
437 {
438 	struct nfp_cpp *cpp;
439 	struct nfp_net_hw *hw;
440 
441 	hw = dev->data->dev_private;
442 
443 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
444 		cpp = hw->cpp;
445 	else
446 		cpp = ((struct nfp_pf_dev *)(dev->process_private))->cpp;
447 
448 	return nfp_eth_set_configured(cpp, hw->nfp_idx, 1);
449 }
450 
451 /* Set the link down. */
452 static int
453 nfp_net_set_link_down(struct rte_eth_dev *dev)
454 {
455 	struct nfp_cpp *cpp;
456 	struct nfp_net_hw *hw;
457 
458 	hw = dev->data->dev_private;
459 
460 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
461 		cpp = hw->cpp;
462 	else
463 		cpp = ((struct nfp_pf_dev *)(dev->process_private))->cpp;
464 
465 	return nfp_eth_set_configured(cpp, hw->nfp_idx, 0);
466 }
467 
468 static uint8_t
469 nfp_function_id_get(const struct nfp_pf_dev *pf_dev,
470 		uint8_t phy_port)
471 {
472 	if (pf_dev->multi_pf.enabled)
473 		return pf_dev->multi_pf.function_id;
474 
475 	return phy_port;
476 }
477 
478 static void
479 nfp_net_beat_timer(void *arg)
480 {
481 	uint64_t cur_sec;
482 	struct nfp_multi_pf *multi_pf = arg;
483 
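	/*
	 * The TSC value is used purely as a monotonically increasing
	 * heartbeat; peers only check that the value keeps changing,
	 * not that it encodes real time.
	 */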
484 	cur_sec = rte_rdtsc();
485 	nn_writeq(cur_sec, multi_pf->beat_addr + NFP_BEAT_OFFSET(multi_pf->function_id));
486 
487 	/* Beat once per second. */
488 	if (rte_eal_alarm_set(1000 * 1000, nfp_net_beat_timer,
489 			(void *)multi_pf) < 0) {
490 		PMD_DRV_LOG(ERR, "Error setting alarm");
491 	}
492 }
493 
494 static int
495 nfp_net_keepalive_init(struct nfp_cpp *cpp,
496 		struct nfp_multi_pf *multi_pf)
497 {
498 	uint8_t *base;
499 	uint64_t addr;
500 	uint32_t size;
501 	uint32_t cpp_id;
502 	struct nfp_resource *res;
503 
504 	res = nfp_resource_acquire(cpp, NFP_RESOURCE_KEEPALIVE);
505 	if (res == NULL)
506 		return -EIO;
507 
508 	cpp_id = nfp_resource_cpp_id(res);
509 	addr = nfp_resource_address(res);
510 	size = nfp_resource_size(res);
511 
512 	nfp_resource_release(res);
513 
514 	/* Allocate a fixed area for keepalive. */
515 	base = nfp_cpp_map_area(cpp, cpp_id, addr, size, &multi_pf->beat_area);
516 	if (base == NULL) {
517 		PMD_DRV_LOG(ERR, "Failed to map area for keepalive.");
518 		return -EIO;
519 	}
520 
521 	multi_pf->beat_addr = base;
522 
523 	return 0;
524 }
525 
526 static void
527 nfp_net_keepalive_uninit(struct nfp_multi_pf *multi_pf)
528 {
529 	nfp_cpp_area_release_free(multi_pf->beat_area);
530 }
531 
532 static int
533 nfp_net_keepalive_start(struct nfp_multi_pf *multi_pf)
534 {
535 	if (rte_eal_alarm_set(1000 * 1000, nfp_net_beat_timer,
536 			(void *)multi_pf) < 0) {
537 		PMD_DRV_LOG(ERR, "Error setting alarm");
538 		return -EIO;
539 	}
540 
541 	return 0;
542 }
543 
544 static void
545 nfp_net_keepalive_clear(uint8_t *beat_addr,
546 		uint8_t function_id)
547 {
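	/* A heartbeat value of zero marks this PF slot as not in use. */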
548 	nn_writeq(0, beat_addr + NFP_BEAT_OFFSET(function_id));
549 }
550 
551 static void
552 nfp_net_keepalive_clear_others(const struct nfp_dev_info *dev_info,
553 		struct nfp_multi_pf *multi_pf)
554 {
555 	uint8_t port_num;
556 
557 	for (port_num = 0; port_num < dev_info->pf_num_per_unit; port_num++) {
558 		if (port_num == multi_pf->function_id)
559 			continue;
560 
561 		nfp_net_keepalive_clear(multi_pf->beat_addr, port_num);
562 	}
563 }
564 
565 static void
566 nfp_net_keepalive_stop(struct nfp_multi_pf *multi_pf)
567 {
568 	/* Cancel keepalive for multiple PF setup */
569 	rte_eal_alarm_cancel(nfp_net_beat_timer, (void *)multi_pf);
570 }
571 
572 static void
573 nfp_net_uninit(struct rte_eth_dev *eth_dev)
574 {
575 	struct nfp_net_hw *net_hw;
576 
577 	net_hw = eth_dev->data->dev_private;
578 
579 	if ((net_hw->super.cap_ext & NFP_NET_CFG_CTRL_FLOW_STEER) != 0)
580 		nfp_net_flow_priv_uninit(net_hw->pf_dev, net_hw->idx);
581 
582 	rte_free(net_hw->eth_xstats_base);
583 	if ((net_hw->super.cap & NFP_NET_CFG_CTRL_TXRWB) != 0)
584 		nfp_net_txrwb_free(eth_dev);
585 	nfp_ipsec_uninit(eth_dev);
586 	if (net_hw->mac_stats_area != NULL)
587 		nfp_cpp_area_release_free(net_hw->mac_stats_area);
588 }
589 
590 static void
591 nfp_cleanup_port_app_fw_nic(struct nfp_pf_dev *pf_dev,
592 		uint8_t id)
593 {
594 	struct rte_eth_dev *eth_dev;
595 	struct nfp_app_fw_nic *app_fw_nic;
596 
597 	app_fw_nic = pf_dev->app_fw_priv;
598 	if (app_fw_nic->ports[id] != NULL) {
599 		eth_dev = app_fw_nic->ports[id]->eth_dev;
600 		if (eth_dev != NULL)
601 			nfp_net_uninit(eth_dev);
602 
603 		app_fw_nic->ports[id] = NULL;
604 	}
605 }
606 
607 static void
608 nfp_uninit_app_fw_nic(struct nfp_pf_dev *pf_dev)
609 {
610 	nfp_cpp_area_release_free(pf_dev->ctrl_area);
611 	rte_free(pf_dev->app_fw_priv);
612 }
613 
614 void
615 nfp_pf_uninit(struct nfp_pf_dev *pf_dev)
616 {
617 	nfp_cpp_area_release_free(pf_dev->qc_area);
618 	free(pf_dev->sym_tbl);
619 	if (pf_dev->multi_pf.enabled) {
620 		nfp_net_keepalive_stop(&pf_dev->multi_pf);
621 		nfp_net_keepalive_clear(pf_dev->multi_pf.beat_addr, pf_dev->multi_pf.function_id);
622 		nfp_net_keepalive_uninit(&pf_dev->multi_pf);
623 	}
624 	free(pf_dev->nfp_eth_table);
625 	free(pf_dev->hwinfo);
626 	nfp_cpp_free(pf_dev->cpp);
627 	nfp_sync_free(pf_dev->sync);
628 	rte_free(pf_dev);
629 }
630 
631 static int
632 nfp_pf_secondary_uninit(struct nfp_pf_dev *pf_dev)
633 {
634 	free(pf_dev->sym_tbl);
635 	nfp_cpp_free(pf_dev->cpp);
636 	nfp_sync_free(pf_dev->sync);
637 	rte_free(pf_dev);
638 
639 	return 0;
640 }
641 
642 /* Reset and stop device. The device can not be restarted. */
643 static int
644 nfp_net_close(struct rte_eth_dev *dev)
645 {
646 	uint8_t i;
647 	uint8_t id;
648 	struct nfp_net_hw *hw;
649 	struct nfp_pf_dev *pf_dev;
650 	struct rte_pci_device *pci_dev;
651 	struct nfp_app_fw_nic *app_fw_nic;
652 
653 	/*
654 	 * In secondary process, a released eth device can be found by its name
655 	 * in shared memory.
656 	 * If the state of the eth device is RTE_ETH_DEV_UNUSED, it means the
657 	 * eth device has been released.
658 	 */
659 	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
660 		if (dev->state == RTE_ETH_DEV_UNUSED)
661 			return 0;
662 
663 		nfp_pf_secondary_uninit(dev->process_private);
664 		return 0;
665 	}
666 
667 	hw = dev->data->dev_private;
668 	pf_dev = hw->pf_dev;
669 	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
670 	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);
671 
672 	/*
673 	 * We assume that the DPDK application is stopping all the
674 	 * threads/queues before calling the device close function.
675 	 */
676 	nfp_net_disable_queues(dev);
677 
678 	/* Clear queues */
679 	nfp_net_close_tx_queue(dev);
680 	nfp_net_close_rx_queue(dev);
681 
682 	/* Cancel possible impending LSC work here before releasing the port */
683 	rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler, (void *)dev);
684 
685 	/* Only free PF resources after all physical ports have been closed */
686 	/* Mark this port as unused and free device priv resources */
687 	nn_cfg_writeb(&hw->super, NFP_NET_CFG_LSC, 0xff);
688 
689 	if (pf_dev->app_fw_id != NFP_APP_FW_CORE_NIC)
690 		return -EINVAL;
691 
692 	nfp_cleanup_port_app_fw_nic(pf_dev, hw->idx);
693 
694 	for (i = 0; i < app_fw_nic->total_phyports; i++) {
695 		id = nfp_function_id_get(pf_dev, i);
696 
697 		/* Check to see if ports are still in use */
698 		if (app_fw_nic->ports[id] != NULL)
699 			return 0;
700 	}
701 
702 	/* Enable in nfp_net_start() */
703 	rte_intr_disable(pci_dev->intr_handle);
704 
705 	/* Register in nfp_net_init() */
706 	rte_intr_callback_unregister(pci_dev->intr_handle,
707 			nfp_net_dev_interrupt_handler, (void *)dev);
708 
709 	nfp_uninit_app_fw_nic(pf_dev);
710 	nfp_pf_uninit(pf_dev);
711 
712 	return 0;
713 }
714 
715 static int
716 nfp_net_find_vxlan_idx(struct nfp_net_hw *hw,
717 		uint16_t port,
718 		uint32_t *idx)
719 {
720 	uint32_t i;
721 	int free_idx = -1;
722 
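	/*
	 * Prefer the slot already holding this port so the use count is
	 * shared; otherwise take the first slot with a zero use count.
	 */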
723 	for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i++) {
724 		if (hw->vxlan_ports[i] == port) {
725 			free_idx = i;
726 			break;
727 		}
728 
729 		if (hw->vxlan_usecnt[i] == 0) {
730 			free_idx = i;
731 			break;
732 		}
733 	}
734 
735 	if (free_idx == -1)
736 		return -EINVAL;
737 
738 	*idx = free_idx;
739 
740 	return 0;
741 }
742 
743 static int
744 nfp_udp_tunnel_port_add(struct rte_eth_dev *dev,
745 		struct rte_eth_udp_tunnel *tunnel_udp)
746 {
747 	int ret;
748 	uint32_t idx;
749 	uint16_t vxlan_port;
750 	struct nfp_net_hw *hw;
751 	enum rte_eth_tunnel_type tnl_type;
752 
753 	hw = dev->data->dev_private;
754 	vxlan_port = tunnel_udp->udp_port;
755 	tnl_type   = tunnel_udp->prot_type;
756 
757 	if (tnl_type != RTE_ETH_TUNNEL_TYPE_VXLAN) {
758 		PMD_DRV_LOG(ERR, "Not VXLAN tunnel");
759 		return -ENOTSUP;
760 	}
761 
762 	ret = nfp_net_find_vxlan_idx(hw, vxlan_port, &idx);
763 	if (ret != 0) {
764 		PMD_DRV_LOG(ERR, "Failed to find valid vxlan idx");
765 		return -EINVAL;
766 	}
767 
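	/* First user of this UDP port: program it into the hardware slot. */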
768 	if (hw->vxlan_usecnt[idx] == 0) {
769 		ret = nfp_net_set_vxlan_port(hw, idx, vxlan_port);
770 		if (ret != 0) {
771 			PMD_DRV_LOG(ERR, "Failed to set vxlan port");
772 			return -EINVAL;
773 		}
774 	}
775 
776 	hw->vxlan_usecnt[idx]++;
777 
778 	return 0;
779 }
780 
781 static int
782 nfp_udp_tunnel_port_del(struct rte_eth_dev *dev,
783 		struct rte_eth_udp_tunnel *tunnel_udp)
784 {
785 	int ret;
786 	uint32_t idx;
787 	uint16_t vxlan_port;
788 	struct nfp_net_hw *hw;
789 	enum rte_eth_tunnel_type tnl_type;
790 
791 	hw = dev->data->dev_private;
792 	vxlan_port = tunnel_udp->udp_port;
793 	tnl_type   = tunnel_udp->prot_type;
794 
795 	if (tnl_type != RTE_ETH_TUNNEL_TYPE_VXLAN) {
796 		PMD_DRV_LOG(ERR, "Not VXLAN tunnel");
797 		return -ENOTSUP;
798 	}
799 
800 	ret = nfp_net_find_vxlan_idx(hw, vxlan_port, &idx);
801 	if (ret != 0 || hw->vxlan_usecnt[idx] == 0) {
802 		PMD_DRV_LOG(ERR, "Failed to find valid vxlan idx");
803 		return -EINVAL;
804 	}
805 
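	/* Drop one reference; clear the hardware slot when the last user is gone. */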
806 	hw->vxlan_usecnt[idx]--;
807 
808 	if (hw->vxlan_usecnt[idx] == 0) {
809 		ret = nfp_net_set_vxlan_port(hw, idx, 0);
810 		if (ret != 0) {
811 			PMD_DRV_LOG(ERR, "Failed to set vxlan port");
812 			return -EINVAL;
813 		}
814 	}
815 
816 	return 0;
817 }
818 
819 /* Initialise and register driver with DPDK Application */
820 static const struct eth_dev_ops nfp_net_eth_dev_ops = {
821 	.dev_configure          = nfp_net_configure,
822 	.dev_start              = nfp_net_start,
823 	.dev_stop               = nfp_net_stop,
824 	.dev_set_link_up        = nfp_net_set_link_up,
825 	.dev_set_link_down      = nfp_net_set_link_down,
826 	.dev_close              = nfp_net_close,
827 	.promiscuous_enable     = nfp_net_promisc_enable,
828 	.promiscuous_disable    = nfp_net_promisc_disable,
829 	.allmulticast_enable    = nfp_net_allmulticast_enable,
830 	.allmulticast_disable   = nfp_net_allmulticast_disable,
831 	.link_update            = nfp_net_link_update,
832 	.stats_get              = nfp_net_stats_get,
833 	.stats_reset            = nfp_net_stats_reset,
834 	.xstats_get             = nfp_net_xstats_get,
835 	.xstats_reset           = nfp_net_xstats_reset,
836 	.xstats_get_names       = nfp_net_xstats_get_names,
837 	.xstats_get_by_id       = nfp_net_xstats_get_by_id,
838 	.xstats_get_names_by_id = nfp_net_xstats_get_names_by_id,
839 	.dev_infos_get          = nfp_net_infos_get,
840 	.dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
841 	.mtu_set                = nfp_net_dev_mtu_set,
842 	.mac_addr_set           = nfp_net_set_mac_addr,
843 	.vlan_offload_set       = nfp_net_vlan_offload_set,
844 	.reta_update            = nfp_net_reta_update,
845 	.reta_query             = nfp_net_reta_query,
846 	.rss_hash_update        = nfp_net_rss_hash_update,
847 	.rss_hash_conf_get      = nfp_net_rss_hash_conf_get,
848 	.rx_queue_setup         = nfp_net_rx_queue_setup,
849 	.rx_queue_release       = nfp_net_rx_queue_release,
850 	.tx_queue_setup         = nfp_net_tx_queue_setup,
851 	.tx_queue_release       = nfp_net_tx_queue_release,
852 	.rx_queue_intr_enable   = nfp_rx_queue_intr_enable,
853 	.rx_queue_intr_disable  = nfp_rx_queue_intr_disable,
854 	.udp_tunnel_port_add    = nfp_udp_tunnel_port_add,
855 	.udp_tunnel_port_del    = nfp_udp_tunnel_port_del,
856 	.fw_version_get         = nfp_net_firmware_version_get,
857 	.flow_ctrl_get          = nfp_net_flow_ctrl_get,
858 	.flow_ctrl_set          = nfp_net_flow_ctrl_set,
859 	.flow_ops_get           = nfp_net_flow_ops_get,
860 	.fec_get_capability     = nfp_net_fec_get_capability,
861 	.fec_get                = nfp_net_fec_get,
862 	.fec_set                = nfp_net_fec_set,
863 };
864 
865 static inline void
866 nfp_net_ethdev_ops_mount(struct nfp_net_hw *hw,
867 		struct rte_eth_dev *eth_dev)
868 {
869 	if (hw->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3)
870 		eth_dev->tx_pkt_burst = nfp_net_nfd3_xmit_pkts;
871 	else
872 		eth_dev->tx_pkt_burst = nfp_net_nfdk_xmit_pkts;
873 
874 	eth_dev->dev_ops = &nfp_net_eth_dev_ops;
875 	eth_dev->rx_queue_count = nfp_net_rx_queue_count;
876 	eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
877 }
878 
879 static int
880 nfp_net_init(struct rte_eth_dev *eth_dev)
881 {
882 	int err;
883 	uint16_t port;
884 	uint64_t rx_base;
885 	uint64_t tx_base;
886 	struct nfp_hw *hw;
887 	struct nfp_net_hw *net_hw;
888 	struct nfp_pf_dev *pf_dev;
889 	struct rte_pci_device *pci_dev;
890 	struct nfp_app_fw_nic *app_fw_nic;
891 
892 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
893 	net_hw = eth_dev->data->dev_private;
894 
895 	/* Use backpointer here to the PF of this eth_dev */
896 	pf_dev = net_hw->pf_dev;
897 
898 	/* Use backpointer to the CoreNIC app struct */
899 	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);
900 
901 	port = ((struct nfp_net_hw *)eth_dev->data->dev_private)->idx;
902 	if (port > 7) {
903 		PMD_DRV_LOG(ERR, "Invalid port index %hu", port);
904 		return -ENODEV;
905 	}
906 
907 	hw = &net_hw->super;
908 
909 	PMD_INIT_LOG(DEBUG, "Working with physical port number: %hu, "
910 			"NFP internal port number: %d", port, net_hw->nfp_idx);
911 
912 	rte_eth_copy_pci_info(eth_dev, pci_dev);
913 
914 	if (port == 0 || pf_dev->multi_pf.enabled) {
915 		uint32_t min_size;
916 
917 		hw->ctrl_bar = pf_dev->ctrl_bar;
918 		min_size = NFP_MAC_STATS_SIZE * net_hw->pf_dev->nfp_eth_table->max_index;
919 		net_hw->mac_stats_bar = nfp_rtsym_map(net_hw->pf_dev->sym_tbl, "_mac_stats",
920 				min_size, &net_hw->mac_stats_area);
921 		if (net_hw->mac_stats_bar == NULL) {
922 			PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for _mac_stats");
923 			return -EIO;
924 		}
925 
926 		net_hw->mac_stats = net_hw->mac_stats_bar;
927 	} else {
928 		/* Use the port offset in the PF ctrl_bar for this port's control bar */
929 		hw->ctrl_bar = pf_dev->ctrl_bar + (port * NFP_NET_CFG_BAR_SZ);
930 		net_hw->mac_stats = app_fw_nic->ports[0]->mac_stats_bar +
931 				(net_hw->nfp_idx * NFP_MAC_STATS_SIZE);
932 	}
933 
934 	PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar);
935 	PMD_INIT_LOG(DEBUG, "MAC stats: %p", net_hw->mac_stats);
936 
937 	err = nfp_net_common_init(pci_dev, net_hw);
938 	if (err != 0)
939 		goto free_area;
940 
941 	err = nfp_net_tlv_caps_parse(eth_dev);
942 	if (err != 0) {
943 		PMD_INIT_LOG(ERR, "Failed to parse TLV caps");
944 		goto free_area;
945 	}
946 
947 	err = nfp_ipsec_init(eth_dev);
948 	if (err != 0) {
949 		PMD_INIT_LOG(ERR, "Failed to init IPsec module");
950 		goto free_area;
951 	}
952 
953 	nfp_net_ethdev_ops_mount(net_hw, eth_dev);
954 
955 	net_hw->eth_xstats_base = rte_malloc("rte_eth_xstat", sizeof(struct rte_eth_xstat) *
956 			nfp_net_xstats_size(eth_dev), 0);
957 	if (net_hw->eth_xstats_base == NULL) {
958 		PMD_INIT_LOG(ERR, "No memory for xstats base values on device %s",
959 				pci_dev->device.name);
960 		err = -ENOMEM;
961 		goto ipsec_exit;
962 	}
963 
964 	/* Work out where in the BAR the queues start. */
965 	tx_base = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
966 	rx_base = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
967 
968 	net_hw->tx_bar = pf_dev->qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
969 	net_hw->rx_bar = pf_dev->qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ;
970 	eth_dev->data->dev_private = net_hw;
971 
972 	PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
973 			hw->ctrl_bar, net_hw->tx_bar, net_hw->rx_bar);
974 
975 	nfp_net_cfg_queue_setup(net_hw);
976 	net_hw->mtu = RTE_ETHER_MTU;
977 
978 	/* VLAN insertion is incompatible with LSOv2 */
979 	if ((hw->cap & NFP_NET_CFG_CTRL_LSO2) != 0)
980 		hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN;
981 
982 	nfp_net_log_device_information(net_hw);
983 
984 	/* Initializing spinlock for reconfigs */
985 	rte_spinlock_init(&hw->reconfig_lock);
986 
987 	/* Allocating memory for mac addr */
988 	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", RTE_ETHER_ADDR_LEN, 0);
989 	if (eth_dev->data->mac_addrs == NULL) {
990 		PMD_INIT_LOG(ERR, "Failed to allocate space for MAC address");
991 		err = -ENOMEM;
992 		goto xstats_free;
993 	}
994 
995 	if ((hw->cap & NFP_NET_CFG_CTRL_TXRWB) != 0) {
996 		err = nfp_net_txrwb_alloc(eth_dev);
997 		if (err != 0)
998 			goto xstats_free;
999 	}
1000 
1001 	nfp_net_pf_read_mac(app_fw_nic, port);
1002 	nfp_write_mac(hw, &hw->mac_addr.addr_bytes[0]);
1003 
1004 	if (rte_is_valid_assigned_ether_addr(&hw->mac_addr) == 0) {
1005 		PMD_INIT_LOG(INFO, "Using random mac address for port %d", port);
1006 		/* Generate a random MAC address for this physical port */
1007 		rte_eth_random_addr(&hw->mac_addr.addr_bytes[0]);
1008 		nfp_write_mac(hw, &hw->mac_addr.addr_bytes[0]);
1009 	}
1010 
1011 	/* Copying mac address to DPDK eth_dev struct */
1012 	rte_ether_addr_copy(&hw->mac_addr, eth_dev->data->mac_addrs);
1013 
1014 	if ((hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR) == 0)
1015 		eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR;
1016 
1017 	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
1018 
1019 	PMD_INIT_LOG(INFO, "port %d VendorID=%#x DeviceID=%#x "
1020 			"mac=" RTE_ETHER_ADDR_PRT_FMT,
1021 			eth_dev->data->port_id, pci_dev->id.vendor_id,
1022 			pci_dev->id.device_id,
1023 			RTE_ETHER_ADDR_BYTES(&hw->mac_addr));
1024 
1025 	/* Registering LSC interrupt handler */
1026 	rte_intr_callback_register(pci_dev->intr_handle,
1027 			nfp_net_dev_interrupt_handler, (void *)eth_dev);
1028 	/* Telling the firmware about the LSC interrupt entry */
1029 	nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
1030 	/* Unmask the LSC interrupt */
1031 	nfp_net_irq_unmask(eth_dev);
1032 	/* Recording current stats counters values */
1033 	nfp_net_stats_reset(eth_dev);
1034 
1035 	if ((hw->cap_ext & NFP_NET_CFG_CTRL_FLOW_STEER) != 0) {
1036 		err = nfp_net_flow_priv_init(pf_dev, port);
1037 		if (err != 0) {
1038 			PMD_INIT_LOG(ERR, "Init net flow priv failed");
1039 			goto txrwb_free;
1040 		}
1041 	}
1042 
1043 	return 0;
1044 
1045 txrwb_free:
1046 	if ((hw->cap & NFP_NET_CFG_CTRL_TXRWB) != 0)
1047 		nfp_net_txrwb_free(eth_dev);
1048 xstats_free:
1049 	rte_free(net_hw->eth_xstats_base);
1050 ipsec_exit:
1051 	nfp_ipsec_uninit(eth_dev);
1052 free_area:
1053 	if (net_hw->mac_stats_area != NULL)
1054 		nfp_cpp_area_release_free(net_hw->mac_stats_area);
1055 
1056 	return err;
1057 }
1058 
1059 #define DEFAULT_FW_PATH       "/lib/firmware/netronome"
1060 
1061 static int
1062 nfp_fw_get_name(struct rte_pci_device *dev,
1063 		struct nfp_nsp *nsp,
1064 		char *card,
1065 		char *fw_name,
1066 		size_t fw_size)
1067 {
1068 	char serial[40];
1069 	uint16_t interface;
1070 	uint32_t cpp_serial_len;
1071 	const uint8_t *cpp_serial;
1072 	struct nfp_cpp *cpp = nfp_nsp_cpp(nsp);
1073 
1074 	cpp_serial_len = nfp_cpp_serial(cpp, &cpp_serial);
1075 	if (cpp_serial_len != NFP_SERIAL_LEN)
1076 		return -ERANGE;
1077 
1078 	interface = nfp_cpp_interface(cpp);
1079 
1080 	/* Looking for firmware file in order of priority */
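	/*
	 * Candidate names, in order (example values are hypothetical):
	 *   1. serial-00-15-4d-00-00-01-00-ff.nffw  (device serial + interface)
	 *   2. pci-0000:01:00.0.nffw                (PCI device name)
	 *   3. nic_AMDA0096-0001_2x10.nffw          (card type and media)
	 */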
1081 
1082 	/* First try to find a firmware image specific for this device */
1083 	snprintf(serial, sizeof(serial),
1084 			"serial-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x",
1085 			cpp_serial[0], cpp_serial[1], cpp_serial[2], cpp_serial[3],
1086 			cpp_serial[4], cpp_serial[5], interface >> 8, interface & 0xff);
1087 	snprintf(fw_name, fw_size, "%s/%s.nffw", DEFAULT_FW_PATH, serial);
1088 
1089 	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
1090 	if (access(fw_name, F_OK) == 0)
1091 		return 0;
1092 
1093 	/* Then try the PCI name */
1094 	snprintf(fw_name, fw_size, "%s/pci-%s.nffw", DEFAULT_FW_PATH,
1095 			dev->name);
1096 
1097 	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
1098 	if (access(fw_name, F_OK) == 0)
1099 		return 0;
1100 
1101 	/* Finally try the card type and media */
1102 	snprintf(fw_name, fw_size, "%s/%s", DEFAULT_FW_PATH, card);
1103 	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
1104 	if (access(fw_name, F_OK) == 0)
1105 		return 0;
1106 
1107 	return -ENOENT;
1108 }
1109 
1110 static int
1111 nfp_fw_upload(struct nfp_nsp *nsp,
1112 		char *fw_name)
1113 {
1114 	int err;
1115 	void *fw_buf;
1116 	size_t fsize;
1117 
1118 	err = rte_firmware_read(fw_name, &fw_buf, &fsize);
1119 	if (err != 0) {
1120 		PMD_DRV_LOG(ERR, "firmware %s not found!", fw_name);
1121 		return -ENOENT;
1122 	}
1123 
1124 	PMD_DRV_LOG(INFO, "Firmware file found at %s with size: %zu",
1125 			fw_name, fsize);
1126 	PMD_DRV_LOG(INFO, "Uploading the firmware ...");
1127 	if (nfp_nsp_load_fw(nsp, fw_buf, fsize) < 0) {
1128 		free(fw_buf);
1129 		PMD_DRV_LOG(ERR, "Firmware load failed.");
1130 		return -EIO;
1131 	}
1132 
1133 	PMD_DRV_LOG(INFO, "Done");
1134 
1135 	free(fw_buf);
1136 
1137 	return 0;
1138 }
1139 
1140 static void
1141 nfp_fw_unload(struct nfp_cpp *cpp)
1142 {
1143 	struct nfp_nsp *nsp;
1144 
1145 	nsp = nfp_nsp_open(cpp);
1146 	if (nsp == NULL)
1147 		return;
1148 
1149 	nfp_nsp_device_soft_reset(nsp);
1150 	nfp_nsp_close(nsp);
1151 }
1152 
1153 static int
1154 nfp_fw_check_change(struct nfp_cpp *cpp,
1155 		char *fw_name,
1156 		bool *fw_changed)
1157 {
1158 	int ret;
1159 	struct nfp_net_hw hw;
1160 	uint32_t new_version = 0;
1161 	uint32_t old_version = 0;
1162 
1163 	ret = nfp_elf_get_fw_version(&new_version, fw_name);
1164 	if (ret != 0)
1165 		return ret;
1166 
1167 	hw.cpp = cpp;
1168 	nfp_net_get_fw_version(&hw, &old_version);
1169 
1170 	if (new_version != old_version) {
1171 		PMD_DRV_LOG(INFO, "FW version has changed: new %u, old %u",
1172 				new_version, old_version);
1173 		*fw_changed = true;
1174 	} else {
1175 		PMD_DRV_LOG(INFO, "FW version is unchanged: %u", new_version);
1176 		*fw_changed = false;
1177 	}
1178 
1179 	return 0;
1180 }
1181 
1182 static int
1183 nfp_fw_reload(struct nfp_nsp *nsp,
1184 		char *fw_name)
1185 {
1186 	int err;
1187 
1188 	nfp_nsp_device_soft_reset(nsp);
1189 	err = nfp_fw_upload(nsp, fw_name);
1190 	if (err != 0)
1191 		PMD_DRV_LOG(ERR, "NFP firmware load failed");
1192 
1193 	return err;
1194 }
1195 
1196 static bool
1197 nfp_fw_skip_load(const struct nfp_dev_info *dev_info,
1198 		struct nfp_multi_pf *multi_pf,
1199 		bool *reload_fw)
1200 {
1201 	uint8_t i;
1202 	uint64_t tmp_beat;
1203 	uint32_t port_num;
1204 	uint8_t in_use = 0;
1205 	uint64_t beat[dev_info->pf_num_per_unit];
1206 	uint32_t offset[dev_info->pf_num_per_unit];
1207 	uint8_t abnormal = dev_info->pf_num_per_unit;
1208 
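	/*
	 * Heartbeat-based liveness check: sample every other PF's beat
	 * word, wait (up to 3 extra seconds) and re-sample. A slot that
	 * keeps changing belongs to a running PF; a zero slot is unused;
	 * a non-zero slot that never changes means that PF exited
	 * abnormally.
	 */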
1209 	sleep(1);
1210 	for (port_num = 0; port_num < dev_info->pf_num_per_unit; port_num++) {
1211 		if (port_num == multi_pf->function_id) {
1212 			abnormal--;
1213 			continue;
1214 		}
1215 
1216 		offset[port_num] = NFP_BEAT_OFFSET(port_num);
1217 		beat[port_num] = nn_readq(multi_pf->beat_addr + offset[port_num]);
1218 		if (beat[port_num] == 0)
1219 			abnormal--;
1220 	}
1221 
1222 	if (abnormal == 0)
1223 		return true;
1224 
1225 	for (i = 0; i < 3; i++) {
1226 		sleep(1);
1227 		for (port_num = 0; port_num < dev_info->pf_num_per_unit; port_num++) {
1228 			if (port_num == multi_pf->function_id)
1229 				continue;
1230 
1231 			if (beat[port_num] == 0)
1232 				continue;
1233 
1234 			tmp_beat = nn_readq(multi_pf->beat_addr + offset[port_num]);
1235 			if (tmp_beat != beat[port_num]) {
1236 				in_use++;
1237 				abnormal--;
1238 				beat[port_num] = 0;
1239 				if (*reload_fw) {
1240 					*reload_fw = false;
1241 					PMD_DRV_LOG(ERR, "The %s param is ignored, other PFs are in use",
1242 							NFP_PF_FORCE_RELOAD_FW);
1243 				}
1244 			}
1245 		}
1246 
1247 		if (abnormal == 0)
1248 			return true;
1249 	}
1250 
1251 	if (in_use != 0) {
1252 		PMD_DRV_LOG(WARNING, "%u port(s) on this NIC exited abnormally.",
1253 				abnormal);
1254 		return true;
1255 	}
1256 
1257 	return false;
1258 }

1259 static int
1260 nfp_fw_reload_for_single_pf(struct nfp_nsp *nsp,
1261 		char *fw_name,
1262 		struct nfp_cpp *cpp,
1263 		bool force_reload_fw)
1264 {
1265 	int ret;
1266 	bool fw_changed = true;
1267 
1268 	if (nfp_nsp_fw_loaded(nsp) && !force_reload_fw) {
1269 		ret = nfp_fw_check_change(cpp, fw_name, &fw_changed);
1270 		if (ret != 0)
1271 			return ret;
1272 	}
1273 
1274 	if (!fw_changed)
1275 		return 0;
1276 
1277 	return nfp_fw_reload(nsp, fw_name);
1282 }
1283 
1284 static int
1285 nfp_fw_reload_for_multi_pf(struct nfp_nsp *nsp,
1286 		char *fw_name,
1287 		struct nfp_cpp *cpp,
1288 		const struct nfp_dev_info *dev_info,
1289 		struct nfp_multi_pf *multi_pf,
1290 		bool force_reload_fw)
1291 {
1292 	int err;
1293 	bool fw_changed = true;
1294 	bool skip_load_fw = false;
1295 	bool reload_fw = force_reload_fw;
1296 
1297 	err = nfp_net_keepalive_init(cpp, multi_pf);
1298 	if (err != 0) {
1299 		PMD_DRV_LOG(ERR, "Failed to init the keepalive heartbeat");
1300 		return err;
1301 	}
1302 
1303 	err = nfp_net_keepalive_start(multi_pf);
1304 	if (err != 0) {
1305 		PMD_DRV_LOG(ERR, "Failed to start the keepalive heartbeat");
1306 		goto keepalive_uninit;
1307 	}
1308 
1309 	if (nfp_nsp_fw_loaded(nsp) && !reload_fw) {
1310 		err = nfp_fw_check_change(cpp, fw_name, &fw_changed);
1311 		if (err != 0)
1312 			goto keepalive_stop;
1313 	}
1314 
1315 	if (!fw_changed || reload_fw)
1316 		skip_load_fw = nfp_fw_skip_load(dev_info, multi_pf, &reload_fw);
1317 
1318 	if (skip_load_fw && !reload_fw)
1319 		return 0;
1320 
1321 	err = nfp_fw_reload(nsp, fw_name);
1322 	if (err != 0)
1323 		goto keepalive_stop;
1324 
1325 	nfp_net_keepalive_clear_others(dev_info, multi_pf);
1326 
1327 	return 0;
1328 
1329 keepalive_stop:
1330 	nfp_net_keepalive_stop(multi_pf);
1331 keepalive_uninit:
1332 	nfp_net_keepalive_uninit(multi_pf);
1333 
1334 	return err;
1335 }
1336 
1337 static int
1338 nfp_fw_setup(struct rte_pci_device *dev,
1339 		struct nfp_cpp *cpp,
1340 		struct nfp_eth_table *nfp_eth_table,
1341 		struct nfp_hwinfo *hwinfo,
1342 		const struct nfp_dev_info *dev_info,
1343 		struct nfp_multi_pf *multi_pf,
1344 		bool force_reload_fw)
1345 {
1346 	int err;
1347 	char fw_name[125];
1348 	char card_desc[100];
1349 	struct nfp_nsp *nsp;
1350 	const char *nfp_fw_model;
1351 
1352 	nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "nffw.partno");
1353 	if (nfp_fw_model == NULL)
1354 		nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "assembly.partno");
1355 
1356 	if (nfp_fw_model != NULL) {
1357 		PMD_DRV_LOG(INFO, "firmware model found: %s", nfp_fw_model);
1358 	} else {
1359 		PMD_DRV_LOG(ERR, "firmware model NOT found");
1360 		return -EIO;
1361 	}
1362 
1363 	if (nfp_eth_table->count == 0 || nfp_eth_table->count > 8) {
1364 		PMD_DRV_LOG(ERR, "NFP ethernet table reports an invalid port count: %u",
1365 				nfp_eth_table->count);
1366 		return -EIO;
1367 	}
1368 
1369 	PMD_DRV_LOG(INFO, "NFP ethernet port table reports %u ports",
1370 			nfp_eth_table->count);
1371 
1372 	PMD_DRV_LOG(INFO, "Port speed: %u", nfp_eth_table->ports[0].speed);
1373 
1374 	snprintf(card_desc, sizeof(card_desc), "nic_%s_%dx%d.nffw",
1375 			nfp_fw_model, nfp_eth_table->count,
1376 			nfp_eth_table->ports[0].speed / 1000);
1377 
1378 	nsp = nfp_nsp_open(cpp);
1379 	if (nsp == NULL) {
1380 		PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle");
1381 		return -EIO;
1382 	}
1383 
1384 	err = nfp_fw_get_name(dev, nsp, card_desc, fw_name, sizeof(fw_name));
1385 	if (err != 0) {
1386 		PMD_DRV_LOG(ERR, "Can't find suitable firmware.");
1387 		nfp_nsp_close(nsp);
1388 		return err;
1389 	}
1390 
1391 	if (multi_pf->enabled)
1392 		err = nfp_fw_reload_for_multi_pf(nsp, fw_name, cpp, dev_info, multi_pf,
1393 				force_reload_fw);
1394 	else
1395 		err = nfp_fw_reload_for_single_pf(nsp, fw_name, cpp, force_reload_fw);
1396 
1397 	nfp_nsp_close(nsp);
1398 	return err;
1399 }
1400 
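/*
 * With multi-PF firmware each PF exposes exactly one vNIC, so the
 * firmware reporting a single vNIC identifies a multi-PF setup.
 */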
1401 static inline bool
1402 nfp_check_multi_pf_from_fw(uint32_t total_vnics)
1403 {
1404 	return total_vnics == 1;
1408 }
1409 
1410 static inline bool
1411 nfp_check_multi_pf_from_nsp(struct rte_pci_device *pci_dev,
1412 		struct nfp_cpp *cpp)
1413 {
1414 	bool flag;
1415 	struct nfp_nsp *nsp;
1416 
1417 	nsp = nfp_nsp_open(cpp);
1418 	if (nsp == NULL) {
1419 		PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle");
1420 		return false;
1421 	}
1422 
1423 	flag = (nfp_nsp_get_abi_ver_major(nsp) > 0) &&
1424 			(pci_dev->id.device_id == PCI_DEVICE_ID_NFP3800_PF_NIC);
1425 
1426 	nfp_nsp_close(nsp);
1427 	return flag;
1428 }
1429 
1430 static int
1431 nfp_enable_multi_pf(struct nfp_pf_dev *pf_dev)
1432 {
1433 	int err = 0;
1434 	uint64_t tx_base;
1435 	uint8_t *ctrl_bar;
1436 	struct nfp_hw *hw;
1437 	uint32_t cap_extend;
1438 	struct nfp_net_hw net_hw;
1439 	struct nfp_cpp_area *area;
1440 	char name[RTE_ETH_NAME_MAX_LEN];
1441 
1442 	memset(&net_hw, 0, sizeof(struct nfp_net_hw));
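
	/*
	 * The stack-local nfp_net_hw is scratch state used only to issue
	 * the reconfig below; it is never attached to an ethdev.
	 */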
1443 
1444 	/* Map the symbol table */
1445 	snprintf(name, sizeof(name), "_pf%u_net_bar0",
1446 			pf_dev->multi_pf.function_id);
1447 	ctrl_bar = nfp_rtsym_map(pf_dev->sym_tbl, name, NFP_NET_CFG_BAR_SZ,
1448 			&area);
1449 	if (ctrl_bar == NULL) {
1450 		PMD_INIT_LOG(ERR, "Failed to find data vNIC memory symbol");
1451 		return -ENODEV;
1452 	}
1453 
1454 	hw = &net_hw.super;
1455 	hw->ctrl_bar = ctrl_bar;
1456 
1457 	cap_extend = nn_cfg_readl(hw, NFP_NET_CFG_CAP_WORD1);
1458 	if ((cap_extend & NFP_NET_CFG_CTRL_MULTI_PF) == 0) {
1459 		PMD_INIT_LOG(ERR, "Loaded firmware doesn't support multiple PFs");
1460 		err = -EINVAL;
1461 		goto end;
1462 	}
1463 
1464 	tx_base = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
1465 	net_hw.tx_bar = pf_dev->qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
1466 	nfp_net_cfg_queue_setup(&net_hw);
1467 	rte_spinlock_init(&hw->reconfig_lock);
1468 	nfp_ext_reconfig(&net_hw.super, NFP_NET_CFG_CTRL_MULTI_PF, NFP_NET_CFG_UPDATE_GEN);
1469 end:
1470 	nfp_cpp_area_release_free(area);
1471 	return err;
1472 }
1473 
1474 static int
1475 nfp_init_app_fw_nic(struct nfp_pf_dev *pf_dev,
1476 		const struct nfp_dev_info *dev_info)
1477 {
1478 	uint8_t i;
1479 	uint8_t id;
1480 	int ret = 0;
1481 	uint32_t total_vnics;
1482 	struct nfp_net_hw *hw;
1483 	unsigned int numa_node;
1484 	struct rte_eth_dev *eth_dev;
1485 	struct nfp_app_fw_nic *app_fw_nic;
1486 	struct nfp_eth_table *nfp_eth_table;
1487 	char bar_name[RTE_ETH_NAME_MAX_LEN];
1488 	char port_name[RTE_ETH_NAME_MAX_LEN];
1489 	char vnic_name[RTE_ETH_NAME_MAX_LEN];
1490 
1491 	nfp_eth_table = pf_dev->nfp_eth_table;
1492 	PMD_INIT_LOG(INFO, "Total physical ports: %u", nfp_eth_table->count);
1493 	id = nfp_function_id_get(pf_dev, 0);
1494 
1495 	/* Allocate memory for the CoreNIC app */
1496 	app_fw_nic = rte_zmalloc("nfp_app_fw_nic", sizeof(*app_fw_nic), 0);
1497 	if (app_fw_nic == NULL)
1498 		return -ENOMEM;
1499 
1500 	/* Point the app_fw_priv pointer in the PF to the coreNIC app */
1501 	pf_dev->app_fw_priv = app_fw_nic;
1502 
1503 	/* Read the number of vNICs created for the PF */
1504 	snprintf(vnic_name, sizeof(vnic_name), "nfd_cfg_pf%u_num_ports", id);
1505 	total_vnics = nfp_rtsym_read_le(pf_dev->sym_tbl, vnic_name, &ret);
1506 	if (ret != 0 || total_vnics == 0 || total_vnics > 8) {
1507 		PMD_INIT_LOG(ERR, "Symbol %s has an invalid value", vnic_name);
1508 		ret = -ENODEV;
1509 		goto app_cleanup;
1510 	}
1511 
1512 	if (pf_dev->multi_pf.enabled) {
1513 		if (!nfp_check_multi_pf_from_fw(total_vnics)) {
1514 			PMD_INIT_LOG(ERR, "NSP reports multi-PF, but the firmware does not");
1515 			ret = -ENODEV;
1516 			goto app_cleanup;
1517 		}
1518 	} else {
1519 		/*
1520 		 * For coreNIC the number of vNICs exposed should be the same as the
1521 		 * number of physical ports.
1522 		 */
1523 		if (total_vnics != nfp_eth_table->count) {
1524 			PMD_INIT_LOG(ERR, "Total physical ports do not match number of vNICs");
1525 			ret = -ENODEV;
1526 			goto app_cleanup;
1527 		}
1528 	}
1529 
1530 	/* Populate coreNIC app properties */
1531 	app_fw_nic->total_phyports = total_vnics;
1532 	app_fw_nic->pf_dev = pf_dev;
1533 	if (total_vnics > 1)
1534 		app_fw_nic->multiport = true;
1535 
1536 	/* Map the symbol table */
1537 	snprintf(bar_name, sizeof(bar_name), "_pf%u_net_bar0", id);
1538 	pf_dev->ctrl_bar = nfp_rtsym_map(pf_dev->sym_tbl, bar_name,
1539 			app_fw_nic->total_phyports * NFP_NET_CFG_BAR_SZ,
1540 			&pf_dev->ctrl_area);
1541 	if (pf_dev->ctrl_bar == NULL) {
1542 		PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for %s", bar_name);
1543 		ret = -EIO;
1544 		goto app_cleanup;
1545 	}
1546 
1547 	PMD_INIT_LOG(DEBUG, "ctrl bar: %p", pf_dev->ctrl_bar);
1548 
1549 	/* Loop through all physical ports on PF */
1550 	numa_node = rte_socket_id();
1551 	for (i = 0; i < app_fw_nic->total_phyports; i++) {
1552 		if (pf_dev->multi_pf.enabled)
1553 			snprintf(port_name, sizeof(port_name), "%s",
1554 					pf_dev->pci_dev->device.name);
1555 		else
1556 			snprintf(port_name, sizeof(port_name), "%s_port%u",
1557 					pf_dev->pci_dev->device.name, i);
1558 
1559 		/* Allocate an eth_dev for this phyport */
1560 		eth_dev = rte_eth_dev_allocate(port_name);
1561 		if (eth_dev == NULL) {
1562 			ret = -ENODEV;
1563 			goto port_cleanup;
1564 		}
1565 
1566 		/* Allocate memory for this phyport */
1567 		eth_dev->data->dev_private = rte_zmalloc_socket(port_name,
1568 				sizeof(struct nfp_net_hw),
1569 				RTE_CACHE_LINE_SIZE, numa_node);
1570 		if (eth_dev->data->dev_private == NULL) {
1571 			ret = -ENOMEM;
1572 			rte_eth_dev_release_port(eth_dev);
1573 			goto port_cleanup;
1574 		}
1575 
1576 		hw = eth_dev->data->dev_private;
1577 		id = nfp_function_id_get(pf_dev, i);
1578 
1579 		/* Add this device to the PF's array of physical ports */
1580 		app_fw_nic->ports[id] = hw;
1581 
1582 		hw->dev_info = dev_info;
1583 		hw->pf_dev = pf_dev;
1584 		hw->cpp = pf_dev->cpp;
1585 		hw->eth_dev = eth_dev;
1586 		hw->idx = id;
1587 		hw->nfp_idx = nfp_eth_table->ports[id].index;
1588 
1589 		eth_dev->device = &pf_dev->pci_dev->device;
1590 
1591 		/*
1592 		 * Ctrl/tx/rx BAR mappings and remaining init happens in
1593 		 * @nfp_net_init()
1594 		 */
1595 		ret = nfp_net_init(eth_dev);
1596 		if (ret != 0) {
1597 			ret = -ENODEV;
1598 			goto port_cleanup;
1599 		}
1600 
1601 		rte_eth_dev_probing_finish(eth_dev);
1602 
1603 	} /* End loop, all ports on this PF */
1604 
1605 	return 0;
1606 
1607 port_cleanup:
1608 	for (i = 0; i < app_fw_nic->total_phyports; i++) {
1609 		id = nfp_function_id_get(pf_dev, i);
1610 		hw = app_fw_nic->ports[id];
1611 
1612 		if (hw != NULL && hw->eth_dev != NULL) {
1613 			nfp_net_uninit(hw->eth_dev);
1614 			rte_eth_dev_release_port(hw->eth_dev);
1615 		}
1616 	}
1617 	nfp_cpp_area_release_free(pf_dev->ctrl_area);
1618 app_cleanup:
1619 	rte_free(app_fw_nic);
1620 
1621 	return ret;
1622 }
1623 
1624 static int
1625 nfp_net_hwinfo_set(uint8_t function_id,
1626 		struct nfp_rtsym_table *sym_tbl,
1627 		struct nfp_cpp *cpp,
1628 		enum nfp_app_fw_id app_fw_id)
1629 {
1630 	int ret = 0;
1631 	uint64_t app_cap;
1632 	struct nfp_nsp *nsp;
1633 	uint8_t sp_indiff = 1;
1634 	char hw_info[RTE_ETH_NAME_MAX_LEN];
1635 	char app_cap_name[RTE_ETH_NAME_MAX_LEN];
1636 
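	/*
	 * sp_indiff defaults to on; only non-flower firmware reports the
	 * real value through the per-PF app_cap symbol read below.
	 */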
1637 	if (app_fw_id != NFP_APP_FW_FLOWER_NIC) {
1638 		/* Read the app capabilities of the firmware loaded */
1639 		snprintf(app_cap_name, sizeof(app_cap_name), "_pf%u_net_app_cap", function_id);
1640 		app_cap = nfp_rtsym_read_le(sym_tbl, app_cap_name, &ret);
1641 		if (ret != 0) {
1642 			PMD_INIT_LOG(ERR, "Could not read app_fw_cap from firmware.");
1643 			return ret;
1644 		}
1645 
1646 		/* Calculate the value of sp_indiff and write to hw_info */
1647 		sp_indiff = app_cap & NFP_NET_APP_CAP_SP_INDIFF;
1648 	}
1649 
1650 	snprintf(hw_info, sizeof(hw_info), "sp_indiff=%u", sp_indiff);
1651 
1652 	nsp = nfp_nsp_open(cpp);
1653 	if (nsp == NULL) {
1654 		PMD_INIT_LOG(ERR, "Could not get NSP.");
1655 		return -EIO;
1656 	}
1657 
1658 	ret = nfp_nsp_hwinfo_set(nsp, hw_info, sizeof(hw_info));
1659 	nfp_nsp_close(nsp);
1660 	if (ret != 0) {
1661 		PMD_INIT_LOG(ERR, "Failed to set parameter to hwinfo.");
1662 		return ret;
1663 	}
1664 
1665 	return 0;
1666 }
1667 
1668 const uint32_t nfp_eth_media_table[NFP_MEDIA_LINK_MODES_NUMBER] = {
1669 	[NFP_MEDIA_W0_RJ45_10M]     = RTE_ETH_LINK_SPEED_10M,
1670 	[NFP_MEDIA_W0_RJ45_10M_HD]  = RTE_ETH_LINK_SPEED_10M_HD,
1671 	[NFP_MEDIA_W0_RJ45_100M]    = RTE_ETH_LINK_SPEED_100M,
1672 	[NFP_MEDIA_W0_RJ45_100M_HD] = RTE_ETH_LINK_SPEED_100M_HD,
1673 	[NFP_MEDIA_W0_RJ45_1G]      = RTE_ETH_LINK_SPEED_1G,
1674 	[NFP_MEDIA_W0_RJ45_2P5G]    = RTE_ETH_LINK_SPEED_2_5G,
1675 	[NFP_MEDIA_W0_RJ45_5G]      = RTE_ETH_LINK_SPEED_5G,
1676 	[NFP_MEDIA_W0_RJ45_10G]     = RTE_ETH_LINK_SPEED_10G,
1677 	[NFP_MEDIA_1000BASE_CX]     = RTE_ETH_LINK_SPEED_1G,
1678 	[NFP_MEDIA_1000BASE_KX]     = RTE_ETH_LINK_SPEED_1G,
1679 	[NFP_MEDIA_10GBASE_KX4]     = RTE_ETH_LINK_SPEED_10G,
1680 	[NFP_MEDIA_10GBASE_KR]      = RTE_ETH_LINK_SPEED_10G,
1681 	[NFP_MEDIA_10GBASE_CX4]     = RTE_ETH_LINK_SPEED_10G,
1682 	[NFP_MEDIA_10GBASE_CR]      = RTE_ETH_LINK_SPEED_10G,
1683 	[NFP_MEDIA_10GBASE_SR]      = RTE_ETH_LINK_SPEED_10G,
1684 	[NFP_MEDIA_10GBASE_ER]      = RTE_ETH_LINK_SPEED_10G,
1685 	[NFP_MEDIA_25GBASE_KR]      = RTE_ETH_LINK_SPEED_25G,
1686 	[NFP_MEDIA_25GBASE_KR_S]    = RTE_ETH_LINK_SPEED_25G,
1687 	[NFP_MEDIA_25GBASE_CR]      = RTE_ETH_LINK_SPEED_25G,
1688 	[NFP_MEDIA_25GBASE_CR_S]    = RTE_ETH_LINK_SPEED_25G,
1689 	[NFP_MEDIA_25GBASE_SR]      = RTE_ETH_LINK_SPEED_25G,
1690 	[NFP_MEDIA_40GBASE_CR4]     = RTE_ETH_LINK_SPEED_40G,
1691 	[NFP_MEDIA_40GBASE_KR4]     = RTE_ETH_LINK_SPEED_40G,
1692 	[NFP_MEDIA_40GBASE_SR4]     = RTE_ETH_LINK_SPEED_40G,
1693 	[NFP_MEDIA_40GBASE_LR4]     = RTE_ETH_LINK_SPEED_40G,
1694 	[NFP_MEDIA_50GBASE_KR]      = RTE_ETH_LINK_SPEED_50G,
1695 	[NFP_MEDIA_50GBASE_SR]      = RTE_ETH_LINK_SPEED_50G,
1696 	[NFP_MEDIA_50GBASE_CR]      = RTE_ETH_LINK_SPEED_50G,
1697 	[NFP_MEDIA_50GBASE_LR]      = RTE_ETH_LINK_SPEED_50G,
1698 	[NFP_MEDIA_50GBASE_ER]      = RTE_ETH_LINK_SPEED_50G,
1699 	[NFP_MEDIA_50GBASE_FR]      = RTE_ETH_LINK_SPEED_50G,
1700 	[NFP_MEDIA_100GBASE_KR4]    = RTE_ETH_LINK_SPEED_100G,
1701 	[NFP_MEDIA_100GBASE_SR4]    = RTE_ETH_LINK_SPEED_100G,
1702 	[NFP_MEDIA_100GBASE_CR4]    = RTE_ETH_LINK_SPEED_100G,
1703 	[NFP_MEDIA_100GBASE_KP4]    = RTE_ETH_LINK_SPEED_100G,
1704 	[NFP_MEDIA_100GBASE_CR10]   = RTE_ETH_LINK_SPEED_100G,
1705 	[NFP_MEDIA_10GBASE_LR]      = RTE_ETH_LINK_SPEED_10G,
1706 	[NFP_MEDIA_25GBASE_LR]      = RTE_ETH_LINK_SPEED_25G,
1707 	[NFP_MEDIA_25GBASE_ER]      = RTE_ETH_LINK_SPEED_25G
1708 };
1709 
1710 static int
1711 nfp_net_speed_capa_get_real(struct nfp_eth_media_buf *media_buf,
1712 		struct nfp_pf_dev *pf_dev)
1713 {
1714 	uint32_t i;
1715 	uint32_t j;
1716 	uint32_t offset;
1717 	uint32_t speed_capa = 0;
1718 	uint64_t supported_modes;
1719 
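
	/*
	 * Walk each 64-bit word of the supported-modes bitmap; a set bit
	 * at position (i * 64 + j) selects one nfp_eth_media_table entry.
	 */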
1720 	for (i = 0; i < RTE_DIM(media_buf->supported_modes); i++) {
1721 		supported_modes = media_buf->supported_modes[i];
1722 		offset = i * UINT64_BIT;
1723 		for (j = 0; j < UINT64_BIT; j++) {
1724 			if (supported_modes == 0)
1725 				break;
1726 
1727 			if ((supported_modes & 1) != 0) {
1728 				if ((j + offset) >= NFP_MEDIA_LINK_MODES_NUMBER) {
1729 					PMD_DRV_LOG(ERR, "Invalid offset of media table.");
1730 					return -EINVAL;
1731 				}
1732 
1733 				speed_capa |= nfp_eth_media_table[j + offset];
1734 			}
1735 
1736 			supported_modes = supported_modes >> 1;
1737 		}
1738 	}
1739 
1740 	pf_dev->speed_capa = speed_capa;
1741 
1742 	return pf_dev->speed_capa == 0 ? -EINVAL : 0;
1743 }
1744 
1745 static int
1746 nfp_net_speed_capa_get(struct nfp_pf_dev *pf_dev,
1747 		uint32_t port_id)
1748 {
1749 	int ret;
1750 	struct nfp_nsp *nsp;
1751 	struct nfp_eth_media_buf media_buf;
1752 
1753 	media_buf.eth_index = pf_dev->nfp_eth_table->ports[port_id].eth_index;
1754 	pf_dev->speed_capa = 0;
1755 
1756 	nsp = nfp_nsp_open(pf_dev->cpp);
1757 	if (nsp == NULL) {
1758 		PMD_DRV_LOG(ERR, "Couldn't get NSP.");
1759 		return -EIO;
1760 	}
1761 
1762 	ret = nfp_nsp_read_media(nsp, &media_buf, sizeof(media_buf));
1763 	nfp_nsp_close(nsp);
1764 	if (ret != 0) {
1765 		PMD_DRV_LOG(ERR, "Failed to read media.");
1766 		return ret;
1767 	}
1768 
1769 	ret = nfp_net_speed_capa_get_real(&media_buf, pf_dev);
1770 	if (ret < 0) {
1771 		PMD_DRV_LOG(ERR, "Speed capability is invalid.");
1772 		return ret;
1773 	}
1774 
1775 	return 0;
1776 }
1777 
1778 static int
1779 nfp_pf_init(struct rte_pci_device *pci_dev)
1780 {
1781 	void *sync;
1782 	uint32_t i;
1783 	uint32_t id;
1784 	int ret = 0;
1785 	uint64_t addr;
1786 	uint32_t index;
1787 	uint32_t cpp_id;
1788 	uint8_t function_id;
1789 	struct nfp_cpp *cpp;
1790 	struct nfp_pf_dev *pf_dev;
1791 	struct nfp_hwinfo *hwinfo;
1792 	enum nfp_app_fw_id app_fw_id;
1793 	char name[RTE_ETH_NAME_MAX_LEN];
1794 	struct nfp_rtsym_table *sym_tbl;
1795 	char app_name[RTE_ETH_NAME_MAX_LEN];
1796 	struct nfp_eth_table *nfp_eth_table;
1797 	const struct nfp_dev_info *dev_info;
1798 
1799 	if (pci_dev == NULL)
1800 		return -ENODEV;
1801 
1802 	if (pci_dev->mem_resource[0].addr == NULL) {
1803 		PMD_INIT_LOG(ERR, "The address of BAR0 is NULL.");
1804 		return -ENODEV;
1805 	}
1806 
1807 	dev_info = nfp_dev_info_get(pci_dev->id.device_id);
1808 	if (dev_info == NULL) {
1809 		PMD_INIT_LOG(ERR, "Unsupported device ID");
1810 		return -ENODEV;
1811 	}
1812 
1813 	/* Allocate memory for the PF "device" */
1814 	function_id = (pci_dev->addr.function) & 0x07;
1815 	snprintf(name, sizeof(name), "nfp_pf%u", function_id);
1816 	pf_dev = rte_zmalloc(name, sizeof(*pf_dev), 0);
1817 	if (pf_dev == NULL) {
1818 		PMD_INIT_LOG(ERR, "Can't allocate memory for the PF device");
1819 		return -ENOMEM;
1820 	}
1821 
1822 	sync = nfp_sync_alloc();
1823 	if (sync == NULL) {
1824 		PMD_INIT_LOG(ERR, "Failed to alloc sync zone.");
1825 		ret = -ENOMEM;
1826 		goto pf_cleanup;
1827 	}
1828 
1829 	/*
1830 	 * When the device is bound to UIO, it could mistakenly be used by
1831 	 * two DPDK apps at once, and the UIO driver does not prevent it.
1832 	 * This could lead to a serious problem when configuring the NFP
1833 	 * CPP interface. We avoid this by telling the CPP init code to
1834 	 * use a lock file if UIO is being used.
1835 	 */
1836 	if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO)
1837 		cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, false);
1838 	else
1839 		cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, true);
1840 
1841 	if (cpp == NULL) {
1842 		PMD_INIT_LOG(ERR, "A CPP handle could not be obtained");
1843 		ret = -EIO;
1844 		goto sync_free;
1845 	}
1846 
1847 	hwinfo = nfp_hwinfo_read(cpp);
1848 	if (hwinfo == NULL) {
1849 		PMD_INIT_LOG(ERR, "Error reading hwinfo table");
1850 		ret = -EIO;
1851 		goto cpp_cleanup;
1852 	}
1853 
1854 	/* Read the number of physical ports from hardware */
1855 	nfp_eth_table = nfp_eth_read_ports(cpp);
1856 	if (nfp_eth_table == NULL) {
1857 		PMD_INIT_LOG(ERR, "Error reading NFP ethernet table");
1858 		ret = -EIO;
1859 		goto hwinfo_cleanup;
1860 	}
1861 
1862 	pf_dev->multi_pf.enabled = nfp_check_multi_pf_from_nsp(pci_dev, cpp);
1863 	pf_dev->multi_pf.function_id = function_id;
1864 
1865 	/* Force the physical port down to clear the possible DMA error */
1866 	for (i = 0; i < nfp_eth_table->count; i++) {
1867 		id = nfp_function_id_get(pf_dev, i);
1868 		index = nfp_eth_table->ports[id].index;
1869 		nfp_eth_set_configured(cpp, index, 0);
1870 	}
1871 
1872 	nfp_devargs_parse(&pf_dev->devargs, pci_dev->device.devargs);
1873 
1874 	if (nfp_fw_setup(pci_dev, cpp, nfp_eth_table, hwinfo,
1875 			dev_info, &pf_dev->multi_pf, pf_dev->devargs.force_reload_fw) != 0) {
1876 		PMD_INIT_LOG(ERR, "Error when uploading firmware");
1877 		ret = -EIO;
1878 		goto eth_table_cleanup;
1879 	}
1880 
1881 	/* Now the symbol table should be there */
1882 	sym_tbl = nfp_rtsym_table_read(cpp);
1883 	if (sym_tbl == NULL) {
1884 		PMD_INIT_LOG(ERR, "Something is wrong with the firmware symbol table");
1885 		ret = -EIO;
1886 		goto fw_cleanup;
1887 	}
1888 
1889 	/* Read the app ID of the firmware loaded */
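	/* The firmware exposes it as a per-PF run-time symbol, e.g. "_pf0_net_app_id" */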
1890 	snprintf(app_name, sizeof(app_name), "_pf%u_net_app_id", function_id);
1891 	app_fw_id = nfp_rtsym_read_le(sym_tbl, app_name, &ret);
1892 	if (ret != 0) {
1893 		PMD_INIT_LOG(ERR, "Couldn't read %s from firmware", app_name);
1894 		ret = -EIO;
1895 		goto sym_tbl_cleanup;
1896 	}
1897 
1898 	/* Write sp_indiff to hwinfo */
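	/* (sp_indiff: the app firmware is indifferent to the port speed) */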
1899 	ret = nfp_net_hwinfo_set(function_id, sym_tbl, cpp, app_fw_id);
1900 	if (ret != 0) {
1901 		PMD_INIT_LOG(ERR, "Failed to set hwinfo.");
1902 		ret = -EIO;
1903 		goto sym_tbl_cleanup;
1904 	}
1905 
1906 	/* Populate the newly created PF device */
1907 	pf_dev->app_fw_id = app_fw_id;
1908 	pf_dev->cpp = cpp;
1909 	pf_dev->hwinfo = hwinfo;
1910 	pf_dev->sym_tbl = sym_tbl;
1911 	pf_dev->pci_dev = pci_dev;
1912 	pf_dev->nfp_eth_table = nfp_eth_table;
1913 	pf_dev->sync = sync;
1914 
1915 	/* Get the speed capability */
1916 	for (i = 0; i < nfp_eth_table->count; i++) {
1917 		id = nfp_function_id_get(pf_dev, i);
1918 		ret = nfp_net_speed_capa_get(pf_dev, id);
1919 		if (ret != 0) {
1920 			PMD_INIT_LOG(ERR, "Failed to get speed capability.");
1921 			ret = -EIO;
1922 			goto sym_tbl_cleanup;
1923 		}
1924 	}
1925 
1926 	/* Configure access to tx/rx vNIC BARs */
1927 	addr = nfp_qcp_queue_offset(dev_info, 0);
1928 	cpp_id = NFP_CPP_ISLAND_ID(0, NFP_CPP_ACTION_RW, 0, 0);
1929 
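	/*
	 * Map the queue controller area starting at the offset of queue 0;
	 * the per-queue pointers are then reachable through qc_bar.
	 */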
1930 	pf_dev->qc_bar = nfp_cpp_map_area(pf_dev->cpp, cpp_id,
1931 			addr, dev_info->qc_area_sz, &pf_dev->qc_area);
1932 	if (pf_dev->qc_bar == NULL) {
1933 		PMD_INIT_LOG(ERR, "Failed to map the net.qc area");
1934 		ret = -EIO;
1935 		goto sym_tbl_cleanup;
1936 	}
1937 
1938 	PMD_INIT_LOG(DEBUG, "qc_bar address: %p", pf_dev->qc_bar);
1939 
1940 	/*
1941 	 * PF initialization has been done at this point. Call app specific
1942 	 * init code now.
1943 	 */
1944 	switch (pf_dev->app_fw_id) {
1945 	case NFP_APP_FW_CORE_NIC:
1946 		if (pf_dev->multi_pf.enabled) {
1947 			ret = nfp_enable_multi_pf(pf_dev);
1948 			if (ret != 0)
1949 				goto hwqueues_cleanup;
1950 		}
1951 
1952 		PMD_INIT_LOG(INFO, "Initializing coreNIC");
1953 		ret = nfp_init_app_fw_nic(pf_dev, dev_info);
1954 		if (ret != 0) {
1955 			PMD_INIT_LOG(ERR, "Could not initialize coreNIC!");
1956 			goto hwqueues_cleanup;
1957 		}
1958 		break;
1959 	case NFP_APP_FW_FLOWER_NIC:
1960 		PMD_INIT_LOG(INFO, "Initializing Flower");
1961 		ret = nfp_init_app_fw_flower(pf_dev, dev_info);
1962 		if (ret != 0) {
1963 			PMD_INIT_LOG(ERR, "Could not initialize Flower!");
1964 			goto hwqueues_cleanup;
1965 		}
1966 		break;
1967 	default:
1968 		PMD_INIT_LOG(ERR, "Unsupported firmware loaded");
1969 		ret = -EINVAL;
1970 		goto hwqueues_cleanup;
1971 	}
1972 
1973 	/* Register the CPP bridge service here for primary use */
1974 	ret = nfp_enable_cpp_service(pf_dev);
1975 	if (ret != 0)
1976 		PMD_INIT_LOG(INFO, "Failed to enable the CPP service.");
1977 
1978 	return 0;
1979 
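	/* Error path: release everything in the reverse order of acquisition */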
1980 hwqueues_cleanup:
1981 	nfp_cpp_area_release_free(pf_dev->qc_area);
1982 sym_tbl_cleanup:
1983 	free(sym_tbl);
1984 fw_cleanup:
1985 	nfp_fw_unload(cpp);
1986 	nfp_net_keepalive_stop(&pf_dev->multi_pf);
1987 	nfp_net_keepalive_clear(pf_dev->multi_pf.beat_addr, pf_dev->multi_pf.function_id);
1988 	nfp_net_keepalive_uninit(&pf_dev->multi_pf);
1989 eth_table_cleanup:
1990 	free(nfp_eth_table);
1991 hwinfo_cleanup:
1992 	free(hwinfo);
1993 cpp_cleanup:
1994 	nfp_cpp_free(cpp);
1995 sync_free:
1996 	nfp_sync_free(sync);
1997 pf_cleanup:
1998 	rte_free(pf_dev);
1999 
2000 	return ret;
2001 }
2002 
2003 static int
2004 nfp_secondary_init_app_fw_nic(struct nfp_pf_dev *pf_dev)
2005 {
2006 	uint32_t i;
2007 	int err = 0;
2008 	int ret = 0;
2009 	uint8_t function_id;
2010 	uint32_t total_vnics;
2011 	struct nfp_net_hw *hw;
2012 	char pf_name[RTE_ETH_NAME_MAX_LEN];
2013 
2014 	/* Read the number of vNICs created for the PF */
2015 	function_id = (pf_dev->pci_dev->addr.function) & 0x07;
2016 	snprintf(pf_name, sizeof(pf_name), "nfd_cfg_pf%u_num_ports", function_id);
2017 	total_vnics = nfp_rtsym_read_le(pf_dev->sym_tbl, pf_name, &err);
2018 	if (err != 0 || total_vnics == 0 || total_vnics > 8) {
2019 		PMD_INIT_LOG(ERR, "%s symbol has an invalid value", pf_name);
2020 		return -ENODEV;
2021 	}
2022 
2023 	for (i = 0; i < total_vnics; i++) {
2024 		struct rte_eth_dev *eth_dev;
2025 		char port_name[RTE_ETH_NAME_MAX_LEN];
2026 
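		/* Multi-PF firmware exposes a single port per PF, so the bare device name is used */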
2027 		if (nfp_check_multi_pf_from_fw(total_vnics))
2028 			snprintf(port_name, sizeof(port_name), "%s",
2029 					pf_dev->pci_dev->device.name);
2030 		else
2031 			snprintf(port_name, sizeof(port_name), "%s_port%u",
2032 					pf_dev->pci_dev->device.name, i);
2033 
2034 		PMD_INIT_LOG(DEBUG, "Secondary attaching to port %s", port_name);
2035 		eth_dev = rte_eth_dev_attach_secondary(port_name);
2036 		if (eth_dev == NULL) {
2037 			PMD_INIT_LOG(ERR, "Secondary process failed to attach to port %s", port_name);
2038 			ret = -ENODEV;
2039 			break;
2040 		}
2041 
2042 		eth_dev->process_private = pf_dev;
2043 		hw = eth_dev->data->dev_private;
2044 		nfp_net_ethdev_ops_mount(hw, eth_dev);
2045 
2046 		rte_eth_dev_probing_finish(eth_dev);
2047 	}
2048 
2049 	return ret;
2050 }
2051 
2052 /*
2053  * When attaching to the NFP4000/6000 PF on a secondary process there
2054  * is no need to initialise the PF again. Only minimal work is required
2055  * here.
2056  */
2057 static int
2058 nfp_pf_secondary_init(struct rte_pci_device *pci_dev)
2059 {
2060 	void *sync;
2061 	int ret = 0;
2062 	struct nfp_cpp *cpp;
2063 	uint8_t function_id;
2064 	struct nfp_pf_dev *pf_dev;
2065 	enum nfp_app_fw_id app_fw_id;
2066 	char name[RTE_ETH_NAME_MAX_LEN];
2067 	struct nfp_rtsym_table *sym_tbl;
2068 	const struct nfp_dev_info *dev_info;
2069 	char app_name[RTE_ETH_NAME_MAX_LEN];
2070 
2071 	if (pci_dev == NULL)
2072 		return -ENODEV;
2073 
2074 	if (pci_dev->mem_resource[0].addr == NULL) {
2075 		PMD_INIT_LOG(ERR, "The address of BAR0 is NULL.");
2076 		return -ENODEV;
2077 	}
2078 
2079 	dev_info = nfp_dev_info_get(pci_dev->id.device_id);
2080 	if (dev_info == NULL) {
2081 		PMD_INIT_LOG(ERR, "Unsupported device ID");
2082 		return -ENODEV;
2083 	}
2084 
2085 	/* Allocate memory for the PF "device" */
2086 	snprintf(name, sizeof(name), "nfp_pf%u", pci_dev->addr.function & 0x07);
2087 	pf_dev = rte_zmalloc(name, sizeof(*pf_dev), 0);
2088 	if (pf_dev == NULL) {
2089 		PMD_INIT_LOG(ERR, "Can't allocate memory for the PF device");
2090 		return -ENOMEM;
2091 	}
2092 
2093 	sync = nfp_sync_alloc();
2094 	if (sync == NULL) {
2095 		PMD_INIT_LOG(ERR, "Failed to alloc sync zone.");
2096 		ret = -ENOMEM;
2097 		goto pf_cleanup;
2098 	}
2099 
2100 	/*
2101 	 * When the device is bound to UIO, two DPDK apps could use it at the
2102 	 * same time by mistake, and the UIO driver does not prevent it. This
2103 	 * could lead to serious problems when configuring the NFP CPP
2104 	 * interface. Avoid this by telling the CPP init code to use a lock
2105 	 * file if UIO is being used.
2106 	 */
2107 	if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO)
2108 		cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, false);
2109 	else
2110 		cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, true);
2111 
2112 	if (cpp == NULL) {
2113 		PMD_INIT_LOG(ERR, "A CPP handle cannot be obtained");
2114 		ret = -EIO;
2115 		goto sync_free;
2116 	}
2117 
2118 	/*
2119 	 * We don't have access to the PF created in the primary process
2120 	 * here so we have to read the number of ports from firmware.
2121 	 */
2122 	sym_tbl = nfp_rtsym_table_read(cpp);
2123 	if (sym_tbl == NULL) {
2124 		PMD_INIT_LOG(ERR, "Something is wrong with the firmware symbol table");
2125 		ret = -EIO;
2126 		goto sync_free;
2127 	}
2128 
2129 	/* Read the app ID of the firmware loaded */
2130 	function_id = pci_dev->addr.function & 0x7;
2131 	snprintf(app_name, sizeof(app_name), "_pf%u_net_app_id", function_id);
2132 	app_fw_id = nfp_rtsym_read_le(sym_tbl, app_name, &ret);
2133 	if (ret != 0) {
2134 		PMD_INIT_LOG(ERR, "Couldn't read %s from firmware", app_name);
2135 		ret = -EIO;
2136 		goto sym_tbl_cleanup;
2137 	}
2138 
2139 	/* Populate the newly created PF device */
2140 	pf_dev->app_fw_id = app_fw_id;
2141 	pf_dev->cpp = cpp;
2142 	pf_dev->sym_tbl = sym_tbl;
2143 	pf_dev->pci_dev = pci_dev;
2144 	pf_dev->sync = sync;
2145 
2146 	/* Call app specific init code now */
2147 	switch (app_fw_id) {
2148 	case NFP_APP_FW_CORE_NIC:
2149 		PMD_INIT_LOG(INFO, "Initializing coreNIC");
2150 		ret = nfp_secondary_init_app_fw_nic(pf_dev);
2151 		if (ret != 0) {
2152 			PMD_INIT_LOG(ERR, "Could not initialize coreNIC!");
2153 			goto sym_tbl_cleanup;
2154 		}
2155 		break;
2156 	case NFP_APP_FW_FLOWER_NIC:
2157 		PMD_INIT_LOG(INFO, "Initializing Flower");
2158 		ret = nfp_secondary_init_app_fw_flower(pf_dev);
2159 		if (ret != 0) {
2160 			PMD_INIT_LOG(ERR, "Could not initialize Flower!");
2161 			goto sym_tbl_cleanup;
2162 		}
2163 		break;
2164 	default:
2165 		PMD_INIT_LOG(ERR, "Unsupported firmware loaded");
2166 		ret = -EINVAL;
2167 		goto sym_tbl_cleanup;
2168 	}
2169 
2170 	return 0;
2171 
2172 sym_tbl_cleanup:
2173 	free(sym_tbl);
2174 sync_free:
2175 	nfp_sync_free(sync);
2176 pf_cleanup:
2177 	rte_free(pf_dev);
2178 
2179 	return ret;
2180 }
2181 
2182 static int
2183 nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2184 		struct rte_pci_device *dev)
2185 {
2186 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
2187 		return nfp_pf_init(dev);
2188 	else
2189 		return nfp_pf_secondary_init(dev);
2190 }
2191 
2192 static const struct rte_pci_id pci_id_nfp_pf_net_map[] = {
2193 	{
2194 		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
2195 				PCI_DEVICE_ID_NFP3800_PF_NIC)
2196 	},
2197 	{
2198 		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
2199 				PCI_DEVICE_ID_NFP4000_PF_NIC)
2200 	},
2201 	{
2202 		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
2203 				PCI_DEVICE_ID_NFP6000_PF_NIC)
2204 	},
2205 	{
2206 		RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE,
2207 				PCI_DEVICE_ID_NFP3800_PF_NIC)
2208 	},
2209 	{
2210 		RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE,
2211 				PCI_DEVICE_ID_NFP4000_PF_NIC)
2212 	},
2213 	{
2214 		RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE,
2215 				PCI_DEVICE_ID_NFP6000_PF_NIC)
2216 	},
2217 	{
2218 		.vendor_id = 0,
2219 	},
2220 };
2221 
2222 static int
2223 nfp_pci_uninit(struct rte_eth_dev *eth_dev)
2224 {
2225 	uint16_t port_id;
2226 	struct rte_pci_device *pci_dev;
2227 
2228 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2229 
2230 	/* Free up all physical ports under PF */
2231 	RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device)
2232 		rte_eth_dev_close(port_id);
2233 	/*
2234 	 * Ports can be closed and freed but hotplugging is not
2235 	 * currently supported.
2236 	 */
2237 	return -ENOTSUP;
2238 }
2239 
2240 static int
2241 eth_nfp_pci_remove(struct rte_pci_device *pci_dev)
2242 {
2243 	return rte_eth_dev_pci_generic_remove(pci_dev, nfp_pci_uninit);
2244 }
2245 
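/*
 * RTE_PCI_DRV_NEED_MAPPING asks the EAL to map the PCI BARs before probe;
 * RTE_PCI_DRV_INTR_LSC advertises link status change interrupt support.
 */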
2246 static struct rte_pci_driver rte_nfp_net_pf_pmd = {
2247 	.id_table = pci_id_nfp_pf_net_map,
2248 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
2249 	.probe = nfp_pf_pci_probe,
2250 	.remove = eth_nfp_pci_remove,
2251 };
2252 
2253 RTE_PMD_REGISTER_PCI(NFP_PF_DRIVER_NAME, rte_nfp_net_pf_pmd);
2254 RTE_PMD_REGISTER_PCI_TABLE(NFP_PF_DRIVER_NAME, pci_id_nfp_pf_net_map);
2255 RTE_PMD_REGISTER_KMOD_DEP(NFP_PF_DRIVER_NAME, "* igb_uio | uio_pci_generic | vfio");
2256 RTE_PMD_REGISTER_PARAM_STRING(NFP_PF_DRIVER_NAME, NFP_PF_FORCE_RELOAD_FW "=<0|1>");
2257