xref: /dpdk/drivers/net/nfp/nfp_ethdev.c (revision 7917b0d38e92e8b9ec5a870415b791420e10f11a)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2014-2021 Netronome Systems, Inc.
3  * All rights reserved.
4  *
5  * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
6  */
7 
#include <errno.h>
#include <limits.h>
#include <stdlib.h>
#include <unistd.h>

#include <eal_firmware.h>
#include <rte_alarm.h>
#include <rte_kvargs.h>
#include <rte_pci.h>

#include "flower/nfp_flower.h"
#include "nfd3/nfp_nfd3.h"
#include "nfdk/nfp_nfdk.h"
#include "nfpcore/nfp_cpp.h"
#include "nfpcore/nfp_elf.h"
#include "nfpcore/nfp_hwinfo.h"
#include "nfpcore/nfp_rtsym.h"
#include "nfpcore/nfp_nsp.h"
#include "nfpcore/nfp6000_pcie.h"
#include "nfpcore/nfp_resource.h"
#include "nfpcore/nfp_sync.h"

#include "nfp_cpp_bridge.h"
#include "nfp_ipsec.h"
#include "nfp_logs.h"
#include "nfp_net_flow.h"
#include "nfp_rxtx_vec.h"
32 
33 /* 64-bit per app capabilities */
34 #define NFP_NET_APP_CAP_SP_INDIFF       RTE_BIT64(0) /* Indifferent to port speed */
35 
36 #define NFP_PF_DRIVER_NAME net_nfp_pf
37 #define NFP_PF_FORCE_RELOAD_FW   "force_reload_fw"
38 #define NFP_CPP_SERVICE_ENABLE   "cpp_service_enable"
39 #define NFP_QUEUE_PER_VF     1
40 
/** Per-port parameters handed to nfp_net_init() during ethdev creation. */
struct nfp_net_init {
	/** Sequential physical port number, only valid for CoreNIC firmware */
	uint8_t idx;

	/** Internal port number as seen from NFP */
	uint8_t nfp_idx;

	/** Backpointer to the PF-wide private data shared by all ports */
	struct nfp_net_hw_priv *hw_priv;
};
50 
51 static int
52 nfp_devarg_handle_int(const char *key,
53 		const char *value,
54 		void *extra_args)
55 {
56 	char *end_ptr;
57 	uint64_t *num = extra_args;
58 
59 	if (value == NULL)
60 		return -EPERM;
61 
62 	*num = strtoul(value, &end_ptr, 10);
63 	if (*num == ULONG_MAX) {
64 		PMD_DRV_LOG(ERR, "%s: '%s' is not a valid param", key, value);
65 		return -ERANGE;
66 	} else if (value == end_ptr) {
67 		return -EPERM;
68 	}
69 
70 	return 0;
71 }
72 
73 static int
74 nfp_devarg_parse_bool_para(struct rte_kvargs *kvlist,
75 		const char *key_match,
76 		bool *value_ret)
77 {
78 	int ret;
79 	uint32_t count;
80 	uint64_t value;
81 
82 	count = rte_kvargs_count(kvlist, key_match);
83 	if (count == 0)
84 		return 0;
85 
86 	if (count > 1) {
87 		PMD_DRV_LOG(ERR, "Too much bool arguments: %s", key_match);
88 		return -EINVAL;
89 	}
90 
91 	ret = rte_kvargs_process(kvlist, key_match, &nfp_devarg_handle_int, &value);
92 	if (ret != 0)
93 		return -EINVAL;
94 
95 	if (value == 1) {
96 		*value_ret = true;
97 	} else if (value == 0) {
98 		*value_ret = false;
99 	} else {
100 		PMD_DRV_LOG(ERR, "The param does not work, the format is %s=0/1",
101 				key_match);
102 		return -EINVAL;
103 	}
104 
105 	return 0;
106 }
107 
108 static int
109 nfp_devargs_parse(struct nfp_devargs *nfp_devargs_param,
110 		const struct rte_devargs *devargs)
111 {
112 	int ret;
113 	struct rte_kvargs *kvlist;
114 
115 	if (devargs == NULL)
116 		return 0;
117 
118 	kvlist = rte_kvargs_parse(devargs->args, NULL);
119 	if (kvlist == NULL)
120 		return -EINVAL;
121 
122 	ret = nfp_devarg_parse_bool_para(kvlist, NFP_PF_FORCE_RELOAD_FW,
123 			&nfp_devargs_param->force_reload_fw);
124 	if (ret != 0)
125 		goto exit;
126 
127 	ret = nfp_devarg_parse_bool_para(kvlist, NFP_CPP_SERVICE_ENABLE,
128 			&nfp_devargs_param->cpp_service_enable);
129 	if (ret != 0)
130 		goto exit;
131 
132 exit:
133 	rte_kvargs_free(kvlist);
134 
135 	return ret;
136 }
137 
138 static void
139 nfp_net_pf_read_mac(struct nfp_app_fw_nic *app_fw_nic,
140 		uint16_t port,
141 		struct nfp_net_hw_priv *hw_priv)
142 {
143 	struct nfp_net_hw *hw;
144 	struct nfp_eth_table *nfp_eth_table;
145 
146 	/* Grab a pointer to the correct physical port */
147 	hw = app_fw_nic->ports[port];
148 
149 	nfp_eth_table = hw_priv->pf_dev->nfp_eth_table;
150 
151 	rte_ether_addr_copy(&nfp_eth_table->ports[port].mac_addr, &hw->super.mac_addr);
152 }
153 
154 static uint32_t
155 nfp_net_speed_bitmap2speed(uint32_t speeds_bitmap)
156 {
157 	switch (speeds_bitmap) {
158 	case RTE_ETH_LINK_SPEED_10M_HD:
159 		return RTE_ETH_SPEED_NUM_10M;
160 	case RTE_ETH_LINK_SPEED_10M:
161 		return RTE_ETH_SPEED_NUM_10M;
162 	case RTE_ETH_LINK_SPEED_100M_HD:
163 		return RTE_ETH_SPEED_NUM_100M;
164 	case RTE_ETH_LINK_SPEED_100M:
165 		return RTE_ETH_SPEED_NUM_100M;
166 	case RTE_ETH_LINK_SPEED_1G:
167 		return RTE_ETH_SPEED_NUM_1G;
168 	case RTE_ETH_LINK_SPEED_2_5G:
169 		return RTE_ETH_SPEED_NUM_2_5G;
170 	case RTE_ETH_LINK_SPEED_5G:
171 		return RTE_ETH_SPEED_NUM_5G;
172 	case RTE_ETH_LINK_SPEED_10G:
173 		return RTE_ETH_SPEED_NUM_10G;
174 	case RTE_ETH_LINK_SPEED_20G:
175 		return RTE_ETH_SPEED_NUM_20G;
176 	case RTE_ETH_LINK_SPEED_25G:
177 		return RTE_ETH_SPEED_NUM_25G;
178 	case RTE_ETH_LINK_SPEED_40G:
179 		return RTE_ETH_SPEED_NUM_40G;
180 	case RTE_ETH_LINK_SPEED_50G:
181 		return RTE_ETH_SPEED_NUM_50G;
182 	case RTE_ETH_LINK_SPEED_56G:
183 		return RTE_ETH_SPEED_NUM_56G;
184 	case RTE_ETH_LINK_SPEED_100G:
185 		return RTE_ETH_SPEED_NUM_100G;
186 	case RTE_ETH_LINK_SPEED_200G:
187 		return RTE_ETH_SPEED_NUM_200G;
188 	case RTE_ETH_LINK_SPEED_400G:
189 		return RTE_ETH_SPEED_NUM_400G;
190 	default:
191 		return RTE_ETH_SPEED_NUM_NONE;
192 	}
193 }
194 
195 static int
196 nfp_net_nfp4000_speed_configure_check(uint16_t port_id,
197 		uint32_t configure_speed,
198 		struct nfp_eth_table *nfp_eth_table)
199 {
200 	switch (port_id) {
201 	case 0:
202 		if (configure_speed == RTE_ETH_SPEED_NUM_25G &&
203 				nfp_eth_table->ports[1].speed == RTE_ETH_SPEED_NUM_10G) {
204 			PMD_DRV_LOG(ERR, "The speed configuration is not supported for NFP4000.");
205 			return -ENOTSUP;
206 		}
207 		break;
208 	case 1:
209 		if (configure_speed == RTE_ETH_SPEED_NUM_10G &&
210 				nfp_eth_table->ports[0].speed == RTE_ETH_SPEED_NUM_25G) {
211 			PMD_DRV_LOG(ERR, "The speed configuration is not supported for NFP4000.");
212 			return -ENOTSUP;
213 		}
214 		break;
215 	default:
216 		PMD_DRV_LOG(ERR, "The port id is invalid.");
217 		return -EINVAL;
218 	}
219 
220 	return 0;
221 }
222 
223 static int
224 nfp_net_speed_autoneg_set(struct nfp_net_hw_priv *hw_priv,
225 		struct nfp_eth_table_port *eth_port)
226 {
227 	int ret;
228 	struct nfp_nsp *nsp;
229 
230 	nsp = nfp_eth_config_start(hw_priv->pf_dev->cpp, eth_port->index);
231 	if (nsp == NULL) {
232 		PMD_DRV_LOG(ERR, "Could not get NSP.");
233 		return -EIO;
234 	}
235 
236 	ret = nfp_eth_set_aneg(nsp, NFP_ANEG_AUTO);
237 	if (ret != 0) {
238 		PMD_DRV_LOG(ERR, "Failed to set ANEG enable.");
239 		nfp_eth_config_cleanup_end(nsp);
240 		return ret;
241 	}
242 
243 	return nfp_eth_config_commit_end(nsp);
244 }
245 
246 static int
247 nfp_net_speed_fixed_set(struct nfp_net_hw_priv *hw_priv,
248 		struct nfp_eth_table_port *eth_port,
249 		uint32_t configure_speed)
250 {
251 	int ret;
252 	struct nfp_nsp *nsp;
253 
254 	nsp = nfp_eth_config_start(hw_priv->pf_dev->cpp, eth_port->index);
255 	if (nsp == NULL) {
256 		PMD_DRV_LOG(ERR, "Could not get NSP.");
257 		return -EIO;
258 	}
259 
260 	ret = nfp_eth_set_aneg(nsp, NFP_ANEG_DISABLED);
261 	if (ret != 0) {
262 		PMD_DRV_LOG(ERR, "Failed to set ANEG disable.");
263 		goto config_cleanup;
264 	}
265 
266 	ret = nfp_eth_set_speed(nsp, configure_speed);
267 	if (ret != 0) {
268 		PMD_DRV_LOG(ERR, "Failed to set speed.");
269 		goto config_cleanup;
270 	}
271 
272 	return nfp_eth_config_commit_end(nsp);
273 
274 config_cleanup:
275 	nfp_eth_config_cleanup_end(nsp);
276 
277 	return ret;
278 }
279 
/**
 * Apply the speed/auto-negotiation settings from dev_conf.link_speeds
 * to the physical port backing this ethdev.
 *
 * Skips the NSP call when the port is already in the requested state,
 * and records any successful change in pf_dev->speed_updated.
 *
 * @return 0 on success, negative errno on invalid config or NSP failure.
 */
static int
nfp_net_speed_configure(struct rte_eth_dev *dev)
{
	int ret;
	uint8_t idx;
	uint32_t speed_capa;
	uint32_t link_speeds;
	uint32_t configure_speed;
	struct nfp_eth_table_port *eth_port;
	struct nfp_eth_table *nfp_eth_table;
	struct nfp_net_hw *net_hw = dev->data->dev_private;
	struct nfp_net_hw_priv *hw_priv = dev->process_private;

	idx = nfp_net_get_idx(dev);
	nfp_eth_table = hw_priv->pf_dev->nfp_eth_table;
	eth_port = &nfp_eth_table->ports[idx];

	speed_capa = hw_priv->pf_dev->speed_capa;
	if (speed_capa == 0) {
		PMD_DRV_LOG(ERR, "Speed_capa is invalid.");
		return -EINVAL;
	}

	/*
	 * Map the requested link_speeds bitmap (masked by what the port
	 * supports) to a concrete speed number. A result of NONE is only
	 * acceptable when the user asked for autonegotiation.
	 */
	link_speeds = dev->data->dev_conf.link_speeds;
	configure_speed = nfp_net_speed_bitmap2speed(speed_capa & link_speeds);
	if (configure_speed == RTE_ETH_SPEED_NUM_NONE &&
			link_speeds != RTE_ETH_LINK_SPEED_AUTONEG) {
		PMD_DRV_LOG(ERR, "Configured speed is invalid.");
		return -EINVAL;
	}

	/* NFP4000 does not allow the port 0 25Gbps and port 1 10Gbps at the same time. */
	if (net_hw->device_id == PCI_DEVICE_ID_NFP4000_PF_NIC) {
		ret = nfp_net_nfp4000_speed_configure_check(idx,
				configure_speed, nfp_eth_table);
		if (ret != 0) {
			PMD_DRV_LOG(ERR, "Failed to configure speed for NFP4000.");
			return ret;
		}
	}

	/*
	 * NOTE(review): this compares a speed number against the AUTONEG
	 * bitmap value — it works because RTE_ETH_LINK_SPEED_AUTONEG and
	 * RTE_ETH_SPEED_NUM_NONE are both 0; confirm against rte_ethdev.h
	 * before changing either branch.
	 */
	if (configure_speed == RTE_ETH_LINK_SPEED_AUTONEG) {
		/* Nothing to do if ANEG is unsupported or already enabled. */
		if (!eth_port->supp_aneg)
			return 0;

		if (eth_port->aneg == NFP_ANEG_AUTO)
			return 0;

		ret = nfp_net_speed_autoneg_set(hw_priv, eth_port);
		if (ret != 0) {
			PMD_DRV_LOG(ERR, "Failed to set speed autoneg.");
			return ret;
		}
	} else {
		/* Nothing to do if the fixed speed is already in effect. */
		if (eth_port->aneg == NFP_ANEG_DISABLED && configure_speed == eth_port->speed)
			return 0;

		ret = nfp_net_speed_fixed_set(hw_priv, eth_port, configure_speed);
		if (ret != 0) {
			PMD_DRV_LOG(ERR, "Failed to set speed fixed.");
			return ret;
		}
	}

	hw_priv->pf_dev->speed_updated = true;

	return 0;
}
348 
/**
 * Start the device: configure port speed, RX interrupt mapping,
 * offloads and rings, push the new control words to the firmware,
 * allocate RX mbufs, and finally bring the physical port up.
 *
 * @return 0 on success, negative errno on any setup failure.
 */
static int
nfp_net_start(struct rte_eth_dev *dev)
{
	int ret;
	uint16_t i;
	struct nfp_hw *hw;
	uint32_t new_ctrl;
	uint32_t update = 0;
	uint32_t cap_extend;
	uint32_t intr_vector;
	uint32_t ctrl_extend = 0;
	struct nfp_net_hw *net_hw;
	struct nfp_pf_dev *pf_dev;
	struct rte_eth_rxmode *rxmode;
	struct rte_eth_txmode *txmode;
	struct nfp_net_hw_priv *hw_priv;
	struct nfp_app_fw_nic *app_fw_nic;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;

	net_hw = dev->data->dev_private;
	hw_priv = dev->process_private;
	pf_dev = hw_priv->pf_dev;
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);
	hw = &net_hw->super;

	/* Disabling queues just in case... */
	nfp_net_disable_queues(dev);

	/* Enabling the required queues in the device */
	nfp_net_enable_queues(dev);

	/* Configure the port speed and the auto-negotiation mode. */
	ret = nfp_net_speed_configure(dev);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to set the speed and auto-negotiation mode.");
		return ret;
	}

	/* Check and configure queue intr-vector mapping */
	if (dev->data->dev_conf.intr_conf.rxq != 0) {
		/* Per-queue RX interrupts are not available on a multiport PF. */
		if (app_fw_nic->multiport) {
			PMD_INIT_LOG(ERR, "PMD rx interrupt is not supported "
					"with NFP multiport PF");
				return -EINVAL;
		}

		if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_UIO) {
			/*
			 * Better not to share LSC with RX interrupts.
			 * Unregistering LSC interrupt handler.
			 */
			rte_intr_callback_unregister(intr_handle,
					nfp_net_dev_interrupt_handler, (void *)dev);

			if (dev->data->nb_rx_queues > 1) {
				PMD_INIT_LOG(ERR, "PMD rx interrupt only "
						"supports 1 queue with UIO");
				return -EIO;
			}
		}

		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector) != 0)
			return -1;

		nfp_configure_rx_interrupt(dev, intr_handle);
		update = NFP_NET_CFG_UPDATE_MSIX;
	}

	/* Checking MTU set */
	if (dev->data->mtu > net_hw->flbufsz) {
		PMD_INIT_LOG(ERR, "MTU (%u) can't be larger than the current NFP_FRAME_SIZE (%u)",
				dev->data->mtu, net_hw->flbufsz);
		return -ERANGE;
	}

	rte_intr_enable(intr_handle);

	/* Derive the ctrl-word bits implied by the configured offloads. */
	new_ctrl = nfp_check_offloads(dev);

	/* Writing configuration parameters in the device */
	nfp_net_params_setup(net_hw);

	rxmode = &dev->data->dev_conf.rxmode;
	if ((rxmode->offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH) != 0) {
		nfp_net_rss_config_default(dev);
		update |= NFP_NET_CFG_UPDATE_RSS;
		new_ctrl |= nfp_net_cfg_ctrl_rss(hw->cap);
	}

	/* Enable device */
	new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;

	update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;

	txmode = &dev->data->dev_conf.txmode;
	/* Enable vxlan */
	if ((txmode->offloads & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO) != 0) {
		if ((hw->cap & NFP_NET_CFG_CTRL_VXLAN) != 0) {
			new_ctrl |= NFP_NET_CFG_CTRL_VXLAN;
			update |= NFP_NET_CFG_UPDATE_VXLAN;
		}
	}

	if ((hw->cap & NFP_NET_CFG_CTRL_RINGCFG) != 0)
		new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;

	if ((hw->cap & NFP_NET_CFG_CTRL_TXRWB) != 0)
		new_ctrl |= NFP_NET_CFG_CTRL_TXRWB;

	/* Commit the new control word to the firmware. */
	if (nfp_reconfig(hw, new_ctrl, update) != 0)
		return -EIO;

	hw->ctrl = new_ctrl;

	/* Enable packet type offload by extend ctrl word1. */
	cap_extend = hw->cap_ext;
	if ((cap_extend & NFP_NET_CFG_CTRL_PKT_TYPE) != 0)
		ctrl_extend = NFP_NET_CFG_CTRL_PKT_TYPE;

	/* IPsec bits are set only when security offload was requested and supported. */
	if ((rxmode->offloads & RTE_ETH_RX_OFFLOAD_SECURITY) != 0 ||
			(txmode->offloads & RTE_ETH_TX_OFFLOAD_SECURITY) != 0) {
		if ((cap_extend & NFP_NET_CFG_CTRL_IPSEC) != 0)
			ctrl_extend |= NFP_NET_CFG_CTRL_IPSEC |
					NFP_NET_CFG_CTRL_IPSEC_SM_LOOKUP |
					NFP_NET_CFG_CTRL_IPSEC_LM_LOOKUP;
	}

	/* Enable flow steer by extend ctrl word1. */
	if ((cap_extend & NFP_NET_CFG_CTRL_FLOW_STEER) != 0)
		ctrl_extend |= NFP_NET_CFG_CTRL_FLOW_STEER;

	update = NFP_NET_CFG_UPDATE_GEN;
	if (nfp_ext_reconfig(hw, ctrl_extend, update) != 0)
		return -EIO;

	hw->ctrl_ext = ctrl_extend;

	/*
	 * Allocating rte mbufs for configured rx queues.
	 * This requires queues being enabled before.
	 */
	if (nfp_net_rx_freelist_setup(dev) != 0) {
		ret = -ENOMEM;
		goto error;
	}

	/* Configure the physical port up */
	nfp_eth_set_configured(pf_dev->cpp, net_hw->nfp_idx, 1);

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;

error:
	/*
	 * An error returned by this function should mean the app
	 * exiting and then the system releasing all the memory
	 * allocated even memory coming from hugepages.
	 *
	 * The device could be enabled at this point with some queues
	 * ready for getting packets. This is true if the call to
	 * nfp_net_rx_freelist_setup() succeeds for some queues but
	 * fails for subsequent queues.
	 *
	 * This should make the app exiting but better if we tell the
	 * device first.
	 */
	nfp_net_disable_queues(dev);

	return ret;
}
525 
526 /* Set the link up. */
527 static int
528 nfp_net_set_link_up(struct rte_eth_dev *dev)
529 {
530 	struct nfp_net_hw *hw;
531 	struct nfp_net_hw_priv *hw_priv;
532 
533 	hw = dev->data->dev_private;
534 	hw_priv = dev->process_private;
535 
536 	return nfp_eth_set_configured(hw_priv->pf_dev->cpp, hw->nfp_idx, 1);
537 }
538 
539 /* Set the link down. */
540 static int
541 nfp_net_set_link_down(struct rte_eth_dev *dev)
542 {
543 	struct nfp_net_hw *hw;
544 	struct nfp_net_hw_priv *hw_priv;
545 
546 	hw = dev->data->dev_private;
547 	hw_priv = dev->process_private;
548 
549 	return nfp_eth_set_configured(hw_priv->pf_dev->cpp, hw->nfp_idx, 0);
550 }
551 
552 static void
553 nfp_net_beat_timer(void *arg)
554 {
555 	uint64_t cur_sec;
556 	struct nfp_multi_pf *multi_pf = arg;
557 
558 	cur_sec = rte_rdtsc();
559 	nn_writeq(cur_sec, multi_pf->beat_addr + NFP_BEAT_OFFSET(multi_pf->function_id));
560 
561 	/* Beat once per second. */
562 	if (rte_eal_alarm_set(1000 * 1000, nfp_net_beat_timer,
563 			(void *)multi_pf) < 0) {
564 		PMD_DRV_LOG(ERR, "Error setting alarm");
565 	}
566 }
567 
568 static int
569 nfp_net_keepalive_init(struct nfp_cpp *cpp,
570 		struct nfp_multi_pf *multi_pf)
571 {
572 	uint8_t *base;
573 	uint64_t addr;
574 	uint32_t size;
575 	uint32_t cpp_id;
576 	struct nfp_resource *res;
577 
578 	res = nfp_resource_acquire(cpp, NFP_RESOURCE_KEEPALIVE);
579 	if (res == NULL)
580 		return -EIO;
581 
582 	cpp_id = nfp_resource_cpp_id(res);
583 	addr = nfp_resource_address(res);
584 	size = nfp_resource_size(res);
585 
586 	nfp_resource_release(res);
587 
588 	/* Allocate a fixed area for keepalive. */
589 	base = nfp_cpp_map_area(cpp, cpp_id, addr, size, &multi_pf->beat_area);
590 	if (base == NULL) {
591 		PMD_DRV_LOG(ERR, "Failed to map area for keepalive.");
592 		return -EIO;
593 	}
594 
595 	multi_pf->beat_addr = base;
596 
597 	return 0;
598 }
599 
/* Unmap the keepalive area mapped by nfp_net_keepalive_init(). */
static void
nfp_net_keepalive_uninit(struct nfp_multi_pf *multi_pf)
{
	nfp_cpp_area_release_free(multi_pf->beat_area);
}
605 
606 static int
607 nfp_net_keepalive_start(struct nfp_multi_pf *multi_pf)
608 {
609 	if (rte_eal_alarm_set(1000 * 1000, nfp_net_beat_timer,
610 			(void *)multi_pf) < 0) {
611 		PMD_DRV_LOG(ERR, "Error setting alarm");
612 		return -EIO;
613 	}
614 
615 	return 0;
616 }
617 
/* Zero the heartbeat slot of the given PF function id. */
static void
nfp_net_keepalive_clear(uint8_t *beat_addr,
		uint8_t function_id)
{
	nn_writeq(0, beat_addr + NFP_BEAT_OFFSET(function_id));
}
624 
625 static void
626 nfp_net_keepalive_clear_others(const struct nfp_dev_info *dev_info,
627 		struct nfp_multi_pf *multi_pf)
628 {
629 	uint8_t port_num;
630 
631 	for (port_num = 0; port_num < dev_info->pf_num_per_unit; port_num++) {
632 		if (port_num == multi_pf->function_id)
633 			continue;
634 
635 		nfp_net_keepalive_clear(multi_pf->beat_addr, port_num);
636 	}
637 }
638 
/* Stop the periodic heartbeat alarm armed by nfp_net_keepalive_start(). */
static void
nfp_net_keepalive_stop(struct nfp_multi_pf *multi_pf)
{
	/* Cancel keepalive for multiple PF setup */
	rte_eal_alarm_cancel(nfp_net_beat_timer, (void *)multi_pf);
}
645 
646 static int
647 nfp_net_uninit(struct rte_eth_dev *eth_dev)
648 {
649 	struct nfp_net_hw *net_hw;
650 	struct nfp_net_hw_priv *hw_priv;
651 
652 	net_hw = eth_dev->data->dev_private;
653 	hw_priv = eth_dev->process_private;
654 
655 	if ((net_hw->super.cap_ext & NFP_NET_CFG_CTRL_FLOW_STEER) != 0)
656 		nfp_net_flow_priv_uninit(hw_priv->pf_dev, net_hw->idx);
657 
658 	rte_free(net_hw->eth_xstats_base);
659 	if ((net_hw->super.cap & NFP_NET_CFG_CTRL_TXRWB) != 0)
660 		nfp_net_txrwb_free(eth_dev);
661 	nfp_ipsec_uninit(eth_dev);
662 
663 	return 0;
664 }
665 
666 static void
667 nfp_cleanup_port_app_fw_nic(struct nfp_pf_dev *pf_dev,
668 		uint8_t id,
669 		struct rte_eth_dev *eth_dev)
670 {
671 	struct nfp_app_fw_nic *app_fw_nic;
672 
673 	app_fw_nic = pf_dev->app_fw_priv;
674 	if (app_fw_nic->ports[id] != NULL) {
675 		nfp_net_uninit(eth_dev);
676 		app_fw_nic->ports[id] = NULL;
677 	}
678 }
679 
/* Release the CoreNIC app's control BAR area and its private struct. */
static void
nfp_uninit_app_fw_nic(struct nfp_pf_dev *pf_dev)
{
	nfp_cpp_area_release_free(pf_dev->ctrl_area);
	rte_free(pf_dev->app_fw_priv);
}
686 
687 static void
688 nfp_net_vf_config_uninit(struct nfp_pf_dev *pf_dev)
689 {
690 	if (pf_dev->sriov_vf == 0)
691 		return;
692 
693 	nfp_cpp_area_release_free(pf_dev->vf_cfg_tbl_area);
694 	nfp_cpp_area_release_free(pf_dev->vf_area);
695 }
696 
/**
 * Primary-process teardown of all PF-wide resources: CPP service,
 * VF config areas, mapped BARs, symbol/port tables, keepalive state
 * and finally the pf_dev/hw_priv allocations themselves.
 */
void
nfp_pf_uninit(struct nfp_net_hw_priv *hw_priv)
{
	struct nfp_pf_dev *pf_dev = hw_priv->pf_dev;

	if (pf_dev->devargs.cpp_service_enable)
		nfp_disable_cpp_service(pf_dev);
	nfp_net_vf_config_uninit(pf_dev);
	nfp_cpp_area_release_free(pf_dev->mac_stats_area);
	nfp_cpp_area_release_free(pf_dev->qc_area);
	free(pf_dev->sym_tbl);
	/* Stop the beat timer and zero our slot before unmapping the area. */
	if (pf_dev->multi_pf.enabled) {
		nfp_net_keepalive_stop(&pf_dev->multi_pf);
		nfp_net_keepalive_clear(pf_dev->multi_pf.beat_addr, pf_dev->multi_pf.function_id);
		nfp_net_keepalive_uninit(&pf_dev->multi_pf);
	}
	free(pf_dev->nfp_eth_table);
	free(pf_dev->hwinfo);
	/* The CPP handle is freed last of the device handles since others depend on it. */
	nfp_cpp_free(pf_dev->cpp);
	nfp_sync_free(pf_dev->sync);
	rte_free(pf_dev);
	rte_free(hw_priv);
}
720 
/**
 * Secondary-process teardown: release only the handles this process
 * owns (symbol table, CPP, sync) plus the local pf_dev and hw_priv
 * allocations; hardware state owned by the primary is left untouched.
 */
static int
nfp_pf_secondary_uninit(struct nfp_net_hw_priv *hw_priv)
{
	struct nfp_pf_dev *pf_dev = hw_priv->pf_dev;

	free(pf_dev->sym_tbl);
	nfp_cpp_free(pf_dev->cpp);
	nfp_sync_free(pf_dev->sync);
	rte_free(pf_dev);
	rte_free(hw_priv);

	return 0;
}
734 
/* Reset and stop device. The device can not be restarted. */
static int
nfp_net_close(struct rte_eth_dev *dev)
{
	uint8_t i;
	uint8_t id;
	struct nfp_net_hw *hw;
	struct nfp_pf_dev *pf_dev;
	struct rte_pci_device *pci_dev;
	struct nfp_net_hw_priv *hw_priv;
	struct nfp_app_fw_nic *app_fw_nic;

	hw_priv = dev->process_private;

	/*
	 * In secondary process, a released eth device can be found by its name
	 * in shared memory.
	 * If the state of the eth device is RTE_ETH_DEV_UNUSED, it means the
	 * eth device has been released.
	 */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		if (dev->state == RTE_ETH_DEV_UNUSED)
			return 0;

		nfp_pf_secondary_uninit(hw_priv);
		return 0;
	}

	hw = dev->data->dev_private;
	pf_dev = hw_priv->pf_dev;
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);

	/*
	 * We assume that the DPDK application is stopping all the
	 * threads/queues before calling the device close function.
	 */
	nfp_net_disable_queues(dev);

	/* Clear queues */
	nfp_net_close_tx_queue(dev);
	nfp_net_close_rx_queue(dev);

	/* Cancel possible impending LSC work here before releasing the port */
	rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler, (void *)dev);

	/* Only free PF resources after all physical ports have been closed */
	/* Mark this port as unused and free device priv resources */
	nn_cfg_writeb(&hw->super, NFP_NET_CFG_LSC, 0xff);

	/* Only the CoreNIC firmware app is handled by this close path. */
	if (pf_dev->app_fw_id != NFP_APP_FW_CORE_NIC)
		return -EINVAL;

	nfp_cleanup_port_app_fw_nic(pf_dev, hw->idx, dev);

	/* Keep PF-wide resources while any sibling port is still open. */
	for (i = 0; i < pf_dev->total_phyports; i++) {
		id = nfp_function_id_get(pf_dev, i);

		/* Check to see if ports are still in use */
		if (app_fw_nic->ports[id] != NULL)
			return 0;
	}

	/* All ports closed: tear down the PF-wide state. */
	/* Enable in nfp_net_start() */
	rte_intr_disable(pci_dev->intr_handle);

	/* Register in nfp_net_init() */
	rte_intr_callback_unregister(pci_dev->intr_handle,
			nfp_net_dev_interrupt_handler, (void *)dev);

	nfp_uninit_app_fw_nic(pf_dev);
	nfp_pf_uninit(hw_priv);

	return 0;
}
810 
811 static int
812 nfp_net_find_vxlan_idx(struct nfp_net_hw *hw,
813 		uint16_t port,
814 		uint32_t *idx)
815 {
816 	uint32_t i;
817 	int free_idx = -1;
818 
819 	for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i++) {
820 		if (hw->vxlan_ports[i] == port) {
821 			free_idx = i;
822 			break;
823 		}
824 
825 		if (hw->vxlan_usecnt[i] == 0) {
826 			free_idx = i;
827 			break;
828 		}
829 	}
830 
831 	if (free_idx == -1)
832 		return -EINVAL;
833 
834 	*idx = free_idx;
835 
836 	return 0;
837 }
838 
839 static int
840 nfp_udp_tunnel_port_add(struct rte_eth_dev *dev,
841 		struct rte_eth_udp_tunnel *tunnel_udp)
842 {
843 	int ret;
844 	uint32_t idx;
845 	uint16_t vxlan_port;
846 	struct nfp_net_hw *hw;
847 	enum rte_eth_tunnel_type tnl_type;
848 
849 	hw = dev->data->dev_private;
850 	vxlan_port = tunnel_udp->udp_port;
851 	tnl_type   = tunnel_udp->prot_type;
852 
853 	if (tnl_type != RTE_ETH_TUNNEL_TYPE_VXLAN) {
854 		PMD_DRV_LOG(ERR, "Not VXLAN tunnel");
855 		return -ENOTSUP;
856 	}
857 
858 	ret = nfp_net_find_vxlan_idx(hw, vxlan_port, &idx);
859 	if (ret != 0) {
860 		PMD_DRV_LOG(ERR, "Failed find valid vxlan idx");
861 		return -EINVAL;
862 	}
863 
864 	if (hw->vxlan_usecnt[idx] == 0) {
865 		ret = nfp_net_set_vxlan_port(hw, idx, vxlan_port);
866 		if (ret != 0) {
867 			PMD_DRV_LOG(ERR, "Failed set vxlan port");
868 			return -EINVAL;
869 		}
870 	}
871 
872 	hw->vxlan_usecnt[idx]++;
873 
874 	return 0;
875 }
876 
877 static int
878 nfp_udp_tunnel_port_del(struct rte_eth_dev *dev,
879 		struct rte_eth_udp_tunnel *tunnel_udp)
880 {
881 	int ret;
882 	uint32_t idx;
883 	uint16_t vxlan_port;
884 	struct nfp_net_hw *hw;
885 	enum rte_eth_tunnel_type tnl_type;
886 
887 	hw = dev->data->dev_private;
888 	vxlan_port = tunnel_udp->udp_port;
889 	tnl_type   = tunnel_udp->prot_type;
890 
891 	if (tnl_type != RTE_ETH_TUNNEL_TYPE_VXLAN) {
892 		PMD_DRV_LOG(ERR, "Not VXLAN tunnel");
893 		return -ENOTSUP;
894 	}
895 
896 	ret = nfp_net_find_vxlan_idx(hw, vxlan_port, &idx);
897 	if (ret != 0 || hw->vxlan_usecnt[idx] == 0) {
898 		PMD_DRV_LOG(ERR, "Failed find valid vxlan idx");
899 		return -EINVAL;
900 	}
901 
902 	hw->vxlan_usecnt[idx]--;
903 
904 	if (hw->vxlan_usecnt[idx] == 0) {
905 		ret = nfp_net_set_vxlan_port(hw, idx, 0);
906 		if (ret != 0) {
907 			PMD_DRV_LOG(ERR, "Failed set vxlan port");
908 			return -EINVAL;
909 		}
910 	}
911 
912 	return 0;
913 }
914 
/* Initialise and register driver with DPDK Application */
static const struct eth_dev_ops nfp_net_eth_dev_ops = {
	/* Device lifecycle */
	.dev_configure          = nfp_net_configure,
	.dev_start              = nfp_net_start,
	.dev_stop               = nfp_net_stop,
	.dev_set_link_up        = nfp_net_set_link_up,
	.dev_set_link_down      = nfp_net_set_link_down,
	.dev_close              = nfp_net_close,
	/* RX mode */
	.promiscuous_enable     = nfp_net_promisc_enable,
	.promiscuous_disable    = nfp_net_promisc_disable,
	.allmulticast_enable    = nfp_net_allmulticast_enable,
	.allmulticast_disable   = nfp_net_allmulticast_disable,
	.link_update            = nfp_net_link_update,
	/* Statistics */
	.stats_get              = nfp_net_stats_get,
	.stats_reset            = nfp_net_stats_reset,
	.xstats_get             = nfp_net_xstats_get,
	.xstats_reset           = nfp_net_xstats_reset,
	.xstats_get_names       = nfp_net_xstats_get_names,
	.xstats_get_by_id       = nfp_net_xstats_get_by_id,
	.xstats_get_names_by_id = nfp_net_xstats_get_names_by_id,
	/* Device info and configuration */
	.dev_infos_get          = nfp_net_infos_get,
	.dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
	.dev_ptypes_set         = nfp_net_ptypes_set,
	.mtu_set                = nfp_net_dev_mtu_set,
	.mac_addr_set           = nfp_net_set_mac_addr,
	.vlan_offload_set       = nfp_net_vlan_offload_set,
	/* RSS */
	.reta_update            = nfp_net_reta_update,
	.reta_query             = nfp_net_reta_query,
	.rss_hash_update        = nfp_net_rss_hash_update,
	.rss_hash_conf_get      = nfp_net_rss_hash_conf_get,
	/* Queue management */
	.rx_queue_setup         = nfp_net_rx_queue_setup,
	.rx_queue_release       = nfp_net_rx_queue_release,
	.rxq_info_get           = nfp_net_rx_queue_info_get,
	.tx_queue_setup         = nfp_net_tx_queue_setup,
	.tx_queue_release       = nfp_net_tx_queue_release,
	.txq_info_get           = nfp_net_tx_queue_info_get,
	.rx_queue_intr_enable   = nfp_rx_queue_intr_enable,
	.rx_queue_intr_disable  = nfp_rx_queue_intr_disable,
	/* Tunnels, flow control, flows, FEC */
	.udp_tunnel_port_add    = nfp_udp_tunnel_port_add,
	.udp_tunnel_port_del    = nfp_udp_tunnel_port_del,
	.fw_version_get         = nfp_net_firmware_version_get,
	.flow_ctrl_get          = nfp_net_flow_ctrl_get,
	.flow_ctrl_set          = nfp_net_flow_ctrl_set,
	.flow_ops_get           = nfp_net_flow_ops_get,
	.fec_get_capability     = nfp_net_fec_get_capability,
	.fec_get                = nfp_net_fec_get,
	.fec_set                = nfp_net_fec_set,
};
963 
/* Install the ops table and the datapath-specific burst functions. */
static inline void
nfp_net_ethdev_ops_mount(struct nfp_pf_dev *pf_dev,
		struct rte_eth_dev *eth_dev)
{
	/* Pick the TX burst function matching the firmware datapath version. */
	if (pf_dev->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3)
		eth_dev->tx_pkt_burst = nfp_net_nfd3_xmit_pkts;
	else
		nfp_net_nfdk_xmit_pkts_set(eth_dev);

	eth_dev->dev_ops = &nfp_net_eth_dev_ops;
	eth_dev->rx_queue_count = nfp_net_rx_queue_count;
	/* The helper selects the RX burst function for this device. */
	nfp_net_recv_pkts_set(eth_dev);
}
977 
978 static int
979 nfp_net_init(struct rte_eth_dev *eth_dev,
980 		void *para)
981 {
982 	int err;
983 	uint16_t port;
984 	uint64_t rx_base;
985 	uint64_t tx_base;
986 	struct nfp_hw *hw;
987 	struct nfp_net_hw *net_hw;
988 	struct nfp_pf_dev *pf_dev;
989 	struct nfp_net_init *hw_init;
990 	struct rte_pci_device *pci_dev;
991 	struct nfp_net_hw_priv *hw_priv;
992 	struct nfp_app_fw_nic *app_fw_nic;
993 
994 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
995 	net_hw = eth_dev->data->dev_private;
996 
997 	hw_init = para;
998 	net_hw->idx      = hw_init->idx;
999 	net_hw->nfp_idx  = hw_init->nfp_idx;
1000 	eth_dev->process_private = hw_init->hw_priv;
1001 
1002 	/* Use backpointer here to the PF of this eth_dev */
1003 	hw_priv = eth_dev->process_private;
1004 	pf_dev = hw_priv->pf_dev;
1005 
1006 	/* Use backpointer to the CoreNIC app struct */
1007 	app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv);
1008 
1009 	/* Add this device to the PF's array of physical ports */
1010 	app_fw_nic->ports[net_hw->idx] = net_hw;
1011 
1012 	port = net_hw->idx;
1013 	if (port > 7) {
1014 		PMD_DRV_LOG(ERR, "Port value is wrong");
1015 		return -ENODEV;
1016 	}
1017 
1018 	hw = &net_hw->super;
1019 
1020 	PMD_INIT_LOG(DEBUG, "Working with physical port number: %hu, "
1021 			"NFP internal port number: %d", port, net_hw->nfp_idx);
1022 
1023 	rte_eth_copy_pci_info(eth_dev, pci_dev);
1024 
1025 	if (pf_dev->multi_pf.enabled)
1026 		hw->ctrl_bar = pf_dev->ctrl_bar;
1027 	else
1028 		hw->ctrl_bar = pf_dev->ctrl_bar + (port * pf_dev->ctrl_bar_size);
1029 
1030 	net_hw->mac_stats = pf_dev->mac_stats_bar +
1031 				(net_hw->nfp_idx * NFP_MAC_STATS_SIZE);
1032 
1033 	PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar);
1034 	PMD_INIT_LOG(DEBUG, "MAC stats: %p", net_hw->mac_stats);
1035 
1036 	err = nfp_net_common_init(pf_dev, net_hw);
1037 	if (err != 0)
1038 		return err;
1039 
1040 	err = nfp_net_tlv_caps_parse(eth_dev);
1041 	if (err != 0) {
1042 		PMD_INIT_LOG(ERR, "Failed to parser TLV caps");
1043 		return err;
1044 	}
1045 
1046 	err = nfp_ipsec_init(eth_dev);
1047 	if (err != 0) {
1048 		PMD_INIT_LOG(ERR, "Failed to init IPsec module");
1049 		return err;
1050 	}
1051 
1052 	nfp_net_ethdev_ops_mount(pf_dev, eth_dev);
1053 
1054 	net_hw->eth_xstats_base = rte_malloc("rte_eth_xstat", sizeof(struct rte_eth_xstat) *
1055 			nfp_net_xstats_size(eth_dev), 0);
1056 	if (net_hw->eth_xstats_base == NULL) {
1057 		PMD_INIT_LOG(ERR, "no memory for xstats base values on device %s!",
1058 				pci_dev->device.name);
1059 		err = -ENOMEM;
1060 		goto ipsec_exit;
1061 	}
1062 
1063 	/* Work out where in the BAR the queues start. */
1064 	tx_base = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
1065 	rx_base = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
1066 
1067 	net_hw->tx_bar = pf_dev->qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
1068 	net_hw->rx_bar = pf_dev->qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ;
1069 
1070 	PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
1071 			hw->ctrl_bar, net_hw->tx_bar, net_hw->rx_bar);
1072 
1073 	nfp_net_cfg_queue_setup(net_hw);
1074 	net_hw->mtu = RTE_ETHER_MTU;
1075 
1076 	/* VLAN insertion is incompatible with LSOv2 */
1077 	if ((hw->cap & NFP_NET_CFG_CTRL_LSO2) != 0)
1078 		hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN;
1079 
1080 	nfp_net_log_device_information(net_hw, pf_dev);
1081 
1082 	/* Initializing spinlock for reconfigs */
1083 	rte_spinlock_init(&hw->reconfig_lock);
1084 
1085 	if ((port == 0 || pf_dev->multi_pf.enabled)) {
1086 		err = nfp_net_vf_config_app_init(net_hw, pf_dev);
1087 		if (err != 0) {
1088 			PMD_INIT_LOG(ERR, "Failed to init sriov module");
1089 			goto xstats_free;
1090 		}
1091 	}
1092 
1093 	/* Allocating memory for mac addr */
1094 	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", RTE_ETHER_ADDR_LEN, 0);
1095 	if (eth_dev->data->mac_addrs == NULL) {
1096 		PMD_INIT_LOG(ERR, "Failed to space for MAC address");
1097 		err = -ENOMEM;
1098 		goto xstats_free;
1099 	}
1100 
1101 	if ((hw->cap & NFP_NET_CFG_CTRL_TXRWB) != 0) {
1102 		err = nfp_net_txrwb_alloc(eth_dev);
1103 		if (err != 0)
1104 			goto xstats_free;
1105 	}
1106 
1107 	nfp_net_pf_read_mac(app_fw_nic, port, hw_priv);
1108 	nfp_write_mac(hw, &hw->mac_addr.addr_bytes[0]);
1109 
1110 	if (rte_is_valid_assigned_ether_addr(&hw->mac_addr) == 0) {
1111 		PMD_INIT_LOG(INFO, "Using random mac address for port %d", port);
1112 		/* Using random mac addresses for VFs */
1113 		rte_eth_random_addr(&hw->mac_addr.addr_bytes[0]);
1114 		nfp_write_mac(hw, &hw->mac_addr.addr_bytes[0]);
1115 	}
1116 
1117 	/* Copying mac address to DPDK eth_dev struct */
1118 	rte_ether_addr_copy(&hw->mac_addr, eth_dev->data->mac_addrs);
1119 
1120 	if ((hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR) == 0)
1121 		eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR;
1122 
1123 	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
1124 
1125 	PMD_INIT_LOG(INFO, "port %d VendorID=%#x DeviceID=%#x "
1126 			"mac=" RTE_ETHER_ADDR_PRT_FMT,
1127 			eth_dev->data->port_id, pci_dev->id.vendor_id,
1128 			pci_dev->id.device_id,
1129 			RTE_ETHER_ADDR_BYTES(&hw->mac_addr));
1130 
1131 	/* Registering LSC interrupt handler */
1132 	rte_intr_callback_register(pci_dev->intr_handle,
1133 			nfp_net_dev_interrupt_handler, (void *)eth_dev);
1134 	/* Telling the firmware about the LSC interrupt entry */
1135 	nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
1136 	/* Unmask the LSC interrupt */
1137 	nfp_net_irq_unmask(eth_dev);
1138 	/* Recording current stats counters values */
1139 	nfp_net_stats_reset(eth_dev);
1140 
1141 	if ((hw->cap_ext & NFP_NET_CFG_CTRL_FLOW_STEER) != 0) {
1142 		err = nfp_net_flow_priv_init(pf_dev, port);
1143 		if (err != 0) {
1144 			PMD_INIT_LOG(ERR, "Init net flow priv failed");
1145 			goto txrwb_free;
1146 		}
1147 	}
1148 
1149 	return 0;
1150 
1151 txrwb_free:
1152 	if ((hw->cap & NFP_NET_CFG_CTRL_TXRWB) != 0)
1153 		nfp_net_txrwb_free(eth_dev);
1154 xstats_free:
1155 	rte_free(net_hw->eth_xstats_base);
1156 ipsec_exit:
1157 	nfp_ipsec_uninit(eth_dev);
1158 
1159 	return err;
1160 }
1161 
1162 static int
1163 nfp_net_device_activate(struct nfp_cpp *cpp,
1164 		struct nfp_multi_pf *multi_pf)
1165 {
1166 	int ret;
1167 	struct nfp_nsp *nsp;
1168 
1169 	if (multi_pf->enabled && multi_pf->function_id != 0) {
1170 		nsp = nfp_nsp_open(cpp);
1171 		if (nsp == NULL) {
1172 			PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle");
1173 			return -EIO;
1174 		}
1175 
1176 		ret = nfp_nsp_device_activate(nsp);
1177 		nfp_nsp_close(nsp);
1178 		if (ret != 0 && ret != -EOPNOTSUPP)
1179 			return ret;
1180 	}
1181 
1182 	return 0;
1183 }
1184 
1185 #define DEFAULT_FW_PATH       "/lib/firmware/netronome"
1186 
/*
 * Resolve which firmware file should be loaded for this device.
 *
 * Candidate files under DEFAULT_FW_PATH are probed with access(2) in
 * priority order:
 *   1. serial-<cpp serial>-<interface>.nffw  (device specific)
 *   2. pci-<pci device name>.nffw            (slot specific)
 *   3. <nffw/assembly partno>.nffw           (model specific)
 *   4. nic_<partno>_<ports>x<speed>.nffw     (card type and media)
 *
 * On success, @fw_name (of capacity @fw_size) holds the first existing
 * candidate path. Returns 0 on success, -ERANGE on unexpected serial
 * length, -EIO when no part number is present in HWinfo, -ENOENT when
 * none of the candidate files exist.
 */
static int
nfp_fw_get_name(struct rte_pci_device *dev,
		struct nfp_cpp *cpp,
		struct nfp_eth_table *nfp_eth_table,
		struct nfp_hwinfo *hwinfo,
		char *fw_name,
		size_t fw_size)
{
	char serial[40];
	uint16_t interface;
	char card_desc[100];
	uint32_t cpp_serial_len;
	const char *nfp_fw_model;
	const uint8_t *cpp_serial;

	cpp_serial_len = nfp_cpp_serial(cpp, &cpp_serial);
	if (cpp_serial_len != NFP_SERIAL_LEN)
		return -ERANGE;

	interface = nfp_cpp_interface(cpp);

	/* Looking for firmware file in order of priority */

	/* First try to find a firmware image specific for this device */
	snprintf(serial, sizeof(serial),
			"serial-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x",
			cpp_serial[0], cpp_serial[1], cpp_serial[2], cpp_serial[3],
			cpp_serial[4], cpp_serial[5], interface >> 8, interface & 0xff);
	snprintf(fw_name, fw_size, "%s/%s.nffw", DEFAULT_FW_PATH, serial);

	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
	if (access(fw_name, F_OK) == 0)
		return 0;

	/* Then try the PCI name */
	snprintf(fw_name, fw_size, "%s/pci-%s.nffw", DEFAULT_FW_PATH,
			dev->name);

	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
	if (access(fw_name, F_OK) == 0)
		return 0;

	/* "nffw.partno" is preferred; "assembly.partno" is the fallback key. */
	nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "nffw.partno");
	if (nfp_fw_model == NULL) {
		nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "assembly.partno");
		if (nfp_fw_model == NULL) {
			PMD_DRV_LOG(ERR, "firmware model NOT found");
			return -EIO;
		}
	}

	/* And then try the model name */
	snprintf(card_desc, sizeof(card_desc), "%s.nffw", nfp_fw_model);
	snprintf(fw_name, fw_size, "%s/%s", DEFAULT_FW_PATH, card_desc);
	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
	if (access(fw_name, F_OK) == 0)
		return 0;

	/* Finally try the card type and media (speed reported in Mbps, shown in Gbps) */
	snprintf(card_desc, sizeof(card_desc), "nic_%s_%dx%d.nffw",
			nfp_fw_model, nfp_eth_table->count,
			nfp_eth_table->ports[0].speed / 1000);
	snprintf(fw_name, fw_size, "%s/%s", DEFAULT_FW_PATH, card_desc);
	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
	if (access(fw_name, F_OK) == 0)
		return 0;

	return -ENOENT;
}
1256 
1257 static int
1258 nfp_fw_upload(struct nfp_nsp *nsp,
1259 		char *fw_name)
1260 {
1261 	int err;
1262 	void *fw_buf;
1263 	size_t fsize;
1264 
1265 	err = rte_firmware_read(fw_name, &fw_buf, &fsize);
1266 	if (err != 0) {
1267 		PMD_DRV_LOG(ERR, "firmware %s not found!", fw_name);
1268 		return -ENOENT;
1269 	}
1270 
1271 	PMD_DRV_LOG(INFO, "Firmware file found at %s with size: %zu",
1272 			fw_name, fsize);
1273 	PMD_DRV_LOG(INFO, "Uploading the firmware ...");
1274 	if (nfp_nsp_load_fw(nsp, fw_buf, fsize) < 0) {
1275 		free(fw_buf);
1276 		PMD_DRV_LOG(ERR, "Firmware load failed.");
1277 		return -EIO;
1278 	}
1279 
1280 	PMD_DRV_LOG(INFO, "Done");
1281 
1282 	free(fw_buf);
1283 
1284 	return 0;
1285 }
1286 
1287 static void
1288 nfp_fw_unload(struct nfp_cpp *cpp)
1289 {
1290 	struct nfp_nsp *nsp;
1291 
1292 	nsp = nfp_nsp_open(cpp);
1293 	if (nsp == NULL)
1294 		return;
1295 
1296 	nfp_nsp_device_soft_reset(nsp);
1297 	nfp_nsp_close(nsp);
1298 }
1299 
1300 static int
1301 nfp_fw_check_change(struct nfp_cpp *cpp,
1302 		char *fw_name,
1303 		bool *fw_changed)
1304 {
1305 	int ret;
1306 	uint32_t new_version = 0;
1307 	uint32_t old_version = 0;
1308 
1309 	ret = nfp_elf_get_fw_version(&new_version, fw_name);
1310 	if (ret != 0)
1311 		return ret;
1312 
1313 	nfp_net_get_fw_version(cpp, &old_version);
1314 
1315 	if (new_version != old_version) {
1316 		PMD_DRV_LOG(INFO, "FW version is changed, new %u, old %u",
1317 				new_version, old_version);
1318 		*fw_changed = true;
1319 	} else {
1320 		PMD_DRV_LOG(INFO, "FW version is not changed and is %u", new_version);
1321 		*fw_changed = false;
1322 	}
1323 
1324 	return 0;
1325 }
1326 
1327 static int
1328 nfp_fw_reload(struct nfp_nsp *nsp,
1329 		char *fw_name,
1330 		int reset)
1331 {
1332 	int err;
1333 	bool reset_flag;
1334 
1335 	reset_flag = (reset == NFP_NSP_DRV_RESET_ALWAYS) ||
1336 			(reset == NFP_NSP_DRV_RESET_DISK);
1337 
1338 	if (reset_flag) {
1339 		err = nfp_nsp_device_soft_reset(nsp);
1340 		if (err != 0) {
1341 			PMD_DRV_LOG(ERR, "NFP firmware soft reset failed");
1342 			return err;
1343 		}
1344 	}
1345 
1346 	err = nfp_fw_upload(nsp, fw_name);
1347 	if (err != 0) {
1348 		PMD_DRV_LOG(ERR, "NFP firmware load failed");
1349 		return err;
1350 	}
1351 
1352 	return 0;
1353 }
1354 
/*
 * Decide whether a multi-PF unit can skip loading firmware.
 *
 * Samples the keepalive heartbeat words of all sibling PFs on the unit
 * (our own function is excluded). A sibling whose heartbeat advances is
 * "in use"; a sibling whose heartbeat word is 0 never started. Any other
 * sibling is counted in @abnormal (it was running but stopped beating).
 *
 * Returns true when loading can be skipped (every sibling is accounted
 * for, or at least one sibling is actively using the current firmware);
 * false when this PF should (re)load firmware. As a side effect, clears
 * *reload_fw when a live sibling makes a forced reload unsafe.
 */
static bool
nfp_fw_skip_load(const struct nfp_dev_info *dev_info,
		struct nfp_multi_pf *multi_pf,
		bool *reload_fw)
{
	uint8_t i;
	uint64_t tmp_beat;
	uint32_t port_num;
	uint8_t in_use = 0;
	/* One heartbeat snapshot and beat-word offset per PF on the unit. */
	uint64_t beat[dev_info->pf_num_per_unit];
	uint32_t offset[dev_info->pf_num_per_unit];
	/* Start by assuming every PF is abnormal; prove otherwise below. */
	uint8_t abnormal = dev_info->pf_num_per_unit;

	/* Give siblings time to tick at least once before sampling. */
	sleep(1);
	for (port_num = 0; port_num < dev_info->pf_num_per_unit; port_num++) {
		if (port_num == multi_pf->function_id) {
			abnormal--;
			continue;
		}

		offset[port_num] = NFP_BEAT_OFFSET(port_num);
		beat[port_num] = nn_readq(multi_pf->beat_addr + offset[port_num]);
		/* A zero beat word means this sibling never came up. */
		if (beat[port_num] == 0)
			abnormal--;
	}

	if (abnormal == 0)
		return true;

	/* Re-sample up to 3 times, 1s apart, looking for advancing beats. */
	for (i = 0; i < 3; i++) {
		sleep(1);
		for (port_num = 0; port_num < dev_info->pf_num_per_unit; port_num++) {
			if (port_num == multi_pf->function_id)
				continue;

			/* Already classified (never started or seen alive). */
			if (beat[port_num] == 0)
				continue;

			tmp_beat = nn_readq(multi_pf->beat_addr + offset[port_num]);
			if (tmp_beat != beat[port_num]) {
				in_use++;
				abnormal--;
				beat[port_num] = 0;
				/* A live sibling forbids a forced FW reload. */
				if (*reload_fw) {
					*reload_fw = false;
					PMD_DRV_LOG(ERR, "The param %s does not work",
							NFP_PF_FORCE_RELOAD_FW);
				}
			}
		}

		if (abnormal == 0)
			return true;
	}

	/*
	 * Some sibling is in use while others exited abnormally: keep the
	 * currently loaded firmware. NOTE(review): the message prints the
	 * 'abnormal' count under the 'in_use != 0' condition — confirm the
	 * intended wording.
	 */
	if (in_use != 0) {
		PMD_DRV_LOG(WARNING, "Abnormal %u != 0, the nic has port which is exit abnormally.",
				abnormal);
		return true;
	}

	return false;
}
1418 
1419 static int
1420 nfp_fw_reload_from_flash(struct nfp_nsp *nsp)
1421 {
1422 	int ret;
1423 
1424 	ret = nfp_nsp_load_stored_fw(nsp);
1425 	if (ret != 0) {
1426 		PMD_DRV_LOG(ERR, "Load firmware from flash failed.");
1427 		return -EACCES;
1428 	}
1429 
1430 	return 0;
1431 }
1432 
/*
 * Load the on-disk firmware image for a single-PF device.
 *
 * When firmware is already loaded and no reload is forced, the version
 * of the on-disk image is compared first and an identical version is
 * left untouched.
 */
static int
nfp_fw_reload_for_single_pf_from_disk(struct nfp_nsp *nsp,
		char *fw_name,
		struct nfp_cpp *cpp,
		bool force_reload_fw,
		int reset)
{
	int err;
	bool fw_changed = true;

	if (nfp_nsp_has_fw_loaded(nsp) && nfp_nsp_fw_loaded(nsp) && !force_reload_fw) {
		err = nfp_fw_check_change(cpp, fw_name, &fw_changed);
		if (err != 0)
			return err;
	}

	if (!fw_changed)
		return 0;

	return nfp_fw_reload(nsp, fw_name, reset);
}
1458 
1459 static int
1460 nfp_fw_reload_for_single_pf(struct nfp_nsp *nsp,
1461 		char *fw_name,
1462 		struct nfp_cpp *cpp,
1463 		bool force_reload_fw,
1464 		int reset,
1465 		int policy)
1466 {
1467 	int ret;
1468 
1469 	if (policy == NFP_NSP_APP_FW_LOAD_FLASH && nfp_nsp_has_stored_fw_load(nsp)) {
1470 		ret = nfp_fw_reload_from_flash(nsp);
1471 		if (ret != 0) {
1472 			PMD_DRV_LOG(ERR, "Load single PF firmware from flash failed.");
1473 			return ret;
1474 		}
1475 	} else if (fw_name[0] != 0) {
1476 		ret = nfp_fw_reload_for_single_pf_from_disk(nsp, fw_name, cpp,
1477 				force_reload_fw, reset);
1478 		if (ret != 0) {
1479 			PMD_DRV_LOG(ERR, "Load single PF firmware from disk failed.");
1480 			return ret;
1481 		}
1482 	} else {
1483 		PMD_DRV_LOG(ERR, "Not load firmware, please update flash or recofigure card.");
1484 		return -ENODATA;
1485 	}
1486 
1487 	return 0;
1488 }
1489 
/*
 * Load the on-disk firmware image for a multi-PF device.
 *
 * Version comparison is skipped when a reload is forced; the sibling
 * heartbeat check (nfp_fw_skip_load) may veto a reload when other PFs
 * are actively using the current firmware.
 */
static int
nfp_fw_reload_for_multi_pf_from_disk(struct nfp_nsp *nsp,
		char *fw_name,
		struct nfp_cpp *cpp,
		const struct nfp_dev_info *dev_info,
		struct nfp_multi_pf *multi_pf,
		bool force_reload_fw,
		int reset)
{
	int ret;
	bool fw_changed = true;
	bool skip_load_fw = false;
	bool reload_fw = force_reload_fw;

	if (nfp_nsp_has_fw_loaded(nsp) && nfp_nsp_fw_loaded(nsp) && !reload_fw) {
		ret = nfp_fw_check_change(cpp, fw_name, &fw_changed);
		if (ret != 0)
			return ret;
	}

	if (!fw_changed || reload_fw)
		skip_load_fw = nfp_fw_skip_load(dev_info, multi_pf, &reload_fw);

	if (skip_load_fw && !reload_fw)
		return 0;

	return nfp_fw_reload(nsp, fw_name, reset);
}
1522 
1523 static int
1524 nfp_fw_reload_for_multi_pf(struct nfp_nsp *nsp,
1525 		char *fw_name,
1526 		struct nfp_cpp *cpp,
1527 		const struct nfp_dev_info *dev_info,
1528 		struct nfp_multi_pf *multi_pf,
1529 		bool force_reload_fw,
1530 		int reset,
1531 		int policy)
1532 {
1533 	int err;
1534 
1535 	err = nfp_net_keepalive_init(cpp, multi_pf);
1536 	if (err != 0) {
1537 		PMD_DRV_LOG(ERR, "NFP init beat failed");
1538 		return err;
1539 	}
1540 
1541 	err = nfp_net_keepalive_start(multi_pf);
1542 	if (err != 0) {
1543 		PMD_DRV_LOG(ERR, "NFP write beat failed");
1544 		goto keepalive_uninit;
1545 	}
1546 
1547 	if (policy == NFP_NSP_APP_FW_LOAD_FLASH && nfp_nsp_has_stored_fw_load(nsp)) {
1548 		err = nfp_fw_reload_from_flash(nsp);
1549 		if (err != 0) {
1550 			PMD_DRV_LOG(ERR, "Load multi PF firmware from flash failed.");
1551 			goto keepalive_stop;
1552 		}
1553 	} else if (fw_name[0] != 0) {
1554 		err = nfp_fw_reload_for_multi_pf_from_disk(nsp, fw_name, cpp,
1555 				dev_info, multi_pf, force_reload_fw, reset);
1556 		if (err != 0) {
1557 			PMD_DRV_LOG(ERR, "Load multi PF firmware from disk failed.");
1558 			goto keepalive_stop;
1559 		}
1560 	} else {
1561 		PMD_DRV_LOG(ERR, "Not load firmware, please update flash or recofigure card.");
1562 		err = -ENODATA;
1563 		goto keepalive_stop;
1564 	}
1565 
1566 	nfp_net_keepalive_clear_others(dev_info, multi_pf);
1567 
1568 	return 0;
1569 
1570 keepalive_stop:
1571 	nfp_net_keepalive_stop(multi_pf);
1572 keepalive_uninit:
1573 	nfp_net_keepalive_uninit(multi_pf);
1574 
1575 	return err;
1576 }
1577 
/*
 * Parse the whole of @buf as a long integer in the given @base.
 *
 * Stricter than plain strtol(): rejects NULL input, empty strings,
 * trailing garbage and out-of-range values (strtol() clamps to
 * LONG_MAX/LONG_MIN with errno == ERANGE, which the previous version
 * silently accepted).
 *
 * Returns 0 on success with the result stored in *value, -EINVAL on
 * any parse failure.
 */
static int
nfp_strtol(const char *buf,
		int base,
		long *value)
{
	long val;
	char *end;

	if (buf == NULL || value == NULL)
		return -EINVAL;

	errno = 0;
	val = strtol(buf, &end, base);
	/* end == buf: no digits consumed; *end != 0: trailing garbage. */
	if (end == buf || *end != 0 || errno == ERANGE)
		return -EINVAL;

	*value = val;

	return 0;
}
1597 
1598 static int
1599 nfp_fw_policy_value_get(struct nfp_nsp *nsp,
1600 		const char *key,
1601 		const char *default_val,
1602 		int max_val,
1603 		int *value)
1604 {
1605 	int ret;
1606 	int64_t val;
1607 	char buf[64];
1608 
1609 	snprintf(buf, sizeof(buf), "%s", key);
1610 	ret = nfp_nsp_hwinfo_lookup_optional(nsp, buf, sizeof(buf), default_val);
1611 	if (ret != 0)
1612 		return ret;
1613 
1614 	ret = nfp_strtol(buf, 0, &val);
1615 	if (ret != 0 || val < 0 || val > max_val) {
1616 		PMD_DRV_LOG(WARNING, "Invalid value '%s' from '%s', ignoring",
1617 				buf, key);
1618 		/* Fall back to the default value */
1619 		ret = nfp_strtol(default_val, 0, &val);
1620 		if (ret != 0)
1621 			return ret;
1622 	}
1623 
1624 	*value = val;
1625 
1626 	return 0;
1627 }
1628 
/*
 * Top-level firmware setup for a PF device.
 *
 * Opens a single NSP handle for the whole sequence, reads the
 * 'abi_drv_reset' and 'app_fw_from_flash' policies from HWinfo,
 * resolves an on-disk firmware name when the policy allows disk
 * images, and dispatches to the single-PF or multi-PF load path.
 * The NSP handle is closed on every exit path.
 */
static int
nfp_fw_setup(struct rte_pci_device *dev,
		struct nfp_cpp *cpp,
		struct nfp_eth_table *nfp_eth_table,
		struct nfp_hwinfo *hwinfo,
		const struct nfp_dev_info *dev_info,
		struct nfp_multi_pf *multi_pf,
		bool force_reload_fw)
{
	int err;
	int reset;
	int policy;
	char fw_name[125];
	struct nfp_nsp *nsp;

	nsp = nfp_nsp_open(cpp);
	if (nsp == NULL) {
		PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle");
		return -EIO;
	}

	/* Driver soft-reset policy: DEFAULT..NEVER range. */
	err = nfp_fw_policy_value_get(nsp, "abi_drv_reset",
			NFP_NSP_DRV_RESET_DEFAULT, NFP_NSP_DRV_RESET_NEVER,
			&reset);
	if (err != 0) {
		PMD_DRV_LOG(ERR, "Get 'abi_drv_reset' from HWinfo failed.");
		goto close_nsp;
	}

	/* App firmware source policy: DEFAULT..PREF range. */
	err = nfp_fw_policy_value_get(nsp, "app_fw_from_flash",
			NFP_NSP_APP_FW_LOAD_DEFAULT, NFP_NSP_APP_FW_LOAD_PREF,
			&policy);
	if (err != 0) {
		PMD_DRV_LOG(ERR, "Get 'app_fw_from_flash' from HWinfo failed.");
		goto close_nsp;
	}

	/* Only resolve a disk image when policy does not mandate flash. */
	fw_name[0] = 0;
	if (policy != NFP_NSP_APP_FW_LOAD_FLASH) {
		err = nfp_fw_get_name(dev, cpp, nfp_eth_table, hwinfo, fw_name,
				sizeof(fw_name));
		if (err != 0) {
			PMD_DRV_LOG(ERR, "Can't find suitable firmware.");
			goto close_nsp;
		}
	}

	if (multi_pf->enabled)
		err = nfp_fw_reload_for_multi_pf(nsp, fw_name, cpp, dev_info,
				multi_pf, force_reload_fw, reset, policy);
	else
		err = nfp_fw_reload_for_single_pf(nsp, fw_name, cpp,
				force_reload_fw, reset, policy);

close_nsp:
	nfp_nsp_close(nsp);
	return err;
}
1687 
/* Multi-PF CoreNIC firmware exposes exactly one vNIC per PF. */
static inline bool
nfp_check_multi_pf_from_fw(uint32_t total_vnics)
{
	return total_vnics == 1;
}
1696 
1697 static inline bool
1698 nfp_check_multi_pf_from_nsp(struct rte_pci_device *pci_dev,
1699 		struct nfp_cpp *cpp)
1700 {
1701 	bool flag;
1702 	struct nfp_nsp *nsp;
1703 
1704 	nsp = nfp_nsp_open(cpp);
1705 	if (nsp == NULL) {
1706 		PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle");
1707 		return false;
1708 	}
1709 
1710 	flag = (nfp_nsp_get_abi_ver_major(nsp) > 0) &&
1711 			(pci_dev->id.device_id == PCI_DEVICE_ID_NFP3800_PF_NIC);
1712 
1713 	nfp_nsp_close(nsp);
1714 	return flag;
1715 }
1716 
/*
 * Tell the firmware that this PF runs in multi-PF mode.
 *
 * Maps this PF's vNIC ctrl BAR via its rtsym, validates the firmware
 * version, sets the driver's ctrl BAR size, and — when multi-PF is
 * enabled — checks the MULTI_PF capability bit and issues an extended
 * reconfig with NFP_NET_CFG_CTRL_MULTI_PF. A temporary on-stack
 * nfp_net_hw is used because no ethdev exists yet at this point.
 * The mapped CPP area is released on all paths.
 */
static int
nfp_enable_multi_pf(struct nfp_pf_dev *pf_dev)
{
	int err = 0;
	uint64_t tx_base;
	uint8_t *ctrl_bar;
	struct nfp_hw *hw;
	uint32_t cap_extend;
	struct nfp_net_hw net_hw;
	struct nfp_cpp_area *area;
	char name[RTE_ETH_NAME_MAX_LEN];

	memset(&net_hw, 0, sizeof(struct nfp_net_hw));

	/* Map the symbol table */
	pf_dev->ctrl_bar_size = NFP_NET_CFG_BAR_SZ_MIN;
	snprintf(name, sizeof(name), "_pf%u_net_bar0",
			pf_dev->multi_pf.function_id);
	ctrl_bar = nfp_rtsym_map(pf_dev->sym_tbl, name, pf_dev->ctrl_bar_size,
			&area);
	if (ctrl_bar == NULL) {
		PMD_INIT_LOG(ERR, "Failed to find data vNIC memory symbol");
		return -ENODEV;
	}

	hw = &net_hw.super;
	hw->ctrl_bar = ctrl_bar;

	/* Check the version from firmware */
	if (!nfp_net_version_check(hw, pf_dev)) {
		PMD_INIT_LOG(ERR, "Not the valid version.");
		err = -EINVAL;
		goto end;
	}

	/* Set the ctrl bar size */
	nfp_net_ctrl_bar_size_set(pf_dev);

	/* Nothing more to do for single-PF devices. */
	if (!pf_dev->multi_pf.enabled)
		goto end;

	/* Firmware must advertise multi-PF support before we enable it. */
	cap_extend = nn_cfg_readl(hw, NFP_NET_CFG_CAP_WORD1);
	if ((cap_extend & NFP_NET_CFG_CTRL_MULTI_PF) == 0) {
		PMD_INIT_LOG(ERR, "Loaded firmware doesn't support multiple PF");
		err = -EINVAL;
		goto end;
	}

	/* Minimal queue setup needed so the reconfig mechanism can run. */
	tx_base = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
	net_hw.tx_bar = pf_dev->qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
	nfp_net_cfg_queue_setup(&net_hw);
	rte_spinlock_init(&hw->reconfig_lock);
	nfp_ext_reconfig(&net_hw.super, NFP_NET_CFG_CTRL_MULTI_PF, NFP_NET_CFG_UPDATE_GEN);
end:
	nfp_cpp_area_release_free(area);
	return err;
}
1774 
1775 static bool
1776 nfp_app_fw_nic_total_phyports_check(struct nfp_pf_dev *pf_dev)
1777 {
1778 	int ret;
1779 	uint8_t id;
1780 	uint8_t total_phyports;
1781 	char vnic_name[RTE_ETH_NAME_MAX_LEN];
1782 
1783 	/* Read the number of vNIC's created for the PF */
1784 	id = nfp_function_id_get(pf_dev, 0);
1785 	snprintf(vnic_name, sizeof(vnic_name), "nfd_cfg_pf%u_num_ports", id);
1786 	total_phyports = nfp_rtsym_read_le(pf_dev->sym_tbl, vnic_name, &ret);
1787 	if (ret != 0 || total_phyports == 0 || total_phyports > 8) {
1788 		PMD_INIT_LOG(ERR, "%s symbol with wrong value", vnic_name);
1789 		return false;
1790 	}
1791 
1792 	if (pf_dev->multi_pf.enabled) {
1793 		if (!nfp_check_multi_pf_from_fw(total_phyports)) {
1794 			PMD_INIT_LOG(ERR, "NSP report multipf, but FW report not multipf");
1795 			return false;
1796 		}
1797 	} else {
1798 		/*
1799 		 * For single PF the number of vNICs exposed should be the same as the
1800 		 * number of physical ports.
1801 		 */
1802 		if (total_phyports != pf_dev->nfp_eth_table->count) {
1803 			PMD_INIT_LOG(ERR, "Total physical ports do not match number of vNICs");
1804 			return false;
1805 		}
1806 	}
1807 
1808 	return true;
1809 }
1810 
/*
 * Initialize the CoreNIC application firmware: allocate the app
 * container, map the shared ctrl BAR and create one ethdev per
 * physical port.
 *
 * On a port creation failure, every previously created ethdev for this
 * PF is destroyed again (the cleanup loop iterates all port names and
 * skips those that were never registered), then the ctrl area and app
 * struct are released.
 */
static int
nfp_init_app_fw_nic(struct nfp_net_hw_priv *hw_priv)
{
	uint8_t i;
	uint8_t id;
	int ret = 0;
	struct nfp_app_fw_nic *app_fw_nic;
	struct nfp_eth_table *nfp_eth_table;
	char bar_name[RTE_ETH_NAME_MAX_LEN];
	char port_name[RTE_ETH_NAME_MAX_LEN];
	struct nfp_pf_dev *pf_dev = hw_priv->pf_dev;
	struct nfp_net_init hw_init = {
		.hw_priv = hw_priv,
	};

	nfp_eth_table = pf_dev->nfp_eth_table;
	PMD_INIT_LOG(INFO, "Total physical ports: %d", nfp_eth_table->count);
	id = nfp_function_id_get(pf_dev, 0);

	/* Allocate memory for the CoreNIC app */
	app_fw_nic = rte_zmalloc("nfp_app_fw_nic", sizeof(*app_fw_nic), 0);
	if (app_fw_nic == NULL)
		return -ENOMEM;

	/* Point the app_fw_priv pointer in the PF to the coreNIC app */
	pf_dev->app_fw_priv = app_fw_nic;

	/* Check the number of vNIC's created for the PF */
	if (!nfp_app_fw_nic_total_phyports_check(pf_dev)) {
		ret = -ENODEV;
		goto app_cleanup;
	}

	/* Populate coreNIC app properties */
	if (pf_dev->total_phyports > 1)
		app_fw_nic->multiport = true;

	/* Map the symbol table: one ctrl BAR slot per physical port. */
	snprintf(bar_name, sizeof(bar_name), "_pf%u_net_bar0", id);
	pf_dev->ctrl_bar = nfp_rtsym_map(pf_dev->sym_tbl, bar_name,
			pf_dev->total_phyports * pf_dev->ctrl_bar_size,
			&pf_dev->ctrl_area);
	if (pf_dev->ctrl_bar == NULL) {
		PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for %s", bar_name);
		ret = -EIO;
		goto app_cleanup;
	}

	PMD_INIT_LOG(DEBUG, "ctrl bar: %p", pf_dev->ctrl_bar);

	/* Loop through all physical ports on PF */
	for (i = 0; i < pf_dev->total_phyports; i++) {
		/* Multi-PF devices use the PCI name directly; single PF appends _portN. */
		if (pf_dev->multi_pf.enabled)
			snprintf(port_name, sizeof(port_name), "%s",
					pf_dev->pci_dev->device.name);
		else
			snprintf(port_name, sizeof(port_name), "%s_port%u",
					pf_dev->pci_dev->device.name, i);

		id = nfp_function_id_get(pf_dev, i);
		hw_init.idx = id;
		hw_init.nfp_idx = nfp_eth_table->ports[id].index;
		ret = rte_eth_dev_create(&pf_dev->pci_dev->device, port_name,
				sizeof(struct nfp_net_hw), NULL, NULL,
				nfp_net_init, &hw_init);
		if (ret != 0)
			goto port_cleanup;

	} /* End loop, all ports on this PF */

	return 0;

port_cleanup:
	/* Destroy every ethdev that was successfully created above. */
	for (i = 0; i < pf_dev->total_phyports; i++) {
		struct rte_eth_dev *eth_dev;

		if (pf_dev->multi_pf.enabled)
			snprintf(port_name, sizeof(port_name), "%s",
					pf_dev->pci_dev->device.name);
		else
			snprintf(port_name, sizeof(port_name), "%s_port%u",
					pf_dev->pci_dev->device.name, i);
		eth_dev = rte_eth_dev_get_by_name(port_name);
		if (eth_dev != NULL)
			rte_eth_dev_destroy(eth_dev, nfp_net_uninit);
	}
	nfp_cpp_area_release_free(pf_dev->ctrl_area);
app_cleanup:
	rte_free(app_fw_nic);

	return ret;
}
1903 
1904 static int
1905 nfp_net_hwinfo_set(uint8_t function_id,
1906 		struct nfp_rtsym_table *sym_tbl,
1907 		struct nfp_cpp *cpp,
1908 		enum nfp_app_fw_id app_fw_id)
1909 {
1910 	int ret = 0;
1911 	uint64_t app_cap;
1912 	struct nfp_nsp *nsp;
1913 	uint8_t sp_indiff = 1;
1914 	char hw_info[RTE_ETH_NAME_MAX_LEN];
1915 	char app_cap_name[RTE_ETH_NAME_MAX_LEN];
1916 
1917 	if (app_fw_id != NFP_APP_FW_FLOWER_NIC) {
1918 		/* Read the app capabilities of the firmware loaded */
1919 		snprintf(app_cap_name, sizeof(app_cap_name), "_pf%u_net_app_cap", function_id);
1920 		app_cap = nfp_rtsym_read_le(sym_tbl, app_cap_name, &ret);
1921 		if (ret != 0) {
1922 			PMD_INIT_LOG(ERR, "Could not read app_fw_cap from firmware.");
1923 			return ret;
1924 		}
1925 
1926 		/* Calculate the value of sp_indiff and write to hw_info */
1927 		sp_indiff = app_cap & NFP_NET_APP_CAP_SP_INDIFF;
1928 	}
1929 
1930 	snprintf(hw_info, sizeof(hw_info), "sp_indiff=%u", sp_indiff);
1931 
1932 	nsp = nfp_nsp_open(cpp);
1933 	if (nsp == NULL) {
1934 		PMD_INIT_LOG(ERR, "Could not get NSP.");
1935 		return -EIO;
1936 	}
1937 
1938 	ret = nfp_nsp_hwinfo_set(nsp, hw_info, sizeof(hw_info));
1939 	nfp_nsp_close(nsp);
1940 	if (ret != 0) {
1941 		PMD_INIT_LOG(ERR, "Failed to set parameter to hwinfo.");
1942 		return ret;
1943 	}
1944 
1945 	return 0;
1946 }
1947 
/*
 * Map NFP media link modes (NSP media buffer bit positions) to the
 * corresponding RTE_ETH_LINK_SPEED_* capability flags. Indexed by
 * NFP_MEDIA_* mode; unlisted modes map to 0 (no capability).
 */
const uint32_t nfp_eth_media_table[NFP_MEDIA_LINK_MODES_NUMBER] = {
	[NFP_MEDIA_W0_RJ45_10M]     = RTE_ETH_LINK_SPEED_10M,
	[NFP_MEDIA_W0_RJ45_10M_HD]  = RTE_ETH_LINK_SPEED_10M_HD,
	[NFP_MEDIA_W0_RJ45_100M]    = RTE_ETH_LINK_SPEED_100M,
	[NFP_MEDIA_W0_RJ45_100M_HD] = RTE_ETH_LINK_SPEED_100M_HD,
	[NFP_MEDIA_W0_RJ45_1G]      = RTE_ETH_LINK_SPEED_1G,
	[NFP_MEDIA_W0_RJ45_2P5G]    = RTE_ETH_LINK_SPEED_2_5G,
	[NFP_MEDIA_W0_RJ45_5G]      = RTE_ETH_LINK_SPEED_5G,
	[NFP_MEDIA_W0_RJ45_10G]     = RTE_ETH_LINK_SPEED_10G,
	[NFP_MEDIA_1000BASE_CX]     = RTE_ETH_LINK_SPEED_1G,
	[NFP_MEDIA_1000BASE_KX]     = RTE_ETH_LINK_SPEED_1G,
	[NFP_MEDIA_10GBASE_KX4]     = RTE_ETH_LINK_SPEED_10G,
	[NFP_MEDIA_10GBASE_KR]      = RTE_ETH_LINK_SPEED_10G,
	[NFP_MEDIA_10GBASE_CX4]     = RTE_ETH_LINK_SPEED_10G,
	[NFP_MEDIA_10GBASE_CR]      = RTE_ETH_LINK_SPEED_10G,
	[NFP_MEDIA_10GBASE_SR]      = RTE_ETH_LINK_SPEED_10G,
	[NFP_MEDIA_10GBASE_ER]      = RTE_ETH_LINK_SPEED_10G,
	[NFP_MEDIA_25GBASE_KR]      = RTE_ETH_LINK_SPEED_25G,
	[NFP_MEDIA_25GBASE_KR_S]    = RTE_ETH_LINK_SPEED_25G,
	[NFP_MEDIA_25GBASE_CR]      = RTE_ETH_LINK_SPEED_25G,
	[NFP_MEDIA_25GBASE_CR_S]    = RTE_ETH_LINK_SPEED_25G,
	[NFP_MEDIA_25GBASE_SR]      = RTE_ETH_LINK_SPEED_25G,
	[NFP_MEDIA_40GBASE_CR4]     = RTE_ETH_LINK_SPEED_40G,
	[NFP_MEDIA_40GBASE_KR4]     = RTE_ETH_LINK_SPEED_40G,
	[NFP_MEDIA_40GBASE_SR4]     = RTE_ETH_LINK_SPEED_40G,
	[NFP_MEDIA_40GBASE_LR4]     = RTE_ETH_LINK_SPEED_40G,
	[NFP_MEDIA_50GBASE_KR]      = RTE_ETH_LINK_SPEED_50G,
	[NFP_MEDIA_50GBASE_SR]      = RTE_ETH_LINK_SPEED_50G,
	[NFP_MEDIA_50GBASE_CR]      = RTE_ETH_LINK_SPEED_50G,
	[NFP_MEDIA_50GBASE_LR]      = RTE_ETH_LINK_SPEED_50G,
	[NFP_MEDIA_50GBASE_ER]      = RTE_ETH_LINK_SPEED_50G,
	[NFP_MEDIA_50GBASE_FR]      = RTE_ETH_LINK_SPEED_50G,
	[NFP_MEDIA_100GBASE_KR4]    = RTE_ETH_LINK_SPEED_100G,
	[NFP_MEDIA_100GBASE_SR4]    = RTE_ETH_LINK_SPEED_100G,
	[NFP_MEDIA_100GBASE_CR4]    = RTE_ETH_LINK_SPEED_100G,
	[NFP_MEDIA_100GBASE_KP4]    = RTE_ETH_LINK_SPEED_100G,
	[NFP_MEDIA_100GBASE_CR10]   = RTE_ETH_LINK_SPEED_100G,
	[NFP_MEDIA_10GBASE_LR]      = RTE_ETH_LINK_SPEED_10G,
	[NFP_MEDIA_25GBASE_LR]      = RTE_ETH_LINK_SPEED_25G,
	[NFP_MEDIA_25GBASE_ER]      = RTE_ETH_LINK_SPEED_25G
};
1989 
1990 static int
1991 nfp_net_speed_capa_get_real(struct nfp_eth_media_buf *media_buf,
1992 		struct nfp_pf_dev *pf_dev)
1993 {
1994 	uint32_t i;
1995 	uint32_t j;
1996 	uint32_t offset;
1997 	uint32_t speed_capa = 0;
1998 	uint64_t supported_modes;
1999 
2000 	for (i = 0; i < RTE_DIM(media_buf->supported_modes); i++) {
2001 		supported_modes = media_buf->supported_modes[i];
2002 		offset = i * UINT64_BIT;
2003 		for (j = 0; j < UINT64_BIT; j++) {
2004 			if (supported_modes == 0)
2005 				break;
2006 
2007 			if ((supported_modes & 1) != 0) {
2008 				if ((j + offset) >= NFP_MEDIA_LINK_MODES_NUMBER) {
2009 					PMD_DRV_LOG(ERR, "Invalid offset of media table.");
2010 					return -EINVAL;
2011 				}
2012 
2013 				speed_capa |= nfp_eth_media_table[j + offset];
2014 			}
2015 
2016 			supported_modes = supported_modes >> 1;
2017 		}
2018 	}
2019 
2020 	pf_dev->speed_capa = speed_capa;
2021 
2022 	return pf_dev->speed_capa == 0 ? -EINVAL : 0;
2023 }
2024 
2025 static int
2026 nfp_net_speed_cap_get_one(struct nfp_pf_dev *pf_dev,
2027 		uint32_t port_id)
2028 {
2029 	int ret;
2030 	struct nfp_nsp *nsp;
2031 	struct nfp_eth_media_buf media_buf;
2032 
2033 	media_buf.eth_index = pf_dev->nfp_eth_table->ports[port_id].eth_index;
2034 	pf_dev->speed_capa = 0;
2035 
2036 	nsp = nfp_nsp_open(pf_dev->cpp);
2037 	if (nsp == NULL) {
2038 		PMD_DRV_LOG(ERR, "Couldn't get NSP.");
2039 		return -EIO;
2040 	}
2041 
2042 	ret = nfp_nsp_read_media(nsp, &media_buf, sizeof(media_buf));
2043 	nfp_nsp_close(nsp);
2044 	if (ret != 0) {
2045 		PMD_DRV_LOG(ERR, "Failed to read media.");
2046 		return ret;
2047 	}
2048 
2049 	ret = nfp_net_speed_capa_get_real(&media_buf, pf_dev);
2050 	if (ret < 0) {
2051 		PMD_DRV_LOG(ERR, "Speed capability is invalid.");
2052 		return ret;
2053 	}
2054 
2055 	return 0;
2056 }
2057 
2058 static int
2059 nfp_net_speed_cap_get(struct nfp_pf_dev *pf_dev)
2060 {
2061 	int ret;
2062 	uint32_t i;
2063 	uint32_t id;
2064 	uint32_t count;
2065 
2066 	count = nfp_net_get_port_num(pf_dev, pf_dev->nfp_eth_table);
2067 	for (i = 0; i < count; i++) {
2068 		id = nfp_function_id_get(pf_dev, i);
2069 		ret = nfp_net_speed_cap_get_one(pf_dev, id);
2070 		if (ret != 0) {
2071 			PMD_INIT_LOG(ERR, "Failed to get port %d speed capability.", id);
2072 			return ret;
2073 		}
2074 	}
2075 
2076 	return 0;
2077 }
2078 
2079 /* Force the physical port down to clear the possible DMA error */
2080 static int
2081 nfp_net_force_port_down(struct nfp_pf_dev *pf_dev,
2082 		struct nfp_eth_table *nfp_eth_table,
2083 		struct nfp_cpp *cpp)
2084 {
2085 	int ret;
2086 	uint32_t i;
2087 	uint32_t id;
2088 	uint32_t index;
2089 	uint32_t count;
2090 
2091 	count = nfp_net_get_port_num(pf_dev, nfp_eth_table);
2092 	for (i = 0; i < count; i++) {
2093 		id = nfp_function_id_get(pf_dev, i);
2094 		index = nfp_eth_table->ports[id].index;
2095 		ret = nfp_eth_set_configured(cpp, index, 0);
2096 		if (ret < 0)
2097 			return ret;
2098 	}
2099 
2100 	return 0;
2101 }
2102 
2103 static int
2104 nfp_fw_app_primary_init(struct nfp_net_hw_priv *hw_priv)
2105 {
2106 	int ret;
2107 	struct nfp_pf_dev *pf_dev = hw_priv->pf_dev;
2108 
2109 	switch (pf_dev->app_fw_id) {
2110 	case NFP_APP_FW_CORE_NIC:
2111 		PMD_INIT_LOG(INFO, "Initializing coreNIC");
2112 		ret = nfp_init_app_fw_nic(hw_priv);
2113 		if (ret != 0) {
2114 			PMD_INIT_LOG(ERR, "Could not initialize coreNIC!");
2115 			return ret;
2116 		}
2117 		break;
2118 	case NFP_APP_FW_FLOWER_NIC:
2119 		PMD_INIT_LOG(INFO, "Initializing Flower");
2120 		ret = nfp_init_app_fw_flower(hw_priv);
2121 		if (ret != 0) {
2122 			PMD_INIT_LOG(ERR, "Could not initialize Flower!");
2123 			return ret;
2124 		}
2125 		break;
2126 	default:
2127 		PMD_INIT_LOG(ERR, "Unsupported Firmware loaded");
2128 		ret = -EINVAL;
2129 		return ret;
2130 	}
2131 
2132 	return 0;
2133 }
2134 
2135 static int
2136 nfp_pf_get_max_vf(struct nfp_pf_dev *pf_dev)
2137 {
2138 	int ret;
2139 	uint32_t max_vfs;
2140 
2141 	max_vfs = nfp_rtsym_read_le(pf_dev->sym_tbl, "nfd_vf_cfg_max_vfs", &ret);
2142 	if (ret != 0)
2143 		return ret;
2144 
2145 	pf_dev->max_vfs = max_vfs;
2146 
2147 	return 0;
2148 }
2149 
2150 static int
2151 nfp_pf_get_sriov_vf(struct nfp_pf_dev *pf_dev,
2152 		const struct nfp_dev_info *dev_info)
2153 {
2154 	int ret;
2155 	off_t pos;
2156 	uint16_t offset;
2157 	uint16_t sriov_vf;
2158 
2159 	/* For 3800 single-PF and 4000 card */
2160 	if (!pf_dev->multi_pf.enabled) {
2161 		pf_dev->sriov_vf = pf_dev->max_vfs;
2162 		return 0;
2163 	}
2164 
2165 	pos = rte_pci_find_ext_capability(pf_dev->pci_dev, RTE_PCI_EXT_CAP_ID_SRIOV);
2166 	if (pos == 0) {
2167 		PMD_INIT_LOG(ERR, "Can not get the pci sriov cap");
2168 		return -EIO;
2169 	}
2170 
2171 	/*
2172 	 * Management firmware ensures that sriov capability registers
2173 	 * are initialized correctly.
2174 	 */
2175 	ret = rte_pci_read_config(pf_dev->pci_dev, &sriov_vf, sizeof(sriov_vf),
2176 			pos + RTE_PCI_SRIOV_TOTAL_VF);
2177 	if (ret < 0) {
2178 		PMD_INIT_LOG(ERR, "Can not read the sriov toatl VF");
2179 		return -EIO;
2180 	}
2181 
2182 	/* Offset of first VF is relative to its PF. */
2183 	ret = rte_pci_read_config(pf_dev->pci_dev, &offset, sizeof(offset),
2184 			pos + RTE_PCI_SRIOV_VF_OFFSET);
2185 	if (ret < 0) {
2186 		PMD_INIT_LOG(ERR, "Can not get the VF offset");
2187 		return -EIO;
2188 	}
2189 
2190 	offset += pf_dev->multi_pf.function_id;
2191 	if (offset < dev_info->pf_num_per_unit)
2192 		return -ERANGE;
2193 
2194 	offset -= dev_info->pf_num_per_unit;
2195 	if (offset >= pf_dev->max_vfs || offset + sriov_vf > pf_dev->max_vfs) {
2196 		PMD_INIT_LOG(ERR, "The pci allocate VF is more than the MAX VF");
2197 		return -ERANGE;
2198 	}
2199 
2200 	pf_dev->vf_base_id = offset;
2201 	pf_dev->sriov_vf = sriov_vf;
2202 
2203 	return 0;
2204 }
2205 
2206 static int
2207 nfp_net_get_vf_info(struct nfp_pf_dev *pf_dev,
2208 		const struct nfp_dev_info *dev_info)
2209 {
2210 	int ret;
2211 
2212 	ret = nfp_pf_get_max_vf(pf_dev);
2213 	if (ret != 0) {
2214 		if (ret != -ENOENT) {
2215 			PMD_INIT_LOG(ERR, "Read max VFs failed");
2216 			return ret;
2217 		}
2218 
2219 		PMD_INIT_LOG(WARNING, "The firmware can not support read max VFs");
2220 		return 0;
2221 	}
2222 
2223 	if (pf_dev->max_vfs == 0)
2224 		return 0;
2225 
2226 	ret = nfp_pf_get_sriov_vf(pf_dev, dev_info);
2227 	if (ret < 0)
2228 		return ret;
2229 
2230 	pf_dev->queue_per_vf = NFP_QUEUE_PER_VF;
2231 
2232 	return 0;
2233 }
2234 
2235 static int
2236 nfp_net_vf_config_init(struct nfp_pf_dev *pf_dev)
2237 {
2238 	int ret = 0;
2239 	uint32_t min_size;
2240 	char vf_bar_name[RTE_ETH_NAME_MAX_LEN];
2241 	char vf_cfg_name[RTE_ETH_NAME_MAX_LEN];
2242 
2243 	if (pf_dev->sriov_vf == 0)
2244 		return 0;
2245 
2246 	min_size = pf_dev->ctrl_bar_size * pf_dev->sriov_vf;
2247 	snprintf(vf_bar_name, sizeof(vf_bar_name), "_pf%d_net_vf_bar",
2248 			pf_dev->multi_pf.function_id);
2249 	pf_dev->vf_bar = nfp_rtsym_map_offset(pf_dev->sym_tbl, vf_bar_name,
2250 			pf_dev->ctrl_bar_size * pf_dev->vf_base_id,
2251 			min_size, &pf_dev->vf_area);
2252 	if (pf_dev->vf_bar == NULL) {
2253 		PMD_INIT_LOG(ERR, "Failed to get vf cfg.");
2254 		return -EIO;
2255 	}
2256 
2257 	min_size = NFP_NET_VF_CFG_SZ * pf_dev->sriov_vf + NFP_NET_VF_CFG_MB_SZ;
2258 	snprintf(vf_cfg_name, sizeof(vf_cfg_name), "_pf%d_net_vf_cfg2",
2259 			pf_dev->multi_pf.function_id);
2260 	pf_dev->vf_cfg_tbl_bar = nfp_rtsym_map(pf_dev->sym_tbl, vf_cfg_name,
2261 			min_size, &pf_dev->vf_cfg_tbl_area);
2262 	if (pf_dev->vf_cfg_tbl_bar == NULL) {
2263 		PMD_INIT_LOG(ERR, "Failed to get vf configure table.");
2264 		ret = -EIO;
2265 		goto vf_bar_cleanup;
2266 	}
2267 
2268 	return 0;
2269 
2270 vf_bar_cleanup:
2271 	nfp_cpp_area_release_free(pf_dev->vf_area);
2272 
2273 	return ret;
2274 }
2275 
2276 static int
2277 nfp_pf_init(struct rte_pci_device *pci_dev)
2278 {
2279 	void *sync;
2280 	int ret = 0;
2281 	uint64_t addr;
2282 	uint32_t cpp_id;
2283 	uint8_t function_id;
2284 	struct nfp_cpp *cpp;
2285 	struct nfp_pf_dev *pf_dev;
2286 	struct nfp_hwinfo *hwinfo;
2287 	enum nfp_app_fw_id app_fw_id;
2288 	char name[RTE_ETH_NAME_MAX_LEN];
2289 	struct nfp_rtsym_table *sym_tbl;
2290 	struct nfp_net_hw_priv *hw_priv;
2291 	char app_name[RTE_ETH_NAME_MAX_LEN];
2292 	struct nfp_eth_table *nfp_eth_table;
2293 	const struct nfp_dev_info *dev_info;
2294 
2295 	if (pci_dev == NULL)
2296 		return -ENODEV;
2297 
2298 	if (pci_dev->mem_resource[0].addr == NULL) {
2299 		PMD_INIT_LOG(ERR, "The address of BAR0 is NULL.");
2300 		return -ENODEV;
2301 	}
2302 
2303 	dev_info = nfp_dev_info_get(pci_dev->id.device_id);
2304 	if (dev_info == NULL) {
2305 		PMD_INIT_LOG(ERR, "Not supported device ID");
2306 		return -ENODEV;
2307 	}
2308 
2309 	hw_priv = rte_zmalloc(NULL, sizeof(*hw_priv), 0);
2310 	if (hw_priv == NULL) {
2311 		PMD_INIT_LOG(ERR, "Can not alloc memory for hw priv data");
2312 		return -ENOMEM;
2313 	}
2314 
2315 	/* Allocate memory for the PF "device" */
2316 	function_id = (pci_dev->addr.function) & 0x07;
2317 	snprintf(name, sizeof(name), "nfp_pf%u", function_id);
2318 	pf_dev = rte_zmalloc(name, sizeof(*pf_dev), 0);
2319 	if (pf_dev == NULL) {
2320 		PMD_INIT_LOG(ERR, "Can't allocate memory for the PF device");
2321 		ret = -ENOMEM;
2322 		goto hw_priv_free;
2323 	}
2324 
2325 	sync = nfp_sync_alloc();
2326 	if (sync == NULL) {
2327 		PMD_INIT_LOG(ERR, "Failed to alloc sync zone.");
2328 		ret = -ENOMEM;
2329 		goto pf_cleanup;
2330 	}
2331 
2332 	/*
2333 	 * When device bound to UIO, the device could be used, by mistake,
2334 	 * by two DPDK apps, and the UIO driver does not avoid it. This
2335 	 * could lead to a serious problem when configuring the NFP CPP
2336 	 * interface. Here we avoid this telling to the CPP init code to
2337 	 * use a lock file if UIO is being used.
2338 	 */
2339 	if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO)
2340 		cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, false);
2341 	else
2342 		cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, true);
2343 
2344 	if (cpp == NULL) {
2345 		PMD_INIT_LOG(ERR, "A CPP handle can not be obtained");
2346 		ret = -EIO;
2347 		goto sync_free;
2348 	}
2349 
2350 	hwinfo = nfp_hwinfo_read(cpp);
2351 	if (hwinfo == NULL) {
2352 		PMD_INIT_LOG(ERR, "Error reading hwinfo table");
2353 		ret = -EIO;
2354 		goto cpp_cleanup;
2355 	}
2356 
2357 	/* Read the number of physical ports from hardware */
2358 	nfp_eth_table = nfp_eth_read_ports(cpp);
2359 	if (nfp_eth_table == NULL) {
2360 		PMD_INIT_LOG(ERR, "Error reading NFP ethernet table");
2361 		ret = -EIO;
2362 		goto hwinfo_cleanup;
2363 	}
2364 
2365 	if (nfp_eth_table->count == 0 || nfp_eth_table->count > 8) {
2366 		PMD_INIT_LOG(ERR, "NFP ethernet table reports wrong ports: %u",
2367 				nfp_eth_table->count);
2368 		ret = -EIO;
2369 		goto eth_table_cleanup;
2370 	}
2371 
2372 	pf_dev->multi_pf.enabled = nfp_check_multi_pf_from_nsp(pci_dev, cpp);
2373 	pf_dev->multi_pf.function_id = function_id;
2374 
2375 	ret = nfp_net_force_port_down(pf_dev, nfp_eth_table, cpp);
2376 	if (ret != 0) {
2377 		PMD_INIT_LOG(ERR, "Failed to force port down");
2378 		ret = -EIO;
2379 		goto eth_table_cleanup;
2380 	}
2381 
2382 	ret = nfp_devargs_parse(&pf_dev->devargs, pci_dev->device.devargs);
2383 	if (ret != 0) {
2384 		PMD_INIT_LOG(ERR, "Error when parsing device args");
2385 		ret = -EINVAL;
2386 		goto eth_table_cleanup;
2387 	}
2388 
2389 	ret = nfp_net_device_activate(cpp, &pf_dev->multi_pf);
2390 	if (ret != 0) {
2391 		PMD_INIT_LOG(ERR, "Failed to activate the NFP device");
2392 		ret = -EIO;
2393 		goto eth_table_cleanup;
2394 	}
2395 
2396 	if (nfp_fw_setup(pci_dev, cpp, nfp_eth_table, hwinfo,
2397 			dev_info, &pf_dev->multi_pf, pf_dev->devargs.force_reload_fw) != 0) {
2398 		PMD_INIT_LOG(ERR, "Error when uploading firmware");
2399 		ret = -EIO;
2400 		goto eth_table_cleanup;
2401 	}
2402 
2403 	/* Now the symbol table should be there */
2404 	sym_tbl = nfp_rtsym_table_read(cpp);
2405 	if (sym_tbl == NULL) {
2406 		PMD_INIT_LOG(ERR, "Something is wrong with the firmware symbol table");
2407 		ret = -EIO;
2408 		goto fw_cleanup;
2409 	}
2410 
2411 	/* Read the app ID of the firmware loaded */
2412 	snprintf(app_name, sizeof(app_name), "_pf%u_net_app_id", function_id);
2413 	app_fw_id = nfp_rtsym_read_le(sym_tbl, app_name, &ret);
2414 	if (ret != 0) {
2415 		PMD_INIT_LOG(ERR, "Couldn't read %s from firmware", app_name);
2416 		ret = -EIO;
2417 		goto sym_tbl_cleanup;
2418 	}
2419 
2420 	/* Write sp_indiff to hw_info */
2421 	ret = nfp_net_hwinfo_set(function_id, sym_tbl, cpp, app_fw_id);
2422 	if (ret != 0) {
2423 		PMD_INIT_LOG(ERR, "Failed to set hwinfo.");
2424 		ret = -EIO;
2425 		goto sym_tbl_cleanup;
2426 	}
2427 
2428 	/* Populate the newly created PF device */
2429 	pf_dev->app_fw_id = app_fw_id;
2430 	pf_dev->cpp = cpp;
2431 	pf_dev->hwinfo = hwinfo;
2432 	pf_dev->sym_tbl = sym_tbl;
2433 	pf_dev->pci_dev = pci_dev;
2434 	pf_dev->nfp_eth_table = nfp_eth_table;
2435 	pf_dev->sync = sync;
2436 	pf_dev->total_phyports = nfp_net_get_port_num(pf_dev, nfp_eth_table);
2437 	pf_dev->speed_updated = false;
2438 
2439 	ret = nfp_net_speed_cap_get(pf_dev);
2440 	if (ret != 0) {
2441 		PMD_INIT_LOG(ERR, "Failed to get speed capability.");
2442 		ret = -EIO;
2443 		goto sym_tbl_cleanup;
2444 	}
2445 
2446 	/* Get the VF info */
2447 	ret = nfp_net_get_vf_info(pf_dev, dev_info);
2448 	if (ret != 0) {
2449 		PMD_INIT_LOG(ERR, "Failed to get VF info.");
2450 		ret = -EIO;
2451 		goto sym_tbl_cleanup;
2452 	}
2453 
2454 	/* Configure access to tx/rx vNIC BARs */
2455 	addr = nfp_qcp_queue_offset(dev_info, 0);
2456 	cpp_id = NFP_CPP_ISLAND_ID(0, NFP_CPP_ACTION_RW, 0, 0);
2457 
2458 	pf_dev->qc_bar = nfp_cpp_map_area(pf_dev->cpp, cpp_id,
2459 			addr, dev_info->qc_area_sz, &pf_dev->qc_area);
2460 	if (pf_dev->qc_bar == NULL) {
2461 		PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for net.qc");
2462 		ret = -EIO;
2463 		goto sym_tbl_cleanup;
2464 	}
2465 
2466 	PMD_INIT_LOG(DEBUG, "qc_bar address: %p", pf_dev->qc_bar);
2467 
2468 	pf_dev->mac_stats_bar = nfp_rtsym_map(sym_tbl, "_mac_stats",
2469 			NFP_MAC_STATS_SIZE * nfp_eth_table->max_index,
2470 			&pf_dev->mac_stats_area);
2471 	if (pf_dev->mac_stats_bar == NULL) {
2472 		PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for _mac_stats");
2473 		goto hwqueues_cleanup;
2474 	}
2475 
2476 	ret = nfp_enable_multi_pf(pf_dev);
2477 	if (ret != 0)
2478 		goto mac_stats_cleanup;
2479 
2480 	ret = nfp_net_vf_config_init(pf_dev);
2481 	if (ret != 0) {
2482 		PMD_INIT_LOG(ERR, "Failed to init VF config.");
2483 		goto vf_cfg_tbl_cleanup;
2484 	}
2485 
2486 	hw_priv->is_pf = true;
2487 	hw_priv->pf_dev = pf_dev;
2488 	hw_priv->dev_info = dev_info;
2489 
2490 	/*
2491 	 * PF initialization has been done at this point. Call app specific
2492 	 * init code now.
2493 	 */
2494 	ret = nfp_fw_app_primary_init(hw_priv);
2495 	if (ret != 0) {
2496 		PMD_INIT_LOG(ERR, "Failed to init hw app primary.");
2497 		goto vf_cfg_tbl_cleanup;
2498 	}
2499 
2500 	/* Register the CPP bridge service here for primary use */
2501 	if (pf_dev->devargs.cpp_service_enable) {
2502 		ret = nfp_enable_cpp_service(pf_dev);
2503 		if (ret != 0) {
2504 			PMD_INIT_LOG(ERR, "Enable CPP service failed.");
2505 			goto vf_cfg_tbl_cleanup;
2506 		}
2507 	}
2508 
2509 	return 0;
2510 
2511 vf_cfg_tbl_cleanup:
2512 	nfp_net_vf_config_uninit(pf_dev);
2513 mac_stats_cleanup:
2514 	nfp_cpp_area_release_free(pf_dev->mac_stats_area);
2515 hwqueues_cleanup:
2516 	nfp_cpp_area_release_free(pf_dev->qc_area);
2517 sym_tbl_cleanup:
2518 	free(sym_tbl);
2519 fw_cleanup:
2520 	nfp_fw_unload(cpp);
2521 	if (pf_dev->multi_pf.enabled) {
2522 		nfp_net_keepalive_stop(&pf_dev->multi_pf);
2523 		nfp_net_keepalive_clear(pf_dev->multi_pf.beat_addr, pf_dev->multi_pf.function_id);
2524 		nfp_net_keepalive_uninit(&pf_dev->multi_pf);
2525 	}
2526 eth_table_cleanup:
2527 	free(nfp_eth_table);
2528 hwinfo_cleanup:
2529 	free(hwinfo);
2530 cpp_cleanup:
2531 	nfp_cpp_free(cpp);
2532 sync_free:
2533 	nfp_sync_free(sync);
2534 pf_cleanup:
2535 	rte_free(pf_dev);
2536 hw_priv_free:
2537 	rte_free(hw_priv);
2538 
2539 	return ret;
2540 }
2541 
2542 static int
2543 nfp_secondary_net_init(struct rte_eth_dev *eth_dev,
2544 		void *para)
2545 {
2546 	struct nfp_net_hw_priv *hw_priv;
2547 
2548 	hw_priv = para;
2549 	nfp_net_ethdev_ops_mount(hw_priv->pf_dev, eth_dev);
2550 
2551 	eth_dev->process_private = para;
2552 
2553 	return 0;
2554 }
2555 
2556 static int
2557 nfp_secondary_init_app_fw_nic(struct nfp_net_hw_priv *hw_priv)
2558 {
2559 	uint32_t i;
2560 	int err = 0;
2561 	int ret = 0;
2562 	uint8_t function_id;
2563 	uint32_t total_vnics;
2564 	char pf_name[RTE_ETH_NAME_MAX_LEN];
2565 	struct nfp_pf_dev *pf_dev = hw_priv->pf_dev;
2566 
2567 	/* Read the number of vNIC's created for the PF */
2568 	function_id = (pf_dev->pci_dev->addr.function) & 0x07;
2569 	snprintf(pf_name, sizeof(pf_name), "nfd_cfg_pf%u_num_ports", function_id);
2570 	total_vnics = nfp_rtsym_read_le(pf_dev->sym_tbl, pf_name, &err);
2571 	if (err != 0 || total_vnics == 0 || total_vnics > 8) {
2572 		PMD_INIT_LOG(ERR, "%s symbol with wrong value", pf_name);
2573 		return -ENODEV;
2574 	}
2575 
2576 	for (i = 0; i < total_vnics; i++) {
2577 		char port_name[RTE_ETH_NAME_MAX_LEN];
2578 
2579 		if (nfp_check_multi_pf_from_fw(total_vnics))
2580 			snprintf(port_name, sizeof(port_name), "%s",
2581 					pf_dev->pci_dev->device.name);
2582 		else
2583 			snprintf(port_name, sizeof(port_name), "%s_port%u",
2584 					pf_dev->pci_dev->device.name, i);
2585 
2586 		PMD_INIT_LOG(DEBUG, "Secondary attaching to port %s", port_name);
2587 		ret = rte_eth_dev_create(&pf_dev->pci_dev->device, port_name, 0,
2588 				NULL, NULL, nfp_secondary_net_init, hw_priv);
2589 		if (ret != 0) {
2590 			PMD_INIT_LOG(ERR, "Secondary process attach to port %s failed", port_name);
2591 			ret = -ENODEV;
2592 			break;
2593 		}
2594 	}
2595 
2596 	return ret;
2597 }
2598 
2599 static int
2600 nfp_fw_app_secondary_init(struct nfp_net_hw_priv *hw_priv)
2601 {
2602 	int ret;
2603 	struct nfp_pf_dev *pf_dev = hw_priv->pf_dev;
2604 
2605 	switch (pf_dev->app_fw_id) {
2606 	case NFP_APP_FW_CORE_NIC:
2607 		PMD_INIT_LOG(INFO, "Initializing coreNIC");
2608 		ret = nfp_secondary_init_app_fw_nic(hw_priv);
2609 		if (ret != 0) {
2610 			PMD_INIT_LOG(ERR, "Could not initialize coreNIC!");
2611 			return ret;
2612 		}
2613 		break;
2614 	case NFP_APP_FW_FLOWER_NIC:
2615 		PMD_INIT_LOG(INFO, "Initializing Flower");
2616 		ret = nfp_secondary_init_app_fw_flower(hw_priv);
2617 		if (ret != 0) {
2618 			PMD_INIT_LOG(ERR, "Could not initialize Flower!");
2619 			return ret;
2620 		}
2621 		break;
2622 	default:
2623 		PMD_INIT_LOG(ERR, "Unsupported Firmware loaded");
2624 		ret = -EINVAL;
2625 		return ret;
2626 	}
2627 
2628 	return 0;
2629 }
2630 
2631 /*
2632  * When attaching to the NFP4000/6000 PF on a secondary process there
2633  * is no need to initialise the PF again. Only minimal work is required
2634  * here.
2635  */
2636 static int
2637 nfp_pf_secondary_init(struct rte_pci_device *pci_dev)
2638 {
2639 	void *sync;
2640 	int ret = 0;
2641 	struct nfp_cpp *cpp;
2642 	uint8_t function_id;
2643 	struct nfp_pf_dev *pf_dev;
2644 	enum nfp_app_fw_id app_fw_id;
2645 	char name[RTE_ETH_NAME_MAX_LEN];
2646 	struct nfp_rtsym_table *sym_tbl;
2647 	struct nfp_net_hw_priv *hw_priv;
2648 	const struct nfp_dev_info *dev_info;
2649 	char app_name[RTE_ETH_NAME_MAX_LEN];
2650 
2651 	if (pci_dev == NULL)
2652 		return -ENODEV;
2653 
2654 	if (pci_dev->mem_resource[0].addr == NULL) {
2655 		PMD_INIT_LOG(ERR, "The address of BAR0 is NULL.");
2656 		return -ENODEV;
2657 	}
2658 
2659 	dev_info = nfp_dev_info_get(pci_dev->id.device_id);
2660 	if (dev_info == NULL) {
2661 		PMD_INIT_LOG(ERR, "Not supported device ID");
2662 		return -ENODEV;
2663 	}
2664 
2665 	hw_priv = rte_zmalloc(NULL, sizeof(*hw_priv), 0);
2666 	if (hw_priv == NULL) {
2667 		PMD_INIT_LOG(ERR, "Can not alloc memory for hw priv data");
2668 		return -ENOMEM;
2669 	}
2670 
2671 	/* Allocate memory for the PF "device" */
2672 	snprintf(name, sizeof(name), "nfp_pf%d", 0);
2673 	pf_dev = rte_zmalloc(name, sizeof(*pf_dev), 0);
2674 	if (pf_dev == NULL) {
2675 		PMD_INIT_LOG(ERR, "Can't allocate memory for the PF device");
2676 		ret = -ENOMEM;
2677 		goto hw_priv_free;
2678 	}
2679 
2680 	sync = nfp_sync_alloc();
2681 	if (sync == NULL) {
2682 		PMD_INIT_LOG(ERR, "Failed to alloc sync zone.");
2683 		ret = -ENOMEM;
2684 		goto pf_cleanup;
2685 	}
2686 
2687 	/*
2688 	 * When device bound to UIO, the device could be used, by mistake,
2689 	 * by two DPDK apps, and the UIO driver does not avoid it. This
2690 	 * could lead to a serious problem when configuring the NFP CPP
2691 	 * interface. Here we avoid this telling to the CPP init code to
2692 	 * use a lock file if UIO is being used.
2693 	 */
2694 	if (pci_dev->kdrv == RTE_PCI_KDRV_VFIO)
2695 		cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, false);
2696 	else
2697 		cpp = nfp_cpp_from_nfp6000_pcie(pci_dev, dev_info, true);
2698 
2699 	if (cpp == NULL) {
2700 		PMD_INIT_LOG(ERR, "A CPP handle can not be obtained");
2701 		ret = -EIO;
2702 		goto sync_free;
2703 	}
2704 
2705 	/*
2706 	 * We don't have access to the PF created in the primary process
2707 	 * here so we have to read the number of ports from firmware.
2708 	 */
2709 	sym_tbl = nfp_rtsym_table_read(cpp);
2710 	if (sym_tbl == NULL) {
2711 		PMD_INIT_LOG(ERR, "Something is wrong with the firmware symbol table");
2712 		ret = -EIO;
2713 		goto cpp_cleanup;
2714 	}
2715 
2716 	/* Read the app ID of the firmware loaded */
2717 	function_id = pci_dev->addr.function & 0x7;
2718 	snprintf(app_name, sizeof(app_name), "_pf%u_net_app_id", function_id);
2719 	app_fw_id = nfp_rtsym_read_le(sym_tbl, app_name, &ret);
2720 	if (ret != 0) {
2721 		PMD_INIT_LOG(ERR, "Couldn't read %s from fw", app_name);
2722 		ret = -EIO;
2723 		goto sym_tbl_cleanup;
2724 	}
2725 
2726 	/* Populate the newly created PF device */
2727 	pf_dev->app_fw_id = app_fw_id;
2728 	pf_dev->cpp = cpp;
2729 	pf_dev->sym_tbl = sym_tbl;
2730 	pf_dev->pci_dev = pci_dev;
2731 	pf_dev->sync = sync;
2732 
2733 	hw_priv->is_pf = true;
2734 	hw_priv->pf_dev = pf_dev;
2735 	hw_priv->dev_info = dev_info;
2736 
2737 	/* Call app specific init code now */
2738 	ret = nfp_fw_app_secondary_init(hw_priv);
2739 	if (ret != 0) {
2740 		PMD_INIT_LOG(ERR, "Failed to init hw app primary.");
2741 		goto sym_tbl_cleanup;
2742 	}
2743 
2744 	return 0;
2745 
2746 sym_tbl_cleanup:
2747 	free(sym_tbl);
2748 cpp_cleanup:
2749 	nfp_cpp_free(cpp);
2750 sync_free:
2751 	nfp_sync_free(sync);
2752 pf_cleanup:
2753 	rte_free(pf_dev);
2754 hw_priv_free:
2755 	rte_free(hw_priv);
2756 
2757 	return ret;
2758 }
2759 
2760 static int
2761 nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2762 		struct rte_pci_device *dev)
2763 {
2764 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
2765 		return nfp_pf_init(dev);
2766 	else
2767 		return nfp_pf_secondary_init(dev);
2768 }
2769 
/* PCI device IDs bound to this PF driver (Netronome and Corigine vendors). */
static const struct rte_pci_id pci_id_nfp_pf_net_map[] = {
	/* Netronome-branded NFP3800/4000/6000 PF devices */
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
				PCI_DEVICE_ID_NFP3800_PF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
				PCI_DEVICE_ID_NFP4000_PF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
				PCI_DEVICE_ID_NFP6000_PF_NIC)
	},
	/* Corigine-branded equivalents of the same parts */
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE,
				PCI_DEVICE_ID_NFP3800_PF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE,
				PCI_DEVICE_ID_NFP4000_PF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE,
				PCI_DEVICE_ID_NFP6000_PF_NIC)
	},
	/* Zero vendor_id terminates the table */
	{
		.vendor_id = 0,
	},
};
2799 
2800 static int
2801 nfp_pci_uninit(struct rte_eth_dev *eth_dev)
2802 {
2803 	uint16_t port_id;
2804 	struct rte_pci_device *pci_dev;
2805 
2806 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2807 
2808 	/* Free up all physical ports under PF */
2809 	RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device)
2810 		rte_eth_dev_close(port_id);
2811 	/*
2812 	 * Ports can be closed and freed but hotplugging is not
2813 	 * currently supported.
2814 	 */
2815 	return -ENOTSUP;
2816 }
2817 
2818 static int
2819 eth_nfp_pci_remove(struct rte_pci_device *pci_dev)
2820 {
2821 	return rte_eth_dev_pci_generic_remove(pci_dev, nfp_pci_uninit);
2822 }
2823 
/* PCI driver instance for the NFP PF poll-mode driver. */
static struct rte_pci_driver rte_nfp_net_pf_pmd = {
	.id_table = pci_id_nfp_pf_net_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = nfp_pf_pci_probe,
	.remove = eth_nfp_pci_remove,
};

/* Register the driver, its ID table, kernel-module deps and devargs. */
RTE_PMD_REGISTER_PCI(NFP_PF_DRIVER_NAME, rte_nfp_net_pf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(NFP_PF_DRIVER_NAME, pci_id_nfp_pf_net_map);
RTE_PMD_REGISTER_KMOD_DEP(NFP_PF_DRIVER_NAME, "* igb_uio | uio_pci_generic | vfio");
RTE_PMD_REGISTER_PARAM_STRING(NFP_PF_DRIVER_NAME,
		NFP_PF_FORCE_RELOAD_FW "=<0|1>"
		NFP_CPP_SERVICE_ENABLE "=<0|1>");
2837