/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2014-2021 Netronome Systems, Inc.
 * All rights reserved.
 *
 * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
 */
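
/*
 * NFP VF (virtual function) ethdev driver. Each VF exposes a vNIC:
 * BAR0 carries the control BAR and BAR2 the queue memory. This file
 * wires the shared nfp_net_* helpers into an eth_dev_ops table and
 * registers the PMD through the NFP common PCI layer.
 */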

#include <rte_alarm.h>
#include <nfp_common_pci.h>

#include "nfd3/nfp_nfd3.h"
#include "nfdk/nfp_nfdk.h"
#include "nfpcore/nfp_cpp.h"

#include "nfp_logs.h"
#include "nfp_net_common.h"
#include "nfp_rxtx_vec.h"

#define NFP_VF_DRIVER_NAME net_nfp_vf

static int
nfp_netvf_start(struct rte_eth_dev *dev)
{
	int ret;
	uint16_t i;
	struct nfp_hw *hw;
	uint32_t new_ctrl;
	uint32_t update = 0;
	uint32_t intr_vector;
	struct nfp_net_hw *net_hw;
	struct rte_eth_conf *dev_conf;
	struct rte_eth_rxmode *rxmode;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;

	/* Disabling queues just in case... */
	nfp_net_disable_queues(dev);

	/* Enabling the required queues in the device */
	nfp_net_enable_queues(dev);

	/* Check and configure queue intr-vector mapping */
	if (dev->data->dev_conf.intr_conf.rxq != 0) {
		if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_UIO) {
			/*
			 * Better not to share LSC with RX interrupts.
			 * Unregistering LSC interrupt handler.
			 */
			rte_intr_callback_unregister(intr_handle,
					nfp_net_dev_interrupt_handler, (void *)dev);

			if (dev->data->nb_rx_queues > 1) {
				PMD_INIT_LOG(ERR, "PMD Rx interrupt only supports 1 queue with UIO.");
				return -EIO;
			}
		}

		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector) != 0)
			return -1;

		nfp_configure_rx_interrupt(dev, intr_handle);
		update = NFP_NET_CFG_UPDATE_MSIX;
	}
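
	/*
	 * Note: with UIO a single interrupt vector is shared between LSC
	 * and Rx, hence the one-queue restriction above; with VFIO,
	 * rte_intr_efd_enable() can provide one event fd per Rx queue.
	 */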

	rte_intr_enable(intr_handle);

	new_ctrl = nfp_check_offloads(dev);

	/* Writing configuration parameters in the device */
	net_hw = dev->data->dev_private;
	hw = &net_hw->super;
	nfp_net_params_setup(net_hw);

	dev_conf = &dev->data->dev_conf;
	rxmode = &dev_conf->rxmode;

	if ((rxmode->offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH) != 0) {
		nfp_net_rss_config_default(dev);
		update |= NFP_NET_CFG_UPDATE_RSS;
		new_ctrl |= nfp_net_cfg_ctrl_rss(hw->cap);
	}

	/* Enable device */
	new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;

	update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;

	if ((hw->cap & NFP_NET_CFG_CTRL_RINGCFG) != 0)
		new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;

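	/*
	 * Commit the configuration: write the new control word to the
	 * control BAR, then ask the firmware to apply the accumulated
	 * update flags. The cached control word is refreshed only on
	 * success.
	 */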
	nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
	if (nfp_reconfig(hw, new_ctrl, update) != 0)
		return -EIO;

	hw->ctrl = new_ctrl;

	/*
	 * Allocating rte mbufs for configured rx queues.
	 * This requires queues being enabled before.
	 */
	if (nfp_net_rx_freelist_setup(dev) != 0) {
		ret = -ENOMEM;
		goto error;
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;

error:
	/*
	 * An error returned by this function should mean the app exits and
	 * the system then releases all the allocated memory, even memory
	 * coming from hugepages.
	 *
	 * The device could be enabled at this point, with some queues ready
	 * for getting packets. This is true if the call to
	 * nfp_net_rx_freelist_setup() succeeds for some queues but fails
	 * for subsequent ones.
	 *
	 * This should make the app exit, but it is better to tell the
	 * device first.
	 */
	nfp_net_disable_queues(dev);

	return ret;
}

static int
nfp_netvf_stop(struct rte_eth_dev *dev)
{
	nfp_net_disable_queues(dev);

	/* Clear queues */
	nfp_net_stop_tx_queue(dev);

	nfp_net_stop_rx_queue(dev);

	return 0;
}

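/* Link up/down control is not supported on VFs; both callbacks return -ENOTSUP. */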
static int
nfp_netvf_set_link_up(struct rte_eth_dev *dev __rte_unused)
{
	return -ENOTSUP;
}

/* Set the link down. */
static int
nfp_netvf_set_link_down(struct rte_eth_dev *dev __rte_unused)
{
	return -ENOTSUP;
}

/* Reset and stop the device. The device cannot be restarted. */
static int
nfp_netvf_close(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *net_hw;
	struct rte_pci_device *pci_dev;
	struct nfp_net_hw_priv *hw_priv;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	net_hw = dev->data->dev_private;
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	hw_priv = dev->process_private;

	rte_free(net_hw->eth_xstats_base);
	rte_free(hw_priv);

	/*
	 * We assume that the DPDK application is stopping all the
	 * threads/queues before calling the device close function.
	 */
	nfp_net_disable_queues(dev);

	/* Clear queues */
	nfp_net_close_tx_queue(dev);
	nfp_net_close_rx_queue(dev);

	rte_intr_disable(pci_dev->intr_handle);

	/* Unregister the callback function from the EAL */
	rte_intr_callback_unregister(pci_dev->intr_handle,
			nfp_net_dev_interrupt_handler, (void *)dev);

	/* Cancel possible impending LSC work here before releasing the port */
	rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler, (void *)dev);

	return 0;
}

/* Initialise and register the VF driver with the DPDK application. */
static const struct eth_dev_ops nfp_netvf_eth_dev_ops = {
	.dev_configure          = nfp_net_configure,
	.dev_start              = nfp_netvf_start,
	.dev_stop               = nfp_netvf_stop,
	.dev_set_link_up        = nfp_netvf_set_link_up,
	.dev_set_link_down      = nfp_netvf_set_link_down,
	.dev_close              = nfp_netvf_close,
	.promiscuous_enable     = nfp_net_promisc_enable,
	.promiscuous_disable    = nfp_net_promisc_disable,
	.allmulticast_enable    = nfp_net_allmulticast_enable,
	.allmulticast_disable   = nfp_net_allmulticast_disable,
	.link_update            = nfp_net_link_update,
	.stats_get              = nfp_net_stats_get,
	.stats_reset            = nfp_net_stats_reset,
	.xstats_get             = nfp_net_xstats_get,
	.xstats_reset           = nfp_net_xstats_reset,
	.xstats_get_names       = nfp_net_xstats_get_names,
	.xstats_get_by_id       = nfp_net_xstats_get_by_id,
	.xstats_get_names_by_id = nfp_net_xstats_get_names_by_id,
	.dev_infos_get          = nfp_net_infos_get,
	.dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
	.mtu_set                = nfp_net_dev_mtu_set,
	.mac_addr_set           = nfp_net_set_mac_addr,
	.vlan_offload_set       = nfp_net_vlan_offload_set,
	.reta_update            = nfp_net_reta_update,
	.reta_query             = nfp_net_reta_query,
	.rss_hash_update        = nfp_net_rss_hash_update,
	.rss_hash_conf_get      = nfp_net_rss_hash_conf_get,
	.rx_queue_setup         = nfp_net_rx_queue_setup,
	.rx_queue_release       = nfp_net_rx_queue_release,
	.tx_queue_setup         = nfp_net_tx_queue_setup,
	.tx_queue_release       = nfp_net_tx_queue_release,
	.rx_queue_intr_enable   = nfp_rx_queue_intr_enable,
	.rx_queue_intr_disable  = nfp_rx_queue_intr_disable,
	.rx_burst_mode_get      = nfp_net_rx_burst_mode_get,
	.tx_burst_mode_get      = nfp_net_tx_burst_mode_get,
};

static inline void
nfp_netvf_ethdev_ops_mount(struct nfp_pf_dev *pf_dev,
		struct rte_eth_dev *eth_dev)
{
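	/*
	 * Select the Tx burst function matching the firmware datapath
	 * version: NFD3 has a single Tx path, while the NFDk helper is
	 * expected to pick the concrete (e.g. scalar or vector)
	 * implementation; the Rx burst function is set the same way.
	 */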
	if (pf_dev->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3)
		eth_dev->tx_pkt_burst = nfp_net_nfd3_xmit_pkts;
	else
		nfp_net_nfdk_xmit_pkts_set(eth_dev);

	eth_dev->dev_ops = &nfp_netvf_eth_dev_ops;
	eth_dev->rx_queue_count = nfp_net_rx_queue_count;
	nfp_net_recv_pkts_set(eth_dev);
}

static int
nfp_netvf_init(struct rte_eth_dev *eth_dev)
{
	int err;
	uint16_t port;
	uint32_t start_q;
	struct nfp_hw *hw;
	struct nfp_net_hw *net_hw;
	struct nfp_pf_dev *pf_dev;
	uint64_t tx_bar_off = 0;
	uint64_t rx_bar_off = 0;
	struct rte_pci_device *pci_dev;
	struct nfp_net_hw_priv *hw_priv;
	const struct nfp_dev_info *dev_info;

	port = eth_dev->data->port_id;
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	dev_info = nfp_dev_info_get(pci_dev->id.device_id);
	if (dev_info == NULL) {
		PMD_INIT_LOG(ERR, "Unsupported device ID.");
		return -ENODEV;
	}

	net_hw = eth_dev->data->dev_private;
	hw = &net_hw->super;

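	/* On a VF, BAR0 maps the per-vNIC control BAR. */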
	hw->ctrl_bar = pci_dev->mem_resource[0].addr;
	if (hw->ctrl_bar == NULL) {
		PMD_DRV_LOG(ERR, "The hw->super.ctrl_bar is NULL. BAR0 not configured.");
		return -ENODEV;
	}

	pf_dev = rte_zmalloc(NULL, sizeof(*pf_dev), 0);
	if (pf_dev == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate memory for the PF device.");
		return -ENOMEM;
	}

	pf_dev->pci_dev = pci_dev;

	/* Check the version from firmware */
	if (!nfp_net_version_check(hw, pf_dev)) {
		err = -EINVAL;
		goto pf_dev_free;
	}

	/* Set the ctrl bar size */
	nfp_net_ctrl_bar_size_set(pf_dev);

	PMD_INIT_LOG(DEBUG, "Ctrl bar: %p.", hw->ctrl_bar);

	err = nfp_net_common_init(pf_dev, net_hw);
	if (err != 0)
		goto pf_dev_free;

	nfp_netvf_ethdev_ops_mount(pf_dev, eth_dev);

	hw_priv = rte_zmalloc(NULL, sizeof(*hw_priv), 0);
	if (hw_priv == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate memory for hw priv data.");
		err = -ENOMEM;
		goto hw_priv_free;
	}

	hw_priv->dev_info = dev_info;
	hw_priv->pf_dev = pf_dev;

	if (!nfp_net_recv_pkt_meta_check_register(hw_priv)) {
		PMD_INIT_LOG(ERR, "Failed to register the VF meta check function.");
		err = -EINVAL;
		goto hw_priv_free;
	}

	eth_dev->process_private = hw_priv;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	net_hw->eth_xstats_base = rte_calloc("rte_eth_xstat",
			nfp_net_xstats_size(eth_dev), sizeof(struct rte_eth_xstat), 0);
	if (net_hw->eth_xstats_base == NULL) {
		PMD_INIT_LOG(ERR, "No memory for xstats base values on device %s!",
				pci_dev->device.name);
		err = -ENOMEM;
		goto hw_priv_free;
	}

	/*
	 * Work out where in BAR2 the queues start: the firmware reports
	 * the first Tx/Rx queue indexes, and nfp_qcp_queue_offset() turns
	 * them into byte offsets of the queue controller pointers.
	 */
	start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
	tx_bar_off = nfp_qcp_queue_offset(dev_info, start_q);
	start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
	rx_bar_off = nfp_qcp_queue_offset(dev_info, start_q);

	net_hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + tx_bar_off;
	net_hw->rx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + rx_bar_off;

	PMD_INIT_LOG(DEBUG, "The ctrl_bar: %p, tx_bar: %p, rx_bar: %p.",
			hw->ctrl_bar, net_hw->tx_bar, net_hw->rx_bar);

	nfp_net_cfg_queue_setup(net_hw);
	net_hw->mtu = RTE_ETHER_MTU;

	/* VLAN insertion is incompatible with LSOv2 */
	if ((hw->cap & NFP_NET_CFG_CTRL_LSO2) != 0)
		hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN;

	nfp_net_log_device_information(net_hw, pf_dev);

	/* Initializing spinlock for reconfigs */
	rte_spinlock_init(&hw->reconfig_lock);

	/* Allocating memory for MAC addr */
	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate space for the MAC address.");
		err = -ENOMEM;
		goto free_xstats;
	}

	nfp_read_mac(hw);
	if (rte_is_valid_assigned_ether_addr(&hw->mac_addr) == 0) {
		PMD_INIT_LOG(INFO, "Using random MAC address for port %hu.", port);
		/* Using random MAC addresses for VFs */
		rte_eth_random_addr(&hw->mac_addr.addr_bytes[0]);
		nfp_write_mac(hw, &hw->mac_addr.addr_bytes[0]);
	}

	/* Copying MAC address to DPDK eth_dev struct */
	rte_ether_addr_copy(&hw->mac_addr, eth_dev->data->mac_addrs);

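	/*
	 * Without the LIVE_ADDR capability the firmware does not accept
	 * MAC address changes while the port is up, so tell ethdev the
	 * address may only be changed on a stopped port.
	 */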
	if ((hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR) == 0)
		eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR;

	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	PMD_INIT_LOG(INFO, "Port %hu VendorID=%#x DeviceID=%#x "
			"mac=" RTE_ETHER_ADDR_PRT_FMT,
			port, pci_dev->id.vendor_id,
			pci_dev->id.device_id,
			RTE_ETHER_ADDR_BYTES(&hw->mac_addr));

	/* Registering LSC interrupt handler */
	rte_intr_callback_register(pci_dev->intr_handle,
			nfp_net_dev_interrupt_handler, (void *)eth_dev);
	/* Telling the firmware about the LSC interrupt entry */
	nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
	/* Unmask the LSC interrupt */
	nfp_net_irq_unmask(eth_dev);
	/* Recording current stats counter values */
	nfp_net_stats_reset(eth_dev);

	return 0;

free_xstats:
	rte_free(net_hw->eth_xstats_base);
hw_priv_free:
	rte_free(hw_priv);
pf_dev_free:
	rte_free(pf_dev);

	return err;
}

static const struct rte_pci_id pci_id_nfp_vf_net_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
				PCI_DEVICE_ID_NFP3800_VF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
				PCI_DEVICE_ID_NFP6000_VF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE,
				PCI_DEVICE_ID_NFP3800_VF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE,
				PCI_DEVICE_ID_NFP6000_VF_NIC)
	},
	{
		.vendor_id = 0,
	},
};

static int
nfp_vf_pci_uninit(struct rte_eth_dev *eth_dev)
{
	/* VF cleanup, just free private port data */
	return nfp_netvf_close(eth_dev);
}

static int
nfp_vf_pci_probe(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
			sizeof(struct nfp_net_hw), nfp_netvf_init);
}
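
/*
 * The generic ethdev PCI helpers allocate one ethdev per VF with
 * sizeof(struct nfp_net_hw) bytes of private data and run
 * nfp_netvf_init() on it; remove tears the port down again through
 * nfp_vf_pci_uninit().
 */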

static int
nfp_vf_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, nfp_vf_pci_uninit);
}

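/*
 * Registration goes through the NFP common PCI layer rather than
 * RTE_PMD_REGISTER_PCI directly, so that several device classes (for
 * example, ethdev and vDPA) can share the same VF PCI device.
 */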
static struct nfp_class_driver rte_nfp_net_vf_pmd = {
	.drv_class = NFP_CLASS_ETH,
	.name = RTE_STR(net_nfp_vf),
	.id_table = pci_id_nfp_vf_net_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = nfp_vf_pci_probe,
	.remove = nfp_vf_pci_remove,
};

RTE_INIT(rte_nfp_vf_pmd_init)
{
	nfp_class_driver_register(&rte_nfp_net_vf_pmd);
}

RTE_PMD_REGISTER_PCI_TABLE(NFP_VF_DRIVER_NAME, pci_id_nfp_vf_net_map);
RTE_PMD_REGISTER_KMOD_DEP(NFP_VF_DRIVER_NAME, "* igb_uio | uio_pci_generic | vfio");
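
/*
 * Usage sketch (hypothetical BDFs): create VFs on the PF and bind one
 * to vfio-pci before launching a DPDK application, e.g.:
 *
 *   echo 2 > /sys/bus/pci/devices/0000:02:00.0/sriov_numvfs
 *   dpdk-devbind.py --bind=vfio-pci 0000:02:08.0
 */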