/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2014-2021 Netronome Systems, Inc.
 * All rights reserved.
 *
 * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
 */

#include <rte_alarm.h>
#include <nfp_common_pci.h>

#include "nfd3/nfp_nfd3.h"
#include "nfdk/nfp_nfdk.h"
#include "nfpcore/nfp_cpp.h"

#include "nfp_logs.h"
#include "nfp_net_common.h"

#define NFP_VF_DRIVER_NAME net_nfp_vf

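/* Configure and enable the device, then start the RX/TX queues. */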
static int
nfp_netvf_start(struct rte_eth_dev *dev)
{
	int ret;
	uint16_t i;
	struct nfp_hw *hw;
	uint32_t new_ctrl;
	uint32_t update = 0;
	uint32_t intr_vector;
	struct nfp_net_hw *net_hw;
	struct rte_eth_conf *dev_conf;
	struct rte_eth_rxmode *rxmode;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;

	/* Disabling queues just in case... */
	nfp_net_disable_queues(dev);

	/* Enabling the required queues in the device */
	nfp_net_enable_queues(dev);

	/* Check and configure queue intr-vector mapping */
	if (dev->data->dev_conf.intr_conf.rxq != 0) {
		if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_UIO) {
			/*
			 * Better not to share LSC with RX interrupts.
			 * Unregistering LSC interrupt handler.
			 */
			rte_intr_callback_unregister(intr_handle,
					nfp_net_dev_interrupt_handler, (void *)dev);

			if (dev->data->nb_rx_queues > 1) {
				PMD_INIT_LOG(ERR, "PMD rx interrupt only "
						"supports 1 queue with UIO");
				return -EIO;
			}
		}

		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector) != 0)
			return -1;

		nfp_configure_rx_interrupt(dev, intr_handle);
		update = NFP_NET_CFG_UPDATE_MSIX;
	}

	rte_intr_enable(intr_handle);

	new_ctrl = nfp_check_offloads(dev);

	/* Writing configuration parameters in the device */
	net_hw = dev->data->dev_private;
	hw = &net_hw->super;
	nfp_net_params_setup(net_hw);

	dev_conf = &dev->data->dev_conf;
	rxmode = &dev_conf->rxmode;

	if ((rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) != 0) {
		nfp_net_rss_config_default(dev);
		update |= NFP_NET_CFG_UPDATE_RSS;
		new_ctrl |= nfp_net_cfg_ctrl_rss(hw->cap);
	}

	/* Enable device */
	new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;

	update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;

	if ((hw->cap & NFP_NET_CFG_CTRL_RINGCFG) != 0)
		new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;

	nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
	if (nfp_reconfig(hw, new_ctrl, update) != 0)
		return -EIO;

	hw->ctrl = new_ctrl;

	/*
	 * Allocate rte mbufs for the configured rx queues.
	 * This requires the queues to be enabled beforehand.
	 */
	if (nfp_net_rx_freelist_setup(dev) != 0) {
		ret = -ENOMEM;
		goto error;
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;

error:
	/*
	 * An error returned by this function should mean the app exits
	 * and the system then releases all the allocated memory, even
	 * memory coming from hugepages.
	 *
	 * The device could be enabled at this point with some queues
	 * ready for receiving packets. This is true if the call to
	 * nfp_net_rx_freelist_setup() succeeds for some queues but
	 * fails for subsequent queues.
	 *
	 * This should make the app exit, but it is better to tell the
	 * device first.
	 */
	nfp_net_disable_queues(dev);

	return ret;
}

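/* Stop the device: disable RX and TX, and clear the configured queues. */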
static int
nfp_netvf_stop(struct rte_eth_dev *dev)
{
	nfp_net_disable_queues(dev);

	/* Clear queues */
	nfp_net_stop_tx_queue(dev);

	nfp_net_stop_rx_queue(dev);

	return 0;
}

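/* Set the link up. */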
static int
nfp_netvf_set_link_up(struct rte_eth_dev *dev __rte_unused)
{
	return -ENOTSUP;
}

/* Set the link down. */
static int
nfp_netvf_set_link_down(struct rte_eth_dev *dev __rte_unused)
{
	return -ENOTSUP;
}

/* Reset and stop the device. The device cannot be restarted. */
static int
nfp_netvf_close(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *net_hw;
	struct rte_pci_device *pci_dev;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	net_hw = dev->data->dev_private;
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	rte_free(net_hw->eth_xstats_base);

	/*
	 * We assume that the DPDK application has stopped all the
	 * threads/queues before calling the device close function.
	 */
	nfp_net_disable_queues(dev);

	/* Clear queues */
	nfp_net_close_tx_queue(dev);
	nfp_net_close_rx_queue(dev);

	rte_intr_disable(pci_dev->intr_handle);

	/* Unregister callback func from eal lib */
	rte_intr_callback_unregister(pci_dev->intr_handle,
			nfp_net_dev_interrupt_handler, (void *)dev);

	/* Cancel possible impending LSC work here before releasing the port */
	rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler, (void *)dev);

	return 0;
}

/* Initialise and register the VF driver with the DPDK application. */
static const struct eth_dev_ops nfp_netvf_eth_dev_ops = {
	.dev_configure          = nfp_net_configure,
	.dev_start              = nfp_netvf_start,
	.dev_stop               = nfp_netvf_stop,
	.dev_set_link_up        = nfp_netvf_set_link_up,
	.dev_set_link_down      = nfp_netvf_set_link_down,
	.dev_close              = nfp_netvf_close,
	.promiscuous_enable     = nfp_net_promisc_enable,
	.promiscuous_disable    = nfp_net_promisc_disable,
	.allmulticast_enable    = nfp_net_allmulticast_enable,
	.allmulticast_disable   = nfp_net_allmulticast_disable,
	.link_update            = nfp_net_link_update,
	.stats_get              = nfp_net_stats_get,
	.stats_reset            = nfp_net_stats_reset,
	.xstats_get             = nfp_net_xstats_get,
	.xstats_reset           = nfp_net_xstats_reset,
	.xstats_get_names       = nfp_net_xstats_get_names,
	.xstats_get_by_id       = nfp_net_xstats_get_by_id,
	.xstats_get_names_by_id = nfp_net_xstats_get_names_by_id,
	.dev_infos_get          = nfp_net_infos_get,
	.dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
	.mtu_set                = nfp_net_dev_mtu_set,
	.mac_addr_set           = nfp_net_set_mac_addr,
	.vlan_offload_set       = nfp_net_vlan_offload_set,
	.reta_update            = nfp_net_reta_update,
	.reta_query             = nfp_net_reta_query,
	.rss_hash_update        = nfp_net_rss_hash_update,
	.rss_hash_conf_get      = nfp_net_rss_hash_conf_get,
	.rx_queue_setup         = nfp_net_rx_queue_setup,
	.rx_queue_release       = nfp_net_rx_queue_release,
	.tx_queue_setup         = nfp_net_tx_queue_setup,
	.tx_queue_release       = nfp_net_tx_queue_release,
	.rx_queue_intr_enable   = nfp_rx_queue_intr_enable,
	.rx_queue_intr_disable  = nfp_rx_queue_intr_disable,
};

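/* Mount the datapath-specific TX function (NFD3 or NFDK) and the ethdev ops. */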
static inline void
nfp_netvf_ethdev_ops_mount(struct nfp_net_hw *hw,
		struct rte_eth_dev *eth_dev)
{
	if (hw->ver.extend == NFP_NET_CFG_VERSION_DP_NFD3)
		eth_dev->tx_pkt_burst = nfp_net_nfd3_xmit_pkts;
	else
		eth_dev->tx_pkt_burst = nfp_net_nfdk_xmit_pkts;

	eth_dev->dev_ops = &nfp_netvf_eth_dev_ops;
	eth_dev->rx_queue_count = nfp_net_rx_queue_count;
	eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
}

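/* Initialise a VF port: map BARs, set up the MAC address and the LSC interrupt. */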
static int
nfp_netvf_init(struct rte_eth_dev *eth_dev)
{
	int err;
	uint16_t port;
	uint32_t start_q;
	struct nfp_hw *hw;
	struct nfp_net_hw *net_hw;
	uint64_t tx_bar_off = 0;
	uint64_t rx_bar_off = 0;
	struct rte_pci_device *pci_dev;
	const struct nfp_dev_info *dev_info;

	port = eth_dev->data->port_id;
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	dev_info = nfp_dev_info_get(pci_dev->id.device_id);
	if (dev_info == NULL) {
		PMD_INIT_LOG(ERR, "Unsupported device ID");
		return -ENODEV;
	}

	net_hw = eth_dev->data->dev_private;
	net_hw->dev_info = dev_info;
	hw = &net_hw->super;

	hw->ctrl_bar = pci_dev->mem_resource[0].addr;
	if (hw->ctrl_bar == NULL) {
		PMD_DRV_LOG(ERR, "hw->super.ctrl_bar is NULL. BAR0 not configured");
		return -ENODEV;
	}

	PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar);

	err = nfp_net_common_init(pci_dev, net_hw);
	if (err != 0)
		return err;

	nfp_netvf_ethdev_ops_mount(net_hw, eth_dev);

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	net_hw->eth_xstats_base = rte_malloc("rte_eth_xstat",
			sizeof(struct rte_eth_xstat) * nfp_net_xstats_size(eth_dev), 0);
	if (net_hw->eth_xstats_base == NULL) {
		PMD_INIT_LOG(ERR, "No memory for xstats base values on device %s!",
				pci_dev->device.name);
		return -ENOMEM;
	}

	/* Work out where in the BAR the queues start. */
	start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
	tx_bar_off = nfp_qcp_queue_offset(dev_info, start_q);
	start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
	rx_bar_off = nfp_qcp_queue_offset(dev_info, start_q);

	net_hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + tx_bar_off;
	net_hw->rx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + rx_bar_off;

	PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
			hw->ctrl_bar, net_hw->tx_bar, net_hw->rx_bar);

	nfp_net_cfg_queue_setup(net_hw);
	net_hw->mtu = RTE_ETHER_MTU;

	/* VLAN insertion is incompatible with LSOv2 */
	if ((hw->cap & NFP_NET_CFG_CTRL_LSO2) != 0)
		hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN;

	nfp_net_log_device_information(net_hw);

	/* Initializing spinlock for reconfigs */
	rte_spinlock_init(&hw->reconfig_lock);

	/* Allocating memory for mac addr */
	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate space for MAC address");
		err = -ENOMEM;
		goto free_xstats;
	}

	nfp_read_mac(hw);
	if (rte_is_valid_assigned_ether_addr(&hw->mac_addr) == 0) {
		PMD_INIT_LOG(INFO, "Using random mac address for port %hu", port);
		/* Using random mac addresses for VFs */
		rte_eth_random_addr(&hw->mac_addr.addr_bytes[0]);
		nfp_write_mac(hw, &hw->mac_addr.addr_bytes[0]);
	}

	/* Copying mac address to DPDK eth_dev struct */
	rte_ether_addr_copy(&hw->mac_addr, eth_dev->data->mac_addrs);

	if ((hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR) == 0)
		eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR;

	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	PMD_INIT_LOG(INFO, "port %hu VendorID=%#x DeviceID=%#x "
			"mac=" RTE_ETHER_ADDR_PRT_FMT,
			port, pci_dev->id.vendor_id,
			pci_dev->id.device_id,
			RTE_ETHER_ADDR_BYTES(&hw->mac_addr));

	/* Registering LSC interrupt handler */
	rte_intr_callback_register(pci_dev->intr_handle,
			nfp_net_dev_interrupt_handler, (void *)eth_dev);
	/* Telling the firmware about the LSC interrupt entry */
	nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
	/* Unmask the LSC interrupt */
	nfp_net_irq_unmask(eth_dev);
	/* Record the current stats counter values */
	nfp_net_stats_reset(eth_dev);

	return 0;

free_xstats:
	rte_free(net_hw->eth_xstats_base);

	return err;
}

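/* PCI device IDs of the NFP3800/NFP6000 VFs handled by this driver. */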
static const struct rte_pci_id pci_id_nfp_vf_net_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
				PCI_DEVICE_ID_NFP3800_VF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
				PCI_DEVICE_ID_NFP6000_VF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE,
				PCI_DEVICE_ID_NFP3800_VF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CORIGINE,
				PCI_DEVICE_ID_NFP6000_VF_NIC)
	},
	{
		.vendor_id = 0,
	},
};

static int
nfp_vf_pci_uninit(struct rte_eth_dev *eth_dev)
{
	/* VF cleanup, just free private port data */
	return nfp_netvf_close(eth_dev);
}

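/* Allocate the ethdev and initialise the VF port through nfp_netvf_init(). */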
static int
nfp_vf_pci_probe(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
			sizeof(struct nfp_net_hw), nfp_netvf_init);
}

static int
nfp_vf_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, nfp_vf_pci_uninit);
}

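/* Class driver descriptor for the VF PMD, registered with the NFP common PCI layer. */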
static struct nfp_class_driver rte_nfp_net_vf_pmd = {
	.drv_class = NFP_CLASS_ETH,
	.name = RTE_STR(net_nfp_vf),
	.id_table = pci_id_nfp_vf_net_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = nfp_vf_pci_probe,
	.remove = nfp_vf_pci_remove,
};

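/* Register the VF class driver at DPDK initialisation time. */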
RTE_INIT(rte_nfp_vf_pmd_init)
{
	nfp_class_driver_register(&rte_nfp_net_vf_pmd);
}

RTE_PMD_REGISTER_PCI_TABLE(NFP_VF_DRIVER_NAME, pci_id_nfp_vf_net_map);
RTE_PMD_REGISTER_KMOD_DEP(NFP_VF_DRIVER_NAME, "* igb_uio | uio_pci_generic | vfio");