/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2014-2021 Netronome Systems, Inc.
 * All rights reserved.
 *
 * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
 */

/*
 * vim:shiftwidth=8:noexpandtab
 *
 * @file dpdk/pmd/nfp_ethdev_vf.c
 *
 * Netronome vNIC VF DPDK Poll-Mode Driver: Main entry point
 */

#include <rte_alarm.h>

#include "nfpcore/nfp_mip.h"
#include "nfpcore/nfp_rtsym.h"

#include "nfp_common.h"
#include "nfp_rxtx.h"
#include "nfp_logs.h"
#include "nfp_ctrl.h"

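/*
 * Read the 6-byte vNIC MAC address from the control BAR as two 32-bit
 * loads: the first four bytes, then the remaining two.
 */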
static void
nfp_netvf_read_mac(struct nfp_net_hw *hw)
{
	uint32_t tmp;

	tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR));
	memcpy(&hw->mac_addr[0], &tmp, 4);

	tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR + 4));
	memcpy(&hw->mac_addr[4], &tmp, 2);
}

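/*
 * Bring the vNIC up: enable the configured queues, optionally set up RX
 * queue interrupts, program offload/RSS settings and the ENABLE bit, then
 * populate the RX free lists.
 */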
static int
nfp_netvf_start(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint32_t new_ctrl, update = 0;
	struct nfp_net_hw *hw;
	struct rte_eth_conf *dev_conf;
	struct rte_eth_rxmode *rxmode;
	uint32_t intr_vector;
	int ret;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_LOG(DEBUG, "Start");

	/* Disabling queues just in case... */
	nfp_net_disable_queues(dev);

	/* Enabling the required queues in the device */
	nfp_net_enable_queues(dev);

	/* Check and configure queue intr-vector mapping */
	if (dev->data->dev_conf.intr_conf.rxq != 0) {
		if (rte_intr_type_get(intr_handle) ==
						RTE_INTR_HANDLE_UIO) {
			/*
			 * Better not to share LSC with RX interrupts.
			 * Unregistering LSC interrupt handler
			 */
			rte_intr_callback_unregister(pci_dev->intr_handle,
				nfp_net_dev_interrupt_handler, (void *)dev);

			if (dev->data->nb_rx_queues > 1) {
				PMD_INIT_LOG(ERR, "PMD rx interrupt only "
					     "supports 1 queue with UIO");
				return -EIO;
			}
		}
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;

		nfp_configure_rx_interrupt(dev, intr_handle);
		update = NFP_NET_CFG_UPDATE_MSIX;
	}

	rte_intr_enable(intr_handle);

	new_ctrl = nfp_check_offloads(dev);

	/* Writing configuration parameters in the device */
	nfp_net_params_setup(hw);

	dev_conf = &dev->data->dev_conf;
	rxmode = &dev_conf->rxmode;

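	/*
	 * Enable RSS when requested, preferring the RSS2 capability when the
	 * firmware advertises it.
	 */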
	if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) {
		nfp_net_rss_config_default(dev);
		update |= NFP_NET_CFG_UPDATE_RSS;
		if (hw->cap & NFP_NET_CFG_CTRL_RSS2)
			new_ctrl |= NFP_NET_CFG_CTRL_RSS2;
		else
			new_ctrl |= NFP_NET_CFG_CTRL_RSS;
	}

	/* Enable device */
	new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;

	update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;

	if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
		new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;

	nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
	if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
		return -EIO;

	/*
	 * Allocating rte mbufs for configured rx queues.
	 * This requires the queues to have been enabled beforehand.
	 */
	if (nfp_net_rx_freelist_setup(dev) < 0) {
		ret = -ENOMEM;
		goto error;
	}

	hw->ctrl = new_ctrl;

	return 0;

error:
	/*
	 * An error returned by this function should mean the app is exiting
	 * and then the system will release all the allocated memory, even
	 * memory coming from hugepages.
	 *
	 * The device could be enabled at this point with some queues ready
	 * for getting packets. This is true if the call to
	 * nfp_net_rx_freelist_setup() succeeds for some queues but fails
	 * for subsequent queues.
	 *
	 * This should make the app exit, but it is better to tell the
	 * device first.
	 */
	nfp_net_disable_queues(dev);

	return ret;
}

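/* Stop device: disable RX and TX functions, then clear the queues. */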
static int
nfp_netvf_stop(struct rte_eth_dev *dev)
{
	PMD_INIT_LOG(DEBUG, "Stop");

	nfp_net_disable_queues(dev);

	/* Clear queues */
	nfp_net_stop_tx_queue(dev);

	nfp_net_stop_rx_queue(dev);

	return 0;
}

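/* Set the link up; not supported on the VF vNIC. */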
static int
nfp_netvf_set_link_up(struct rte_eth_dev *dev __rte_unused)
{
	return -ENOTSUP;
}

/* Set the link down. */
static int
nfp_netvf_set_link_down(struct rte_eth_dev *dev __rte_unused)
{
	return -ENOTSUP;
}

/* Reset and stop device. The device cannot be restarted. */
static int
nfp_netvf_close(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	PMD_INIT_LOG(DEBUG, "Close");

	pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	/*
	 * We assume that the DPDK application has stopped all the
	 * threads/queues before calling the device close function.
	 */

	nfp_net_disable_queues(dev);

	/* Clear queues */
	nfp_net_close_tx_queue(dev);

	nfp_net_close_rx_queue(dev);

	rte_intr_disable(pci_dev->intr_handle);

	/* Unregister callback func from eal lib */
	rte_intr_callback_unregister(pci_dev->intr_handle,
				     nfp_net_dev_interrupt_handler,
				     (void *)dev);

	/* Cancel possible impending LSC work here before releasing the port */
	rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler,
			     (void *)dev);

	/*
	 * The ixgbe PMD disables the PCIe master on the
	 * device. The i40e does not...
	 */

	return 0;
}

/* Initialise and register VF driver with DPDK Application */
static const struct eth_dev_ops nfp_netvf_nfd3_eth_dev_ops = {
	.dev_configure		= nfp_net_configure,
	.dev_start		= nfp_netvf_start,
	.dev_stop		= nfp_netvf_stop,
	.dev_set_link_up	= nfp_netvf_set_link_up,
	.dev_set_link_down	= nfp_netvf_set_link_down,
	.dev_close		= nfp_netvf_close,
	.promiscuous_enable	= nfp_net_promisc_enable,
	.promiscuous_disable	= nfp_net_promisc_disable,
	.link_update		= nfp_net_link_update,
	.stats_get		= nfp_net_stats_get,
	.stats_reset		= nfp_net_stats_reset,
	.dev_infos_get		= nfp_net_infos_get,
	.dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
	.mtu_set		= nfp_net_dev_mtu_set,
	.mac_addr_set		= nfp_net_set_mac_addr,
	.vlan_offload_set	= nfp_net_vlan_offload_set,
	.reta_update		= nfp_net_reta_update,
	.reta_query		= nfp_net_reta_query,
	.rss_hash_update	= nfp_net_rss_hash_update,
	.rss_hash_conf_get	= nfp_net_rss_hash_conf_get,
	.rx_queue_setup		= nfp_net_rx_queue_setup,
	.rx_queue_release	= nfp_net_rx_queue_release,
	.tx_queue_setup		= nfp_net_nfd3_tx_queue_setup,
	.tx_queue_release	= nfp_net_tx_queue_release,
	.rx_queue_intr_enable	= nfp_rx_queue_intr_enable,
	.rx_queue_intr_disable	= nfp_rx_queue_intr_disable,
};

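/* The NFDK ops differ from the NFD3 ops only in the Tx queue setup callback. */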
static const struct eth_dev_ops nfp_netvf_nfdk_eth_dev_ops = {
	.dev_configure		= nfp_net_configure,
	.dev_start		= nfp_netvf_start,
	.dev_stop		= nfp_netvf_stop,
	.dev_set_link_up	= nfp_netvf_set_link_up,
	.dev_set_link_down	= nfp_netvf_set_link_down,
	.dev_close		= nfp_netvf_close,
	.promiscuous_enable	= nfp_net_promisc_enable,
	.promiscuous_disable	= nfp_net_promisc_disable,
	.link_update		= nfp_net_link_update,
	.stats_get		= nfp_net_stats_get,
	.stats_reset		= nfp_net_stats_reset,
	.dev_infos_get		= nfp_net_infos_get,
	.dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
	.mtu_set		= nfp_net_dev_mtu_set,
	.mac_addr_set		= nfp_net_set_mac_addr,
	.vlan_offload_set	= nfp_net_vlan_offload_set,
	.reta_update		= nfp_net_reta_update,
	.reta_query		= nfp_net_reta_query,
	.rss_hash_update	= nfp_net_rss_hash_update,
	.rss_hash_conf_get	= nfp_net_rss_hash_conf_get,
	.rx_queue_setup		= nfp_net_rx_queue_setup,
	.rx_queue_release	= nfp_net_rx_queue_release,
	.tx_queue_setup		= nfp_net_nfdk_tx_queue_setup,
	.tx_queue_release	= nfp_net_tx_queue_release,
	.rx_queue_intr_enable	= nfp_rx_queue_intr_enable,
	.rx_queue_intr_disable	= nfp_rx_queue_intr_disable,
};

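/*
 * Hook up the datapath-specific ops and burst functions based on the
 * firmware's datapath class version read from the control BAR.
 */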
static inline int
nfp_netvf_ethdev_ops_mount(struct nfp_net_hw *hw, struct rte_eth_dev *eth_dev)
{
	/* The Tx burst function must match the datapath selected below. */
	switch (NFD_CFG_CLASS_VER_of(hw->ver)) {
	case NFP_NET_CFG_VERSION_DP_NFD3:
		eth_dev->dev_ops = &nfp_netvf_nfd3_eth_dev_ops;
		eth_dev->tx_pkt_burst = &nfp_net_nfd3_xmit_pkts;
		break;
	case NFP_NET_CFG_VERSION_DP_NFDK:
		if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 5) {
			PMD_DRV_LOG(ERR, "NFDK must use ABI 5 or newer, found: %d",
				NFD_CFG_MAJOR_VERSION_of(hw->ver));
			return -EINVAL;
		}
		eth_dev->dev_ops = &nfp_netvf_nfdk_eth_dev_ops;
		eth_dev->tx_pkt_burst = &nfp_net_nfdk_xmit_pkts;
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported firmware datapath version");
		return -EINVAL;
	}

	eth_dev->rx_queue_count = nfp_net_rx_queue_count;
	eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;

	return 0;
}

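/*
 * Per-port initialisation: map the control and queue BARs, read the
 * read-only fields from the config BAR, set up the MAC address and
 * register the LSC interrupt handler.
 */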
static int
nfp_netvf_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct nfp_net_hw *hw;
	struct rte_ether_addr *tmp_ether_addr;

	uint64_t tx_bar_off = 0, rx_bar_off = 0;
	uint32_t start_q;
	int stride = 4;
	int port = 0;
	int err;

	PMD_INIT_FUNC_TRACE();

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	/* NFP cannot handle DMA addresses requiring more than 40 bits */
	if (rte_mem_check_dma_mask(40)) {
		RTE_LOG(ERR, PMD,
			"device %s cannot be used: restricted dma mask to 40 bits!\n",
			pci_dev->device.name);
		return -ENODEV;
	}

	hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	hw->ctrl_bar = (uint8_t *)pci_dev->mem_resource[0].addr;
	if (hw->ctrl_bar == NULL) {
		PMD_DRV_LOG(ERR,
			"hw->ctrl_bar is NULL. BAR0 not configured");
		return -ENODEV;
	}

	PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar);

	hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION);

	if (nfp_netvf_ethdev_ops_mount(hw, eth_dev))
		return -EINVAL;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;

	PMD_INIT_LOG(DEBUG, "nfp_net: device (%u:%u) %u:%u:%u:%u",
		     pci_dev->id.vendor_id, pci_dev->id.device_id,
		     pci_dev->addr.domain, pci_dev->addr.bus,
		     pci_dev->addr.devid, pci_dev->addr.function);

	hw->max_rx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_RXRINGS);
	hw->max_tx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_TXRINGS);

	/* Work out where in the BAR the queues start. */
	switch (pci_dev->id.device_id) {
	case PCI_DEVICE_ID_NFP3800_VF_NIC:
	case PCI_DEVICE_ID_NFP6000_VF_NIC:
		start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
		tx_bar_off = nfp_pci_queue(pci_dev, start_q);
		start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
		rx_bar_off = nfp_pci_queue(pci_dev, start_q);
		break;
	default:
		PMD_DRV_LOG(ERR, "nfp_net: no device ID matching");
		err = -ENODEV;
		goto dev_err_ctrl_map;
	}

	PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%" PRIx64 "", tx_bar_off);
	PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%" PRIx64 "", rx_bar_off);

	hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr +
		     tx_bar_off;
	hw->rx_bar = (uint8_t *)pci_dev->mem_resource[2].addr +
		     rx_bar_off;

	PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
		     hw->ctrl_bar, hw->tx_bar, hw->rx_bar);

	nfp_net_cfg_queue_setup(hw);

	/* Get some of the read-only fields from the config BAR */
	hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
	hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
	hw->mtu = RTE_ETHER_MTU;
	hw->flbufsz = RTE_ETHER_MTU;

	/* VLAN insertion is incompatible with LSOv2 */
	if (hw->cap & NFP_NET_CFG_CTRL_LSO2)
		hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN;

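	/*
	 * Firmware with ABI major version < 2 uses a fixed RX packet offset;
	 * newer firmware advertises the offset in the control BAR.
	 */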
	if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2)
		hw->rx_offset = NFP_NET_RX_OFFSET;
	else
		hw->rx_offset = nn_cfg_readl(hw, NFP_NET_CFG_RX_OFFSET_ADDR);

	PMD_INIT_LOG(INFO, "VER: %u.%u, Maximum supported MTU: %d",
			   NFD_CFG_MAJOR_VERSION_of(hw->ver),
			   NFD_CFG_MINOR_VERSION_of(hw->ver), hw->max_mtu);

	PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s%s%s%s%s%s", hw->cap,
		     hw->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
		     hw->cap & NFP_NET_CFG_CTRL_L2BC    ? "L2BCFILT " : "",
		     hw->cap & NFP_NET_CFG_CTRL_L2MC    ? "L2MCFILT " : "",
		     hw->cap & NFP_NET_CFG_CTRL_RXCSUM  ? "RXCSUM "  : "",
		     hw->cap & NFP_NET_CFG_CTRL_TXCSUM  ? "TXCSUM "  : "",
		     hw->cap & NFP_NET_CFG_CTRL_RXVLAN  ? "RXVLAN "  : "",
		     hw->cap & NFP_NET_CFG_CTRL_TXVLAN  ? "TXVLAN "  : "",
		     hw->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "",
		     hw->cap & NFP_NET_CFG_CTRL_GATHER  ? "GATHER "  : "",
		     hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR ? "LIVE_ADDR " : "",
		     hw->cap & NFP_NET_CFG_CTRL_LSO     ? "TSO "     : "",
		     hw->cap & NFP_NET_CFG_CTRL_LSO2    ? "TSOv2 "   : "",
		     hw->cap & NFP_NET_CFG_CTRL_RSS     ? "RSS "     : "",
		     hw->cap & NFP_NET_CFG_CTRL_RSS2    ? "RSSv2 "   : "");

	hw->ctrl = 0;

	hw->stride_rx = stride;
	hw->stride_tx = stride;

	PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u",
		     hw->max_rx_queues, hw->max_tx_queues);

	/* Initializing spinlock for reconfigs */
	rte_spinlock_init(&hw->reconfig_lock);

	/* Allocating memory for mac addr */
	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
					       RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate space for MAC address");
		err = -ENOMEM;
		goto dev_err_queues_map;
	}

	nfp_netvf_read_mac(hw);

	tmp_ether_addr = (struct rte_ether_addr *)&hw->mac_addr;
	if (!rte_is_valid_assigned_ether_addr(tmp_ether_addr)) {
		PMD_INIT_LOG(INFO, "Using random mac address for port %d",
				   port);
		/* Using random mac addresses for VFs */
		rte_eth_random_addr(&hw->mac_addr[0]);
		nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
	}

	/* Copying mac address to DPDK eth_dev struct */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac_addr,
			&eth_dev->data->mac_addrs[0]);

	if (!(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
		eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR;

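	/* Let the ethdev layer auto-fill per-queue xstats from the basic stats. */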
	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x "
		     "mac=%02x:%02x:%02x:%02x:%02x:%02x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id,
		     hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
		     hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		/* Registering LSC interrupt handler */
		rte_intr_callback_register(pci_dev->intr_handle,
					   nfp_net_dev_interrupt_handler,
					   (void *)eth_dev);
		/* Telling the firmware about the LSC interrupt entry */
		nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
		/* Recording current stats counters values */
		nfp_net_stats_reset(eth_dev);
	}

	return 0;

dev_err_queues_map:
	nfp_cpp_area_free(hw->hwqueues_area);
dev_err_ctrl_map:
	nfp_cpp_area_free(hw->ctrl_area);

	return err;
}

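/* PCI device IDs the VF PMD binds to; the zeroed entry terminates the list. */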
static const struct rte_pci_id pci_id_nfp_vf_net_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
			       PCI_DEVICE_ID_NFP3800_VF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
			       PCI_DEVICE_ID_NFP6000_VF_NIC)
	},
	{
		.vendor_id = 0,
	},
};

static int nfp_vf_pci_uninit(struct rte_eth_dev *eth_dev)
{
	/* VF cleanup, just free private port data */
	return nfp_netvf_close(eth_dev);
}

static int eth_nfp_vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct nfp_net_adapter), nfp_netvf_init);
}

static int eth_nfp_vf_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, nfp_vf_pci_uninit);
}

static struct rte_pci_driver rte_nfp_net_vf_pmd = {
	.id_table = pci_id_nfp_vf_net_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_nfp_vf_pci_probe,
	.remove = eth_nfp_vf_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_nfp_vf, rte_nfp_net_vf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_nfp_vf, pci_id_nfp_vf_net_map);
RTE_PMD_REGISTER_KMOD_DEP(net_nfp_vf, "* igb_uio | uio_pci_generic | vfio");
/*
 * Local variables:
 * c-file-style: "Linux"
 * indent-tabs-mode: t
 * End:
 */