/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>
#include <unistd.h>

#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_errno.h>

#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_dev.h>
#include <rte_kvargs.h>

#include "virtio_ethdev.h"
#include "virtio_pci.h"
#include "virtio_logs.h"

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_virtio_map[] = {
	{ RTE_PCI_DEVICE(VIRTIO_PCI_VENDORID, VIRTIO_PCI_LEGACY_DEVICEID_NET) },
	{ RTE_PCI_DEVICE(VIRTIO_PCI_VENDORID, VIRTIO_PCI_MODERN_DEVICEID_NET) },
	{ .vendor_id = 0, /* sentinel */ },
};

/*
 * Remap the PCI device again (I/O port map for a legacy device, memory
 * map for a modern device) so that the secondary process has the PCI
 * device initialized correctly.
 */
static int
virtio_remap_pci(struct rte_pci_device *pci_dev, struct virtio_hw *hw)
{
	if (hw->bus_type == VIRTIO_BUS_PCI_MODERN) {
		/*
		 * We don't have to re-parse the PCI config space, since
		 * rte_pci_map_device() makes sure the address mapped in
		 * the secondary process equals the one mapped in the
		 * primary process: an error is returned if that
		 * requirement is not met.
		 *
		 * Thus we can simply reuse all the capability pointers
		 * (such as dev_cfg, common_cfg, etc.) parsed by the
		 * primary process, which are stored in shared memory.
		 */
		if (rte_pci_map_device(pci_dev)) {
			PMD_INIT_LOG(DEBUG, "failed to map pci device!");
			return -1;
		}
	} else if (hw->bus_type == VIRTIO_BUS_PCI_LEGACY) {
		if (rte_pci_ioport_map(pci_dev, 0, VTPCI_IO(hw)) < 0)
			return -1;
	}

	return 0;
}
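
/*
 * Illustrative note (added here, not part of the original file): a secondary
 * process reaches virtio_remap_pci() through eth_virtio_pci_init() below,
 * e.g. an application started with "--proc-type=secondary" and the same
 * "-a <PCI BDF>" allow-list option as the primary. It attaches to the port
 * data already set up by the primary and only needs the BARs (or I/O ports)
 * remapped into its own address space.
 */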

static int
eth_virtio_pci_init(struct rte_eth_dev *eth_dev)
{
	struct virtio_pci_dev *dev = eth_dev->data->dev_private;
	struct virtio_hw *hw = &dev->hw;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	int ret;

	VTPCI_DEV(hw) = pci_dev;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		ret = vtpci_init(RTE_ETH_DEV_TO_PCI(eth_dev), hw);
		if (ret) {
			PMD_INIT_LOG(ERR, "Failed to init PCI device");
			return -1;
		}
	} else {
		ret = virtio_remap_pci(RTE_ETH_DEV_TO_PCI(eth_dev), hw);
		if (ret < 0) {
			PMD_INIT_LOG(ERR, "Failed to remap PCI device");
			return -1;
		}
	}

	ret = eth_virtio_dev_init(eth_dev);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "Failed to init virtio device");
		goto err_unmap;
	}

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		eth_dev->data->port_id, pci_dev->id.vendor_id,
		pci_dev->id.device_id);

	return 0;

err_unmap:
	rte_pci_unmap_device(RTE_ETH_DEV_TO_PCI(eth_dev));
	if (hw->bus_type == VIRTIO_BUS_PCI_LEGACY)
		rte_pci_ioport_unmap(VTPCI_IO(hw));

	return ret;
}
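
/*
 * Note added for clarity (assuming the standard behaviour of the generic
 * PCI ethdev helpers): rte_eth_dev_pci_generic_probe() below allocates
 * dev_private with sizeof(struct virtio_pci_dev), which is why
 * eth_virtio_pci_init() can treat eth_dev->data->dev_private as a
 * struct virtio_pci_dev.
 */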

static int
eth_virtio_pci_uninit(struct rte_eth_dev *eth_dev)
{
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		return 0;

	ret = virtio_dev_stop(eth_dev);
	virtio_dev_close(eth_dev);

	PMD_INIT_LOG(DEBUG, "dev_uninit completed");

	return ret;
}

static int vdpa_check_handler(__rte_unused const char *key,
		const char *value, void *ret_val)
{
	if (strcmp(value, "1") == 0)
		*(int *)ret_val = 1;
	else
		*(int *)ret_val = 0;

	return 0;
}

#define VIRTIO_ARG_VDPA       "vdpa"

static int
virtio_pci_devargs_parse(struct rte_devargs *devargs, int *vdpa)
{
	struct rte_kvargs *kvlist;
	int ret = 0;

	if (devargs == NULL)
		return 0;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL) {
		PMD_INIT_LOG(ERR, "error when parsing param");
		return 0;
	}

	if (rte_kvargs_count(kvlist, VIRTIO_ARG_VDPA) == 1) {
		/* vdpa mode selected when there's a key-value pair:
		 * vdpa=1
		 */
		ret = rte_kvargs_process(kvlist, VIRTIO_ARG_VDPA,
				vdpa_check_handler, vdpa);
		if (ret < 0)
			PMD_INIT_LOG(ERR, "Failed to parse %s", VIRTIO_ARG_VDPA);
	}

	rte_kvargs_free(kvlist);

	return ret;
}

static int eth_virtio_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	int vdpa = 0;
	int ret = 0;

	ret = virtio_pci_devargs_parse(pci_dev->device.devargs, &vdpa);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "devargs parsing failed");
		return ret;
	}
	/* virtio PMD skips the probe if the device needs to work in vDPA mode */
	if (vdpa == 1)
		return 1;

	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct virtio_pci_dev),
		eth_virtio_pci_init);
}
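
/*
 * Usage sketch (hedged, not from the original file): the "vdpa" devarg is
 * passed as a device argument on the EAL allow-list, e.g.
 *
 *     -a 0000:00:04.0,vdpa=1
 *
 * (the BDF here is hypothetical). With vdpa=1 this probe returns a positive
 * value, which the PCI bus treats as "device not taken by this driver",
 * leaving the device free for a vDPA driver instead of the net_virtio PMD.
 */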

static int eth_virtio_pci_remove(struct rte_pci_device *pci_dev)
{
	int ret;

	ret = rte_eth_dev_pci_generic_remove(pci_dev, eth_virtio_pci_uninit);
	/* Port has already been released by close. */
	if (ret == -ENODEV)
		ret = 0;
	return ret;
}

static struct rte_pci_driver rte_virtio_net_pci_pmd = {
	.driver = {
		.name = "net_virtio",
	},
	.id_table = pci_id_virtio_map,
	.drv_flags = 0,
	.probe = eth_virtio_pci_probe,
	.remove = eth_virtio_pci_remove,
};

RTE_INIT(rte_virtio_net_pci_pmd_init)
{
	/* I/O port access is needed for legacy virtio devices */
	rte_eal_iopl_init();
	rte_pci_register(&rte_virtio_net_pci_pmd);
}

RTE_PMD_REGISTER_PCI_TABLE(net_virtio, pci_id_virtio_map);
RTE_PMD_REGISTER_KMOD_DEP(net_virtio, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_EXPORT_NAME(net_virtio, __COUNTER__);