xref: /dpdk/drivers/net/virtio/virtio_pci_ethdev.c (revision 6c02043e9967a9d8f6e8c058256e257efe1d6d1a)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation
3  */
4 
5 #include <stdint.h>
6 #include <string.h>
7 #include <stdio.h>
8 #include <errno.h>
9 #include <unistd.h>
10 
11 #include <ethdev_driver.h>
12 #include <ethdev_pci.h>
13 #include <rte_pci.h>
14 #include <rte_bus_pci.h>
15 #include <rte_errno.h>
16 
17 #include <rte_memory.h>
18 #include <rte_eal.h>
19 #include <rte_dev.h>
20 #include <rte_kvargs.h>
21 
22 #include "virtio.h"
23 #include "virtio_ethdev.h"
24 #include "virtio_pci.h"
25 #include "virtio_logs.h"
26 
/*
 * The set of PCI devices this driver supports:
 * the legacy (transitional) and the modern virtio-net device IDs,
 * both under the virtio PCI vendor ID.
 */
static const struct rte_pci_id pci_id_virtio_map[] = {
	{ RTE_PCI_DEVICE(VIRTIO_PCI_VENDORID, VIRTIO_PCI_LEGACY_DEVICEID_NET) },
	{ RTE_PCI_DEVICE(VIRTIO_PCI_VENDORID, VIRTIO_PCI_MODERN_DEVICEID_NET) },
	{ .vendor_id = 0, /* sentinel */ },
};
35 
36 
37 /*
38  * Remap the PCI device again (IO port map for legacy device and
39  * memory map for modern device), so that the secondary process
40  * could have the PCI initiated correctly.
41  */
42 static int
43 virtio_remap_pci(struct rte_pci_device *pci_dev, struct virtio_pci_dev *dev)
44 {
45 	struct virtio_hw *hw = &dev->hw;
46 
47 	if (dev->modern) {
48 		/*
49 		 * We don't have to re-parse the PCI config space, since
50 		 * rte_pci_map_device() makes sure the mapped address
51 		 * in secondary process would equal to the one mapped in
52 		 * the primary process: error will be returned if that
53 		 * requirement is not met.
54 		 *
55 		 * That said, we could simply reuse all cap pointers
56 		 * (such as dev_cfg, common_cfg, etc.) parsed from the
57 		 * primary process, which is stored in shared memory.
58 		 */
59 		if (rte_pci_map_device(pci_dev)) {
60 			PMD_INIT_LOG(DEBUG, "failed to map pci device!");
61 			return -1;
62 		}
63 	} else {
64 		if (vtpci_legacy_ioport_map(hw) < 0)
65 			return -1;
66 	}
67 
68 	return 0;
69 }
70 
71 static int
72 eth_virtio_pci_init(struct rte_eth_dev *eth_dev)
73 {
74 	struct virtio_pci_dev *dev = eth_dev->data->dev_private;
75 	struct virtio_hw *hw = &dev->hw;
76 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
77 	int ret;
78 
79 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
80 		ret = vtpci_init(RTE_ETH_DEV_TO_PCI(eth_dev), dev);
81 		if (ret) {
82 			PMD_INIT_LOG(ERR, "Failed to init PCI device\n");
83 			return -1;
84 		}
85 	} else {
86 		if (dev->modern)
87 			VIRTIO_OPS(hw) = &modern_ops;
88 		else
89 			VIRTIO_OPS(hw) = &legacy_ops;
90 
91 		ret = virtio_remap_pci(RTE_ETH_DEV_TO_PCI(eth_dev), dev);
92 		if (ret < 0) {
93 			PMD_INIT_LOG(ERR, "Failed to remap PCI device\n");
94 			return -1;
95 		}
96 	}
97 
98 	ret = eth_virtio_dev_init(eth_dev);
99 	if (ret < 0) {
100 		PMD_INIT_LOG(ERR, "Failed to init virtio device\n");
101 		goto err_unmap;
102 	}
103 
104 	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
105 		eth_dev->data->port_id, pci_dev->id.vendor_id,
106 		pci_dev->id.device_id);
107 
108 	return 0;
109 
110 err_unmap:
111 	rte_pci_unmap_device(RTE_ETH_DEV_TO_PCI(eth_dev));
112 	if (!dev->modern)
113 		vtpci_legacy_ioport_unmap(hw);
114 
115 	return ret;
116 }
117 
118 static int
119 eth_virtio_pci_uninit(struct rte_eth_dev *eth_dev)
120 {
121 	int ret;
122 	PMD_INIT_FUNC_TRACE();
123 
124 	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
125 		return 0;
126 
127 	ret = virtio_dev_stop(eth_dev);
128 	virtio_dev_close(eth_dev);
129 
130 	PMD_INIT_LOG(DEBUG, "dev_uninit completed");
131 
132 	return ret;
133 }
134 
135 static int vdpa_check_handler(__rte_unused const char *key,
136 		const char *value, void *ret_val)
137 {
138 	if (strcmp(value, "1") == 0)
139 		*(int *)ret_val = 1;
140 	else
141 		*(int *)ret_val = 0;
142 
143 	return 0;
144 }
145 
146 #define VIRTIO_ARG_VDPA       "vdpa"
147 
148 static int
149 virtio_pci_devargs_parse(struct rte_devargs *devargs, int *vdpa)
150 {
151 	struct rte_kvargs *kvlist;
152 	int ret = 0;
153 
154 	if (devargs == NULL)
155 		return 0;
156 
157 	kvlist = rte_kvargs_parse(devargs->args, NULL);
158 	if (kvlist == NULL) {
159 		PMD_INIT_LOG(ERR, "error when parsing param");
160 		return 0;
161 	}
162 
163 	if (rte_kvargs_count(kvlist, VIRTIO_ARG_VDPA) == 1) {
164 		/* vdpa mode selected when there's a key-value pair:
165 		 * vdpa=1
166 		 */
167 		ret = rte_kvargs_process(kvlist, VIRTIO_ARG_VDPA,
168 				vdpa_check_handler, vdpa);
169 		if (ret < 0)
170 			PMD_INIT_LOG(ERR, "Failed to parse %s", VIRTIO_ARG_VDPA);
171 	}
172 
173 	rte_kvargs_free(kvlist);
174 
175 	return ret;
176 }
177 
178 static int eth_virtio_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
179 	struct rte_pci_device *pci_dev)
180 {
181 	int vdpa = 0;
182 	int ret = 0;
183 
184 	ret = virtio_pci_devargs_parse(pci_dev->device.devargs, &vdpa);
185 	if (ret < 0) {
186 		PMD_INIT_LOG(ERR, "devargs parsing is failed");
187 		return ret;
188 	}
189 	/* virtio pmd skips probe if device needs to work in vdpa mode */
190 	if (vdpa == 1)
191 		return 1;
192 
193 	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct virtio_pci_dev),
194 		eth_virtio_pci_init);
195 }
196 
197 static int eth_virtio_pci_remove(struct rte_pci_device *pci_dev)
198 {
199 	int ret;
200 
201 	ret = rte_eth_dev_pci_generic_remove(pci_dev, eth_virtio_pci_uninit);
202 	/* Port has already been released by close. */
203 	if (ret == -ENODEV)
204 		ret = 0;
205 	return ret;
206 }
207 
/* Driver descriptor handed to the PCI bus via rte_pci_register(). */
static struct rte_pci_driver rte_virtio_net_pci_pmd = {
	.driver = {
		.name = "net_virtio",
	},
	.id_table = pci_id_virtio_map, /* legacy + modern virtio-net IDs */
	.drv_flags = 0,
	.probe = eth_virtio_pci_probe,
	.remove = eth_virtio_pci_remove,
};
217 
/* Constructor: registers the driver when the shared object is loaded. */
RTE_INIT(rte_virtio_net_pci_pmd_init)
{
	/*
	 * Request IO port access up front (presumably for legacy,
	 * port-IO based virtio devices). NOTE(review): the return value
	 * is intentionally ignored here — failure would only matter for
	 * legacy devices; confirm against vtpci_legacy_ioport_map().
	 */
	rte_eal_iopl_init();
	rte_pci_register(&rte_virtio_net_pci_pmd);
}
223 
/* Register driver metadata: supported PCI ID table, kernel-module
 * dependencies, and the exported PMD name.
 */
RTE_PMD_REGISTER_PCI_TABLE(net_virtio, pci_id_virtio_map);
RTE_PMD_REGISTER_KMOD_DEP(net_virtio, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_EXPORT_NAME(net_virtio, __COUNTER__);
227