xref: /dpdk/drivers/net/virtio/virtio_user_ethdev.c (revision a5d7a3f77ddc3c3ae18bce04d7555b458360cc65)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <stdint.h>
35 #include <sys/types.h>
36 #include <unistd.h>
37 
38 #include <rte_malloc.h>
39 #include <rte_kvargs.h>
40 
41 #include "virtio_ethdev.h"
42 #include "virtio_logs.h"
43 #include "virtio_pci.h"
44 #include "virtqueue.h"
45 #include "virtio_rxtx.h"
46 #include "virtio_user/virtio_user_dev.h"
47 
/* Fetch the virtio_user backend device hung off the generic virtio_hw. */
#define virtio_user_get_dev(hw) \
	((struct virtio_user_dev *)(hw)->virtio_user_dev)
50 
51 static void
52 virtio_user_read_dev_config(struct virtio_hw *hw, size_t offset,
53 		     void *dst, int length)
54 {
55 	int i;
56 	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
57 
58 	if (offset == offsetof(struct virtio_net_config, mac) &&
59 	    length == ETHER_ADDR_LEN) {
60 		for (i = 0; i < ETHER_ADDR_LEN; ++i)
61 			((uint8_t *)dst)[i] = dev->mac_addr[i];
62 		return;
63 	}
64 
65 	if (offset == offsetof(struct virtio_net_config, status))
66 		*(uint16_t *)dst = dev->status;
67 
68 	if (offset == offsetof(struct virtio_net_config, max_virtqueue_pairs))
69 		*(uint16_t *)dst = dev->max_queue_pairs;
70 }
71 
72 static void
73 virtio_user_write_dev_config(struct virtio_hw *hw, size_t offset,
74 		      const void *src, int length)
75 {
76 	int i;
77 	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
78 
79 	if ((offset == offsetof(struct virtio_net_config, mac)) &&
80 	    (length == ETHER_ADDR_LEN))
81 		for (i = 0; i < ETHER_ADDR_LEN; ++i)
82 			dev->mac_addr[i] = ((const uint8_t *)src)[i];
83 	else
84 		PMD_DRV_LOG(ERR, "not supported offset=%zu, len=%d\n",
85 			    offset, length);
86 }
87 
88 static void
89 virtio_user_set_status(struct virtio_hw *hw, uint8_t status)
90 {
91 	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
92 
93 	if (status & VIRTIO_CONFIG_STATUS_DRIVER_OK)
94 		virtio_user_start_device(dev);
95 	dev->status = status;
96 }
97 
/* Device reset: stop the backend datapath. */
static void
virtio_user_reset(struct virtio_hw *hw)
{
	virtio_user_stop_device(virtio_user_get_dev(hw));
}
105 
106 static uint8_t
107 virtio_user_get_status(struct virtio_hw *hw)
108 {
109 	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
110 
111 	return dev->status;
112 }
113 
114 static uint64_t
115 virtio_user_get_features(struct virtio_hw *hw)
116 {
117 	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
118 
119 	return dev->features;
120 }
121 
122 static void
123 virtio_user_set_features(struct virtio_hw *hw, uint64_t features)
124 {
125 	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
126 
127 	dev->features = features;
128 }
129 
130 static uint8_t
131 virtio_user_get_isr(struct virtio_hw *hw __rte_unused)
132 {
133 	/* When config interrupt happens, driver calls this function to query
134 	 * what kinds of change happen. Interrupt mode not supported for now.
135 	 */
136 	return 0;
137 }
138 
139 static uint16_t
140 virtio_user_set_config_irq(struct virtio_hw *hw __rte_unused,
141 		    uint16_t vec __rte_unused)
142 {
143 	return VIRTIO_MSI_NO_VECTOR;
144 }
145 
146 /* This function is to get the queue size, aka, number of descs, of a specified
147  * queue. Different with the VHOST_USER_GET_QUEUE_NUM, which is used to get the
148  * max supported queues.
149  */
150 static uint16_t
151 virtio_user_get_queue_num(struct virtio_hw *hw, uint16_t queue_id __rte_unused)
152 {
153 	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
154 
155 	/* Currently, each queue has same queue size */
156 	return dev->queue_size;
157 }
158 
159 static int
160 virtio_user_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
161 {
162 	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
163 	uint16_t queue_idx = vq->vq_queue_index;
164 	uint64_t desc_addr, avail_addr, used_addr;
165 
166 	desc_addr = (uintptr_t)vq->vq_ring_virt_mem;
167 	avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
168 	used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
169 							 ring[vq->vq_nentries]),
170 				   VIRTIO_PCI_VRING_ALIGN);
171 
172 	dev->vrings[queue_idx].num = vq->vq_nentries;
173 	dev->vrings[queue_idx].desc = (void *)(uintptr_t)desc_addr;
174 	dev->vrings[queue_idx].avail = (void *)(uintptr_t)avail_addr;
175 	dev->vrings[queue_idx].used = (void *)(uintptr_t)used_addr;
176 
177 	return 0;
178 }
179 
180 static void
181 virtio_user_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
182 {
183 	/* For legacy devices, write 0 to VIRTIO_PCI_QUEUE_PFN port, QEMU
184 	 * correspondingly stops the ioeventfds, and reset the status of
185 	 * the device.
186 	 * For modern devices, set queue desc, avail, used in PCI bar to 0,
187 	 * not see any more behavior in QEMU.
188 	 *
189 	 * Here we just care about what information to deliver to vhost-user
190 	 * or vhost-kernel. So we just close ioeventfd for now.
191 	 */
192 	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
193 
194 	close(dev->callfds[vq->vq_queue_index]);
195 	close(dev->kickfds[vq->vq_queue_index]);
196 }
197 
198 static void
199 virtio_user_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
200 {
201 	uint64_t buf = 1;
202 	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
203 
204 	if (hw->cvq && (hw->cvq->vq == vq)) {
205 		virtio_user_handle_cq(dev, vq->vq_queue_index);
206 		return;
207 	}
208 
209 	if (write(dev->kickfds[vq->vq_queue_index], &buf, sizeof(buf)) < 0)
210 		PMD_DRV_LOG(ERR, "failed to kick backend: %s\n",
211 			    strerror(errno));
212 }
213 
/* vtpci ops table backed by the virtio_user emulation instead of real PCI
 * accesses; installed on the hw in virtio_user_eth_dev_alloc().
 */
static const struct virtio_pci_ops virtio_user_ops = {
	.read_dev_cfg	= virtio_user_read_dev_config,
	.write_dev_cfg	= virtio_user_write_dev_config,
	.reset		= virtio_user_reset,
	.get_status	= virtio_user_get_status,
	.set_status	= virtio_user_set_status,
	.get_features	= virtio_user_get_features,
	.set_features	= virtio_user_set_features,
	.get_isr	= virtio_user_get_isr,
	.set_config_irq	= virtio_user_set_config_irq,
	.get_queue_num	= virtio_user_get_queue_num,
	.setup_queue	= virtio_user_setup_queue,
	.del_queue	= virtio_user_del_queue,
	.notify_queue	= virtio_user_notify_queue,
};
229 
/* Device arguments accepted on the vdev command line, terminated by NULL
 * as required by rte_kvargs_parse().
 */
static const char *valid_args[] = {
#define VIRTIO_USER_ARG_QUEUES_NUM     "queues"
	VIRTIO_USER_ARG_QUEUES_NUM,
#define VIRTIO_USER_ARG_CQ_NUM         "cq"
	VIRTIO_USER_ARG_CQ_NUM,
#define VIRTIO_USER_ARG_MAC            "mac"
	VIRTIO_USER_ARG_MAC,
#define VIRTIO_USER_ARG_PATH           "path"
	VIRTIO_USER_ARG_PATH,
#define VIRTIO_USER_ARG_QUEUE_SIZE     "queue_size"
	VIRTIO_USER_ARG_QUEUE_SIZE,
	NULL
};
243 
/* Defaults used when the corresponding devarg is not supplied. */
#define VIRTIO_USER_DEF_CQ_EN	0	/* control queue disabled */
#define VIRTIO_USER_DEF_Q_NUM	1	/* one queue pair */
#define VIRTIO_USER_DEF_Q_SZ	256	/* descriptors per queue */
247 
248 static int
249 get_string_arg(const char *key __rte_unused,
250 	       const char *value, void *extra_args)
251 {
252 	if (!value || !extra_args)
253 		return -EINVAL;
254 
255 	*(char **)extra_args = strdup(value);
256 
257 	return 0;
258 }
259 
260 static int
261 get_integer_arg(const char *key __rte_unused,
262 		const char *value, void *extra_args)
263 {
264 	if (!value || !extra_args)
265 		return -EINVAL;
266 
267 	*(uint64_t *)extra_args = strtoull(value, NULL, 0);
268 
269 	return 0;
270 }
271 
272 static struct rte_eth_dev *
273 virtio_user_eth_dev_alloc(const char *name)
274 {
275 	struct rte_eth_dev *eth_dev;
276 	struct rte_eth_dev_data *data;
277 	struct virtio_hw *hw;
278 	struct virtio_user_dev *dev;
279 
280 	eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
281 	if (!eth_dev) {
282 		PMD_INIT_LOG(ERR, "cannot alloc rte_eth_dev");
283 		return NULL;
284 	}
285 
286 	data = eth_dev->data;
287 
288 	hw = rte_zmalloc(NULL, sizeof(*hw), 0);
289 	if (!hw) {
290 		PMD_INIT_LOG(ERR, "malloc virtio_hw failed");
291 		rte_eth_dev_release_port(eth_dev);
292 		return NULL;
293 	}
294 
295 	dev = rte_zmalloc(NULL, sizeof(*dev), 0);
296 	if (!dev) {
297 		PMD_INIT_LOG(ERR, "malloc virtio_user_dev failed");
298 		rte_eth_dev_release_port(eth_dev);
299 		rte_free(hw);
300 		return NULL;
301 	}
302 
303 	hw->vtpci_ops = &virtio_user_ops;
304 	hw->use_msix = 0;
305 	hw->modern   = 0;
306 	hw->virtio_user_dev = dev;
307 	data->dev_private = hw;
308 	data->numa_node = SOCKET_ID_ANY;
309 	data->kdrv = RTE_KDRV_NONE;
310 	data->dev_flags = RTE_ETH_DEV_DETACHABLE;
311 	eth_dev->pci_dev = NULL;
312 	eth_dev->driver = NULL;
313 	return eth_dev;
314 }
315 
316 /* Dev initialization routine. Invoked once for each virtio vdev at
317  * EAL init time, see rte_eal_dev_init().
318  * Returns 0 on success.
319  */
320 static int
321 virtio_user_pmd_devinit(const char *name, const char *params)
322 {
323 	struct rte_kvargs *kvlist = NULL;
324 	struct rte_eth_dev *eth_dev;
325 	struct virtio_hw *hw;
326 	uint64_t queues = VIRTIO_USER_DEF_Q_NUM;
327 	uint64_t cq = VIRTIO_USER_DEF_CQ_EN;
328 	uint64_t queue_size = VIRTIO_USER_DEF_Q_SZ;
329 	char *path = NULL;
330 	char *mac_addr = NULL;
331 	int ret = -1;
332 
333 	if (!params || params[0] == '\0') {
334 		PMD_INIT_LOG(ERR, "arg %s is mandatory for virtio_user",
335 			  VIRTIO_USER_ARG_QUEUE_SIZE);
336 		goto end;
337 	}
338 
339 	kvlist = rte_kvargs_parse(params, valid_args);
340 	if (!kvlist) {
341 		PMD_INIT_LOG(ERR, "error when parsing param");
342 		goto end;
343 	}
344 
345 	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_PATH) == 1) {
346 		ret = rte_kvargs_process(kvlist, VIRTIO_USER_ARG_PATH,
347 					 &get_string_arg, &path);
348 		if (ret < 0) {
349 			PMD_INIT_LOG(ERR, "error to parse %s",
350 				     VIRTIO_USER_ARG_PATH);
351 			goto end;
352 		}
353 	} else {
354 		PMD_INIT_LOG(ERR, "arg %s is mandatory for virtio_user\n",
355 			  VIRTIO_USER_ARG_QUEUE_SIZE);
356 		goto end;
357 	}
358 
359 	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_MAC) == 1) {
360 		ret = rte_kvargs_process(kvlist, VIRTIO_USER_ARG_MAC,
361 					 &get_string_arg, &mac_addr);
362 		if (ret < 0) {
363 			PMD_INIT_LOG(ERR, "error to parse %s",
364 				     VIRTIO_USER_ARG_MAC);
365 			goto end;
366 		}
367 	}
368 
369 	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_QUEUE_SIZE) == 1) {
370 		ret = rte_kvargs_process(kvlist, VIRTIO_USER_ARG_QUEUE_SIZE,
371 					 &get_integer_arg, &queue_size);
372 		if (ret < 0) {
373 			PMD_INIT_LOG(ERR, "error to parse %s",
374 				     VIRTIO_USER_ARG_QUEUE_SIZE);
375 			goto end;
376 		}
377 	}
378 
379 	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_QUEUES_NUM) == 1) {
380 		ret = rte_kvargs_process(kvlist, VIRTIO_USER_ARG_QUEUES_NUM,
381 					 &get_integer_arg, &queues);
382 		if (ret < 0) {
383 			PMD_INIT_LOG(ERR, "error to parse %s",
384 				     VIRTIO_USER_ARG_QUEUES_NUM);
385 			goto end;
386 		}
387 	}
388 
389 	if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_CQ_NUM) == 1) {
390 		ret = rte_kvargs_process(kvlist, VIRTIO_USER_ARG_CQ_NUM,
391 					 &get_integer_arg, &cq);
392 		if (ret < 0) {
393 			PMD_INIT_LOG(ERR, "error to parse %s",
394 				     VIRTIO_USER_ARG_CQ_NUM);
395 			goto end;
396 		}
397 	} else if (queues > 1) {
398 		cq = 1;
399 	}
400 
401 	if (queues > 1 && cq == 0) {
402 		PMD_INIT_LOG(ERR, "multi-q requires ctrl-q");
403 		goto end;
404 	}
405 
406 	eth_dev = virtio_user_eth_dev_alloc(name);
407 	if (!eth_dev) {
408 		PMD_INIT_LOG(ERR, "virtio_user fails to alloc device");
409 		goto end;
410 	}
411 
412 	hw = eth_dev->data->dev_private;
413 	if (virtio_user_dev_init(hw->virtio_user_dev, path, queues, cq,
414 				 queue_size, mac_addr) < 0)
415 		goto end;
416 
417 	/* previously called by rte_eal_pci_probe() for physical dev */
418 	if (eth_virtio_dev_init(eth_dev) < 0) {
419 		PMD_INIT_LOG(ERR, "eth_virtio_dev_init fails");
420 		goto end;
421 	}
422 	ret = 0;
423 
424 end:
425 	if (kvlist)
426 		rte_kvargs_free(kvlist);
427 	if (path)
428 		free(path);
429 	if (mac_addr)
430 		free(mac_addr);
431 	return ret;
432 }
433 
434 /** Called by rte_eth_dev_detach() */
435 static int
436 virtio_user_pmd_devuninit(const char *name)
437 {
438 	struct rte_eth_dev *eth_dev;
439 	struct virtio_hw *hw;
440 	struct virtio_user_dev *dev;
441 
442 	if (!name)
443 		return -EINVAL;
444 
445 	PMD_DRV_LOG(INFO, "Un-Initializing %s\n", name);
446 	eth_dev = rte_eth_dev_allocated(name);
447 	if (!eth_dev)
448 		return -ENODEV;
449 
450 	/* make sure the device is stopped, queues freed */
451 	rte_eth_dev_close(eth_dev->data->port_id);
452 
453 	hw = eth_dev->data->dev_private;
454 	dev = hw->virtio_user_dev;
455 	virtio_user_dev_uninit(dev);
456 
457 	rte_free(eth_dev->data->dev_private);
458 	rte_free(eth_dev->data);
459 	rte_eth_dev_release_port(eth_dev);
460 
461 	return 0;
462 }
463 
/* Virtual-device driver descriptor: EAL calls .init for each matching
 * vdev at startup and .uninit on rte_eth_dev_detach().
 */
static struct rte_driver virtio_user_driver = {
	.type   = PMD_VDEV,
	.init   = virtio_user_pmd_devinit,
	.uninit = virtio_user_pmd_devuninit,
};
469 
/* Register the driver with EAL and advertise the devargs it accepts. */
PMD_REGISTER_DRIVER(virtio_user_driver, virtio_user);
DRIVER_REGISTER_PARAM_STRING(virtio_user,
	"path=<path> "
	"mac=<mac addr> "
	"cq=<int> "
	"queue_size=<int> "
	"queues=<int>");
477