xref: /dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.c (revision 3e0ceb9f17fff027fc6c8f18de35e11719ffa61e)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <string.h>
#include <errno.h>
#include <sys/mman.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/types.h>
#include <sys/stat.h>

#include "vhost.h"
#include "virtio_user_dev.h"
#include "../virtio_ethdev.h"
48 
49 static int
50 virtio_user_create_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
51 {
52 	/* Of all per virtqueue MSGs, make sure VHOST_SET_VRING_CALL come
53 	 * firstly because vhost depends on this msg to allocate virtqueue
54 	 * pair.
55 	 */
56 	struct vhost_vring_file file;
57 
58 	file.index = queue_sel;
59 	file.fd = dev->callfds[queue_sel];
60 	dev->ops->send_request(dev, VHOST_USER_SET_VRING_CALL, &file);
61 
62 	return 0;
63 }
64 
65 static int
66 virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
67 {
68 	struct vhost_vring_file file;
69 	struct vhost_vring_state state;
70 	struct vring *vring = &dev->vrings[queue_sel];
71 	struct vhost_vring_addr addr = {
72 		.index = queue_sel,
73 		.desc_user_addr = (uint64_t)(uintptr_t)vring->desc,
74 		.avail_user_addr = (uint64_t)(uintptr_t)vring->avail,
75 		.used_user_addr = (uint64_t)(uintptr_t)vring->used,
76 		.log_guest_addr = 0,
77 		.flags = 0, /* disable log */
78 	};
79 
80 	state.index = queue_sel;
81 	state.num = vring->num;
82 	dev->ops->send_request(dev, VHOST_USER_SET_VRING_NUM, &state);
83 
84 	state.index = queue_sel;
85 	state.num = 0; /* no reservation */
86 	dev->ops->send_request(dev, VHOST_USER_SET_VRING_BASE, &state);
87 
88 	dev->ops->send_request(dev, VHOST_USER_SET_VRING_ADDR, &addr);
89 
90 	/* Of all per virtqueue MSGs, make sure VHOST_USER_SET_VRING_KICK comes
91 	 * lastly because vhost depends on this msg to judge if
92 	 * virtio is ready.
93 	 */
94 	file.index = queue_sel;
95 	file.fd = dev->kickfds[queue_sel];
96 	dev->ops->send_request(dev, VHOST_USER_SET_VRING_KICK, &file);
97 
98 	return 0;
99 }
100 
101 static int
102 virtio_user_queue_setup(struct virtio_user_dev *dev,
103 			int (*fn)(struct virtio_user_dev *, uint32_t))
104 {
105 	uint32_t i, queue_sel;
106 
107 	for (i = 0; i < dev->max_queue_pairs; ++i) {
108 		queue_sel = 2 * i + VTNET_SQ_RQ_QUEUE_IDX;
109 		if (fn(dev, queue_sel) < 0) {
110 			PMD_DRV_LOG(INFO, "setup rx vq fails: %u", i);
111 			return -1;
112 		}
113 	}
114 	for (i = 0; i < dev->max_queue_pairs; ++i) {
115 		queue_sel = 2 * i + VTNET_SQ_TQ_QUEUE_IDX;
116 		if (fn(dev, queue_sel) < 0) {
117 			PMD_DRV_LOG(INFO, "setup tx vq fails: %u", i);
118 			return -1;
119 		}
120 	}
121 
122 	return 0;
123 }
124 
125 int
126 virtio_user_start_device(struct virtio_user_dev *dev)
127 {
128 	uint64_t features;
129 	int ret;
130 
131 	/* Step 0: tell vhost to create queues */
132 	if (virtio_user_queue_setup(dev, virtio_user_create_queue) < 0)
133 		goto error;
134 
135 	/* Step 1: set features */
136 	features = dev->features;
137 	/* Strip VIRTIO_NET_F_MAC, as MAC address is handled in vdev init */
138 	features &= ~(1ull << VIRTIO_NET_F_MAC);
139 	/* Strip VIRTIO_NET_F_CTRL_VQ, as devices do not really need to know */
140 	features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ);
141 	features &= ~(1ull << VIRTIO_NET_F_STATUS);
142 	ret = dev->ops->send_request(dev, VHOST_USER_SET_FEATURES, &features);
143 	if (ret < 0)
144 		goto error;
145 	PMD_DRV_LOG(INFO, "set features: %" PRIx64, features);
146 
147 	/* Step 2: share memory regions */
148 	ret = dev->ops->send_request(dev, VHOST_USER_SET_MEM_TABLE, NULL);
149 	if (ret < 0)
150 		goto error;
151 
152 	/* Step 3: kick queues */
153 	if (virtio_user_queue_setup(dev, virtio_user_kick_queue) < 0)
154 		goto error;
155 
156 	/* Step 4: enable queues
157 	 * we enable the 1st queue pair by default.
158 	 */
159 	dev->ops->enable_qp(dev, 0, 1);
160 
161 	return 0;
162 error:
163 	/* TODO: free resource here or caller to check */
164 	return -1;
165 }
166 
167 int virtio_user_stop_device(struct virtio_user_dev *dev)
168 {
169 	uint32_t i;
170 
171 	for (i = 0; i < dev->max_queue_pairs; ++i)
172 		dev->ops->enable_qp(dev, i, 0);
173 
174 	return 0;
175 }
176 
177 static inline void
178 parse_mac(struct virtio_user_dev *dev, const char *mac)
179 {
180 	int i, r;
181 	uint32_t tmp[ETHER_ADDR_LEN];
182 
183 	if (!mac)
184 		return;
185 
186 	r = sscanf(mac, "%x:%x:%x:%x:%x:%x", &tmp[0],
187 			&tmp[1], &tmp[2], &tmp[3], &tmp[4], &tmp[5]);
188 	if (r == ETHER_ADDR_LEN) {
189 		for (i = 0; i < ETHER_ADDR_LEN; ++i)
190 			dev->mac_addr[i] = (uint8_t)tmp[i];
191 		dev->mac_specified = 1;
192 	} else {
193 		/* ignore the wrong mac, use random mac */
194 		PMD_DRV_LOG(ERR, "wrong format of mac: %s", mac);
195 	}
196 }
197 
198 int
199 is_vhost_user_by_type(const char *path)
200 {
201 	struct stat sb;
202 
203 	if (stat(path, &sb) == -1)
204 		return 0;
205 
206 	return S_ISSOCK(sb.st_mode);
207 }
208 
209 static int
210 virtio_user_dev_init_notify(struct virtio_user_dev *dev)
211 {
212 	uint32_t i, j;
213 	int callfd;
214 	int kickfd;
215 
216 	for (i = 0; i < VIRTIO_MAX_VIRTQUEUES; ++i) {
217 		if (i >= dev->max_queue_pairs * 2) {
218 			dev->kickfds[i] = -1;
219 			dev->callfds[i] = -1;
220 			continue;
221 		}
222 
223 		/* May use invalid flag, but some backend uses kickfd and
224 		 * callfd as criteria to judge if dev is alive. so finally we
225 		 * use real event_fd.
226 		 */
227 		callfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
228 		if (callfd < 0) {
229 			PMD_DRV_LOG(ERR, "callfd error, %s", strerror(errno));
230 			break;
231 		}
232 		kickfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
233 		if (kickfd < 0) {
234 			PMD_DRV_LOG(ERR, "kickfd error, %s", strerror(errno));
235 			break;
236 		}
237 		dev->callfds[i] = callfd;
238 		dev->kickfds[i] = kickfd;
239 	}
240 
241 	if (i < VIRTIO_MAX_VIRTQUEUES) {
242 		for (j = 0; j <= i; ++j) {
243 			close(dev->callfds[j]);
244 			close(dev->kickfds[j]);
245 		}
246 
247 		return -1;
248 	}
249 
250 	return 0;
251 }
252 
253 static int
254 virtio_user_fill_intr_handle(struct virtio_user_dev *dev)
255 {
256 	uint32_t i;
257 	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];
258 
259 	if (!eth_dev->intr_handle) {
260 		eth_dev->intr_handle = malloc(sizeof(*eth_dev->intr_handle));
261 		if (!eth_dev->intr_handle) {
262 			PMD_DRV_LOG(ERR, "fail to allocate intr_handle");
263 			return -1;
264 		}
265 		memset(eth_dev->intr_handle, 0, sizeof(*eth_dev->intr_handle));
266 	}
267 
268 	for (i = 0; i < dev->max_queue_pairs; ++i)
269 		eth_dev->intr_handle->efds[i] = dev->callfds[i];
270 	eth_dev->intr_handle->nb_efd = dev->max_queue_pairs;
271 	eth_dev->intr_handle->max_intr = dev->max_queue_pairs + 1;
272 	eth_dev->intr_handle->type = RTE_INTR_HANDLE_VDEV;
273 	/* For virtio vdev, no need to read counter for clean */
274 	eth_dev->intr_handle->efd_counter_size = 0;
275 	if (dev->vhostfd >= 0)
276 		eth_dev->intr_handle->fd = dev->vhostfd;
277 
278 	return 0;
279 }
280 
281 static int
282 virtio_user_dev_setup(struct virtio_user_dev *dev)
283 {
284 	uint32_t q;
285 
286 	dev->vhostfd = -1;
287 	dev->vhostfds = NULL;
288 	dev->tapfds = NULL;
289 
290 	if (is_vhost_user_by_type(dev->path)) {
291 		dev->ops = &ops_user;
292 	} else {
293 		dev->ops = &ops_kernel;
294 
295 		dev->vhostfds = malloc(dev->max_queue_pairs * sizeof(int));
296 		dev->tapfds = malloc(dev->max_queue_pairs * sizeof(int));
297 		if (!dev->vhostfds || !dev->tapfds) {
298 			PMD_INIT_LOG(ERR, "Failed to malloc");
299 			return -1;
300 		}
301 
302 		for (q = 0; q < dev->max_queue_pairs; ++q) {
303 			dev->vhostfds[q] = -1;
304 			dev->tapfds[q] = -1;
305 		}
306 	}
307 
308 	if (dev->ops->setup(dev) < 0)
309 		return -1;
310 
311 	if (virtio_user_dev_init_notify(dev) < 0)
312 		return -1;
313 
314 	if (virtio_user_fill_intr_handle(dev) < 0)
315 		return -1;
316 
317 	return 0;
318 }
319 
320 /* Use below macro to filter features from vhost backend */
321 #define VIRTIO_USER_SUPPORTED_FEATURES			\
322 	(1ULL << VIRTIO_NET_F_MAC		|	\
323 	 1ULL << VIRTIO_NET_F_STATUS		|	\
324 	 1ULL << VIRTIO_NET_F_MQ		|	\
325 	 1ULL << VIRTIO_NET_F_CTRL_MAC_ADDR	|	\
326 	 1ULL << VIRTIO_NET_F_CTRL_VQ		|	\
327 	 1ULL << VIRTIO_NET_F_CTRL_RX		|	\
328 	 1ULL << VIRTIO_NET_F_CTRL_VLAN		|	\
329 	 1ULL << VIRTIO_NET_F_CSUM		|	\
330 	 1ULL << VIRTIO_NET_F_HOST_TSO4		|	\
331 	 1ULL << VIRTIO_NET_F_HOST_TSO6		|	\
332 	 1ULL << VIRTIO_NET_F_MRG_RXBUF		|	\
333 	 1ULL << VIRTIO_RING_F_INDIRECT_DESC	|	\
334 	 1ULL << VIRTIO_NET_F_GUEST_CSUM	|	\
335 	 1ULL << VIRTIO_NET_F_GUEST_TSO4	|	\
336 	 1ULL << VIRTIO_NET_F_GUEST_TSO6	|	\
337 	 1ULL << VIRTIO_F_VERSION_1)
338 
339 int
340 virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
341 		     int cq, int queue_size, const char *mac, char **ifname)
342 {
343 	snprintf(dev->path, PATH_MAX, "%s", path);
344 	dev->max_queue_pairs = queues;
345 	dev->queue_pairs = 1; /* mq disabled by default */
346 	dev->queue_size = queue_size;
347 	dev->mac_specified = 0;
348 	parse_mac(dev, mac);
349 
350 	if (*ifname) {
351 		dev->ifname = *ifname;
352 		*ifname = NULL;
353 	}
354 
355 	if (virtio_user_dev_setup(dev) < 0) {
356 		PMD_INIT_LOG(ERR, "backend set up fails");
357 		return -1;
358 	}
359 	if (dev->ops->send_request(dev, VHOST_USER_SET_OWNER, NULL) < 0) {
360 		PMD_INIT_LOG(ERR, "set_owner fails: %s", strerror(errno));
361 		return -1;
362 	}
363 
364 	if (dev->ops->send_request(dev, VHOST_USER_GET_FEATURES,
365 			    &dev->device_features) < 0) {
366 		PMD_INIT_LOG(ERR, "get_features failed: %s", strerror(errno));
367 		return -1;
368 	}
369 	if (dev->mac_specified)
370 		dev->device_features |= (1ull << VIRTIO_NET_F_MAC);
371 
372 	if (cq) {
373 		/* device does not really need to know anything about CQ,
374 		 * so if necessary, we just claim to support CQ
375 		 */
376 		dev->device_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
377 	} else {
378 		dev->device_features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ);
379 		/* Also disable features depends on VIRTIO_NET_F_CTRL_VQ */
380 		dev->device_features &= ~(1ull << VIRTIO_NET_F_CTRL_RX);
381 		dev->device_features &= ~(1ull << VIRTIO_NET_F_CTRL_VLAN);
382 		dev->device_features &= ~(1ull << VIRTIO_NET_F_GUEST_ANNOUNCE);
383 		dev->device_features &= ~(1ull << VIRTIO_NET_F_MQ);
384 		dev->device_features &= ~(1ull << VIRTIO_NET_F_CTRL_MAC_ADDR);
385 	}
386 
387 	/* The backend will not report this feature, we add it explicitly */
388 	if (is_vhost_user_by_type(dev->path))
389 		dev->device_features |= (1ull << VIRTIO_NET_F_STATUS);
390 
391 	dev->device_features &= VIRTIO_USER_SUPPORTED_FEATURES;
392 
393 	return 0;
394 }
395 
396 void
397 virtio_user_dev_uninit(struct virtio_user_dev *dev)
398 {
399 	uint32_t i;
400 
401 	virtio_user_stop_device(dev);
402 
403 	for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
404 		close(dev->callfds[i]);
405 		close(dev->kickfds[i]);
406 	}
407 
408 	close(dev->vhostfd);
409 
410 	if (dev->vhostfds) {
411 		for (i = 0; i < dev->max_queue_pairs; ++i)
412 			close(dev->vhostfds[i]);
413 		free(dev->vhostfds);
414 		free(dev->tapfds);
415 	}
416 
417 	free(dev->ifname);
418 }
419 
420 static uint8_t
421 virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs)
422 {
423 	uint16_t i;
424 	uint8_t ret = 0;
425 
426 	if (q_pairs > dev->max_queue_pairs) {
427 		PMD_INIT_LOG(ERR, "multi-q config %u, but only %u supported",
428 			     q_pairs, dev->max_queue_pairs);
429 		return -1;
430 	}
431 
432 	for (i = 0; i < q_pairs; ++i)
433 		ret |= dev->ops->enable_qp(dev, i, 1);
434 	for (i = q_pairs; i < dev->max_queue_pairs; ++i)
435 		ret |= dev->ops->enable_qp(dev, i, 0);
436 
437 	dev->queue_pairs = q_pairs;
438 
439 	return ret;
440 }
441 
/* Process one control-queue command whose descriptor chain starts at
 * idx_hdr.  Chain layout: header desc -> data desc(s) -> status desc.
 * Only VIRTIO_NET_CTRL_MQ / VQ_PAIRS_SET is handled; every other command
 * is acked with an error status.
 *
 * Returns the number of descriptors in the consumed chain.
 */
static uint32_t
virtio_user_handle_ctrl_msg(struct virtio_user_dev *dev, struct vring *vring,
			    uint16_t idx_hdr)
{
	struct virtio_net_ctrl_hdr *hdr;
	virtio_net_ctrl_ack status = ~0; /* default to error ack */
	uint16_t i, idx_data, idx_status;
	uint32_t n_descs = 0;

	/* locate desc for header, data, and status */
	idx_data = vring->desc[idx_hdr].next;
	n_descs++;

	/* Walk to the last descriptor of the chain.
	 * NOTE(review): this compares flags with == rather than &, so it
	 * assumes data descriptors carry exactly VRING_DESC_F_NEXT and no
	 * other flag bits -- confirm the guest driver never sets more.
	 */
	i = idx_data;
	while (vring->desc[i].flags == VRING_DESC_F_NEXT) {
		i = vring->desc[i].next;
		n_descs++;
	}

	/* locate desc for status */
	idx_status = i;
	n_descs++;

	/* desc addrs are guest (process-local) virtual addresses here */
	hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
	if (hdr->class == VIRTIO_NET_CTRL_MQ &&
	    hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
		uint16_t queues;

		queues = *(uint16_t *)(uintptr_t)vring->desc[idx_data].addr;
		status = virtio_user_handle_mq(dev, queues);
	}

	/* Update status */
	*(virtio_net_ctrl_ack *)(uintptr_t)vring->desc[idx_status].addr = status;

	return n_descs;
}
479 
/* Drain the control virtqueue: handle every pending command in the avail
 * ring and post completions to the used ring.
 *
 * The device's own used->idx doubles as the consumer cursor into the
 * avail ring, so "used caught up with avail" means nothing is pending.
 * NOTE(review): no memory barriers are used around the ring accesses --
 * presumably safe because producer and consumer run in the same process;
 * confirm if this ever becomes cross-thread.
 */
void
virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx)
{
	uint16_t avail_idx, desc_idx;
	struct vring_used_elem *uep;
	uint32_t n_descs;
	struct vring *vring = &dev->vrings[queue_idx];

	/* Consume avail ring, using used ring idx as first one */
	while (vring->used->idx != vring->avail->idx) {
		avail_idx = (vring->used->idx) & (vring->num - 1);
		desc_idx = vring->avail->ring[avail_idx];

		n_descs = virtio_user_handle_ctrl_msg(dev, vring, desc_idx);

		/* Update used ring */
		uep = &vring->used->ring[avail_idx];
		uep->id = avail_idx;
		uep->len = n_descs;

		vring->used->idx++;
	}
}
503