xref: /dpdk/examples/vhost/virtio_net.c (revision 25d11a86c56d50947af33d0b79ede622809bd8b9)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdint.h>
#include <stdbool.h>
#include <stdlib.h>
#include <linux/virtio_net.h>

#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_vhost.h>

#include "main.h"
/*
 * A very simple vhost-user net driver implementation, with no extra
 * features enabled, such as TSO or mergeable Rx buffers (mrg-Rx).
 */
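
/*
 * A minimal usage sketch (not part of this file; the lookup helper and
 * socket path below are illustrative assumptions): the application is
 * expected to call vs_vhost_net_setup() from its new_device callback
 * once a vhost device becomes ready.
 *
 *	static int
 *	new_device(int vid)
 *	{
 *		struct vhost_dev *vdev = lookup_vhost_dev(vid);	// hypothetical
 *
 *		vs_vhost_net_setup(vdev);
 *		return 0;
 *	}
 *
 *	static const struct vhost_device_ops ops = {
 *		.new_device = new_device,
 *	};
 *
 *	rte_vhost_driver_register("/tmp/vhost.sock", 0);
 *	rte_vhost_driver_callback_register("/tmp/vhost.sock", &ops);
 *	rte_vhost_driver_start("/tmp/vhost.sock");
 */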

void
vs_vhost_net_setup(struct vhost_dev *dev)
{
	uint16_t i;
	int vid = dev->vid;
	struct vhost_queue *queue;

	RTE_LOG(INFO, VHOST_CONFIG,
		"setting builtin vhost-user net driver\n");

	rte_vhost_get_negotiated_features(vid, &dev->features);
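	/*
	 * The basic virtio-net header is 10 bytes; when mergeable Rx
	 * buffers are negotiated, a 16-bit num_buffers field is appended,
	 * growing it to 12 bytes, hence the per-packet header length
	 * chosen below.
	 */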
	if (dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF))
		dev->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	else
		dev->hdr_len = sizeof(struct virtio_net_hdr);

	rte_vhost_get_mem_table(vid, &dev->mem);

	dev->nr_vrings = rte_vhost_get_vring_num(vid);
	for (i = 0; i < dev->nr_vrings; i++) {
		queue = &dev->queues[i];

		queue->last_used_idx  = 0;
		queue->last_avail_idx = 0;
		rte_vhost_get_vhost_vring(vid, i, &queue->vr);
	}
}

void
vs_vhost_net_remove(struct vhost_dev *dev)
{
	free(dev->mem);
}

static __rte_always_inline int
enqueue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,
	    struct rte_mbuf *m, uint16_t desc_idx)
{
	uint32_t desc_avail, desc_offset;
	uint64_t desc_chunck_len;
	uint32_t mbuf_avail, mbuf_offset;
	uint32_t cpy_len;
	struct vring_desc *desc;
	uint64_t desc_addr, desc_gaddr;
	struct virtio_net_hdr virtio_hdr = {0, 0, 0, 0, 0, 0};
	/* A counter to guard against a looped descriptor chain */
	uint16_t nr_desc = 1;
	desc = &vr->desc[desc_idx];
	desc_chunck_len = desc->len;
	desc_gaddr = desc->addr;
	desc_addr = rte_vhost_va_from_guest_pa(
			dev->mem, desc_gaddr, &desc_chunck_len);
	/*
	 * The check of 'desc_addr' is placed outside the 'unlikely' macro
	 * to avoid a performance issue with some gcc versions (4.8.4 and
	 * 5.3.0), which otherwise store the offset on the stack instead
	 * of in a register.
	 */
	if (unlikely(desc->len < dev->hdr_len) || !desc_addr)
		return -1;

	rte_prefetch0((void *)(uintptr_t)desc_addr);

	/* write the virtio-net header */
	if (likely(desc_chunck_len >= dev->hdr_len)) {
		*(struct virtio_net_hdr *)(uintptr_t)desc_addr = virtio_hdr;
		desc_offset = dev->hdr_len;
	} else {
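		/*
		 * The header does not fit in one contiguous host mapping;
		 * the descriptor buffer crosses a guest-physical page
		 * boundary, so translate and copy it chunk by chunk.
		 */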
		uint64_t len;
		uint64_t remain = dev->hdr_len;
		uint64_t src = (uint64_t)(uintptr_t)&virtio_hdr, dst;
		uint64_t guest_addr = desc_gaddr;

		while (remain) {
			len = remain;
			dst = rte_vhost_va_from_guest_pa(dev->mem,
					guest_addr, &len);
			if (unlikely(!dst || !len))
				return -1;

			rte_memcpy((void *)(uintptr_t)dst,
					(void *)(uintptr_t)src,
					len);

			remain -= len;
			guest_addr += len;
			src += len;
		}

		desc_chunck_len = desc->len - dev->hdr_len;
		desc_gaddr += dev->hdr_len;
		desc_addr = rte_vhost_va_from_guest_pa(
				dev->mem, desc_gaddr,
				&desc_chunck_len);
		if (unlikely(!desc_addr))
			return -1;

		desc_offset = 0;
	}

	desc_avail  = desc->len - dev->hdr_len;

	mbuf_avail  = rte_pktmbuf_data_len(m);
	mbuf_offset = 0;
	while (mbuf_avail != 0 || m->next != NULL) {
		/* done with current mbuf, fetch next */
		if (mbuf_avail == 0) {
			m = m->next;

			mbuf_offset = 0;
			mbuf_avail  = rte_pktmbuf_data_len(m);
		}

		/* done with current desc buf, fetch next */
		if (desc_avail == 0) {
			if ((desc->flags & VRING_DESC_F_NEXT) == 0) {
				/* Not enough room in the vring descriptor chain */
				return -1;
			}
			if (unlikely(desc->next >= vr->size ||
				     ++nr_desc > vr->size))
				return -1;

			desc = &vr->desc[desc->next];
			desc_chunck_len = desc->len;
			desc_gaddr = desc->addr;
			desc_addr = rte_vhost_va_from_guest_pa(
					dev->mem, desc_gaddr, &desc_chunck_len);
			if (unlikely(!desc_addr))
				return -1;

			desc_offset = 0;
			desc_avail  = desc->len;
		} else if (unlikely(desc_chunck_len == 0)) {
			desc_chunck_len = desc_avail;
			desc_gaddr += desc_offset;
			desc_addr = rte_vhost_va_from_guest_pa(dev->mem,
					desc_gaddr,
					&desc_chunck_len);
			if (unlikely(!desc_addr))
				return -1;

			desc_offset = 0;
		}

		cpy_len = RTE_MIN(desc_chunck_len, mbuf_avail);
		rte_memcpy((void *)((uintptr_t)(desc_addr + desc_offset)),
			rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
			cpy_len);

		mbuf_avail  -= cpy_len;
		mbuf_offset += cpy_len;
		desc_avail  -= cpy_len;
		desc_offset += cpy_len;
		desc_chunck_len -= cpy_len;
	}

	return 0;
}

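/*
 * Enqueue a burst of host mbufs into the guest Rx vring (split ring):
 * fetch head descriptor indexes from the avail ring, pre-fill the used
 * ring entries, copy each packet into its descriptor chain, then
 * publish the new used index.  On a per-packet copy failure the used
 * length is truncated to the header, so the guest sees an empty packet.
 */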
uint16_t
vs_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
		struct rte_mbuf **pkts, uint32_t count)
{
	struct vhost_queue *queue;
	struct rte_vhost_vring *vr;
	uint16_t avail_idx, free_entries, start_idx;
	uint16_t desc_indexes[MAX_PKT_BURST];
	uint16_t used_idx;
	uint32_t i;

	queue = &dev->queues[queue_id];
	vr    = &queue->vr;

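	/*
	 * Snapshot the avail index with a volatile read so the compiler
	 * reloads it from memory on every poll.
	 */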
	avail_idx = *((volatile uint16_t *)&vr->avail->idx);
	start_idx = queue->last_used_idx;
	free_entries = avail_idx - start_idx;
	count = RTE_MIN(count, free_entries);
	count = RTE_MIN(count, (uint32_t)MAX_PKT_BURST);
	if (count == 0)
		return 0;

	/* Retrieve all of the desc indexes first to avoid caching issues. */
	rte_prefetch0(&vr->avail->ring[start_idx & (vr->size - 1)]);
	for (i = 0; i < count; i++) {
		used_idx = (start_idx + i) & (vr->size - 1);
		desc_indexes[i] = vr->avail->ring[used_idx];
		vr->used->ring[used_idx].id = desc_indexes[i];
		vr->used->ring[used_idx].len = pkts[i]->pkt_len +
					       dev->hdr_len;
	}

	rte_prefetch0(&vr->desc[desc_indexes[0]]);
	for (i = 0; i < count; i++) {
		uint16_t desc_idx = desc_indexes[i];
		int err;

		err = enqueue_pkt(dev, vr, pkts[i], desc_idx);
		if (unlikely(err)) {
			used_idx = (start_idx + i) & (vr->size - 1);
			vr->used->ring[used_idx].len = dev->hdr_len;
		}

		if (i + 1 < count)
			rte_prefetch0(&vr->desc[desc_indexes[i+1]]);
	}

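	/*
	 * Ensure the used ring entries are written before the used index
	 * update below becomes visible to the guest.
	 */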
	rte_smp_wmb();

	*(volatile uint16_t *)&vr->used->idx += count;
	queue->last_used_idx += count;

	rte_vhost_vring_call(dev->vid, queue_id);

	return count;
}
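
/*
 * A minimal Rx-path sketch under assumed names (the port id, queue
 * macro and device pointer are illustrative): packets received from a
 * NIC are copied into the guest, after which all of them can be freed,
 * since vs_enqueue_pkts() copies data rather than taking ownership.
 *
 *	struct rte_mbuf *pkts[MAX_PKT_BURST];
 *	uint16_t nb_rx, i;
 *
 *	nb_rx = rte_eth_rx_burst(port_id, 0, pkts, MAX_PKT_BURST);
 *	if (nb_rx) {
 *		vs_enqueue_pkts(vdev, VIRTIO_RXQ, pkts, nb_rx);
 *		for (i = 0; i < nb_rx; i++)
 *			rte_pktmbuf_free(pkts[i]);
 *	}
 */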
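/*
 * Copy one guest Tx descriptor chain into the given mbuf, skipping the
 * leading virtio-net header descriptor and chaining additional mbufs
 * from mbuf_pool when the data does not fit in a single segment.
 * Returns 0 on success, -1 on a malformed chain or allocation failure.
 */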
static __rte_always_inline int
dequeue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,
	    struct rte_mbuf *m, uint16_t desc_idx,
	    struct rte_mempool *mbuf_pool)
{
	struct vring_desc *desc;
	uint64_t desc_addr, desc_gaddr;
	uint32_t desc_avail, desc_offset;
	uint64_t desc_chunck_len;
	uint32_t mbuf_avail, mbuf_offset;
	uint32_t cpy_len;
	struct rte_mbuf *cur = m, *prev = m;
	/* A counter to guard against a looped descriptor chain */
	uint32_t nr_desc = 1;

	desc = &vr->desc[desc_idx];
	if (unlikely(desc->len < dev->hdr_len) ||
			(desc->flags & VRING_DESC_F_INDIRECT))
		return -1;

	desc_chunck_len = desc->len;
	desc_gaddr = desc->addr;
	desc_addr = rte_vhost_va_from_guest_pa(
			dev->mem, desc_gaddr, &desc_chunck_len);
	if (unlikely(!desc_addr))
		return -1;

	/*
	 * We support neither ANY_LAYOUT nor VERSION_1, so a Tx packet
	 * from the guest must have at least 2 desc buffers: the first
	 * stores the header and the others store the data.
	 *
	 * And since we do not support TSO, we can simply skip the header.
	 */
	desc = &vr->desc[desc->next];
	desc_chunck_len = desc->len;
	desc_gaddr = desc->addr;
	desc_addr = rte_vhost_va_from_guest_pa(
			dev->mem, desc_gaddr, &desc_chunck_len);
	if (unlikely(!desc_addr))
		return -1;
	rte_prefetch0((void *)(uintptr_t)desc_addr);

	desc_offset = 0;
	desc_avail  = desc->len;
	nr_desc    += 1;

	mbuf_offset = 0;
	mbuf_avail  = m->buf_len - RTE_PKTMBUF_HEADROOM;
	while (1) {
		cpy_len = RTE_MIN(desc_chunck_len, mbuf_avail);
		rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
						   mbuf_offset),
			(void *)((uintptr_t)(desc_addr + desc_offset)),
			cpy_len);

		mbuf_avail  -= cpy_len;
		mbuf_offset += cpy_len;
		desc_avail  -= cpy_len;
		desc_offset += cpy_len;
		desc_chunck_len -= cpy_len;

		/* This desc has been fully consumed, get the next one */
		if (desc_avail == 0) {
			if ((desc->flags & VRING_DESC_F_NEXT) == 0)
				break;

			if (unlikely(desc->next >= vr->size ||
				     ++nr_desc > vr->size))
				return -1;
			desc = &vr->desc[desc->next];

			desc_chunck_len = desc->len;
			desc_gaddr = desc->addr;
			desc_addr = rte_vhost_va_from_guest_pa(
					dev->mem, desc_gaddr, &desc_chunck_len);
			if (unlikely(!desc_addr))
				return -1;
			rte_prefetch0((void *)(uintptr_t)desc_addr);

			desc_offset = 0;
			desc_avail  = desc->len;
		} else if (unlikely(desc_chunck_len == 0)) {
			desc_chunck_len = desc_avail;
			desc_gaddr += desc_offset;
			desc_addr = rte_vhost_va_from_guest_pa(dev->mem,
					desc_gaddr,
					&desc_chunck_len);
			if (unlikely(!desc_addr))
				return -1;

			desc_offset = 0;
		}

		/*
		 * This mbuf is full, allocate a new one to hold
		 * more data.
		 */
		if (mbuf_avail == 0) {
			cur = rte_pktmbuf_alloc(mbuf_pool);
			if (unlikely(cur == NULL)) {
				RTE_LOG(ERR, VHOST_DATA,
					"Failed to allocate memory for mbuf.\n");
				return -1;
			}

			prev->next = cur;
			prev->data_len = mbuf_offset;
			m->nb_segs += 1;
			m->pkt_len += mbuf_offset;
			prev = cur;

			mbuf_offset = 0;
			mbuf_avail  = cur->buf_len - RTE_PKTMBUF_HEADROOM;
		}
	}

	prev->data_len = mbuf_offset;
	m->pkt_len    += mbuf_offset;

	return 0;
}

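/*
 * Dequeue a burst of packets from the guest Tx vring into freshly
 * allocated mbufs: head indexes are fetched from the avail ring, used
 * ring entries are pre-updated with zero length (nothing is written
 * back to guest memory), and the used index is published once the
 * copies complete.
 */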
uint16_t
vs_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
{
	struct vhost_queue *queue;
	struct rte_vhost_vring *vr;
	uint32_t desc_indexes[MAX_PKT_BURST];
	uint32_t used_idx;
	uint32_t i = 0;
	uint16_t free_entries;
	uint16_t avail_idx;

	queue = &dev->queues[queue_id];
	vr    = &queue->vr;

	free_entries = *((volatile uint16_t *)&vr->avail->idx) -
			queue->last_avail_idx;
	if (free_entries == 0)
		return 0;

	/* Prefetch the available and used rings */
	avail_idx = queue->last_avail_idx & (vr->size - 1);
	used_idx  = queue->last_used_idx  & (vr->size - 1);
	rte_prefetch0(&vr->avail->ring[avail_idx]);
	rte_prefetch0(&vr->used->ring[used_idx]);

	count = RTE_MIN(count, MAX_PKT_BURST);
	count = RTE_MIN(count, free_entries);

	if (unlikely(count == 0))
		return 0;

	/*
	 * Retrieve all of the head indexes first and pre-update used entries
	 * to avoid caching issues.
	 */
	for (i = 0; i < count; i++) {
		avail_idx = (queue->last_avail_idx + i) & (vr->size - 1);
		used_idx  = (queue->last_used_idx  + i) & (vr->size - 1);
		desc_indexes[i] = vr->avail->ring[avail_idx];

		vr->used->ring[used_idx].id  = desc_indexes[i];
		vr->used->ring[used_idx].len = 0;
	}

	/* Prefetch the first descriptor. */
	rte_prefetch0(&vr->desc[desc_indexes[0]]);
	for (i = 0; i < count; i++) {
		int err;

		if (likely(i + 1 < count))
			rte_prefetch0(&vr->desc[desc_indexes[i + 1]]);

		pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
		if (unlikely(pkts[i] == NULL)) {
			RTE_LOG(ERR, VHOST_DATA,
				"Failed to allocate memory for mbuf.\n");
			break;
		}

		err = dequeue_pkt(dev, vr, pkts[i], desc_indexes[i], mbuf_pool);
		if (unlikely(err)) {
			rte_pktmbuf_free(pkts[i]);
			break;
		}
	}

	queue->last_avail_idx += i;
	queue->last_used_idx += i;
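	/*
	 * Order the pre-updated used ring entries before exposing the
	 * new used index to the guest.
	 */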
	rte_smp_wmb();
	rte_smp_rmb();

	vr->used->idx += i;

	rte_vhost_vring_call(dev->vid, queue_id);

	return i;
}
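
/*
 * A minimal Tx-path sketch under assumed names (the port id and device
 * pointer are illustrative): packets drained from the guest are handed
 * to a NIC, and whatever the NIC does not accept is freed.
 *
 *	struct rte_mbuf *pkts[MAX_PKT_BURST];
 *	uint16_t nb_deq, nb_tx;
 *
 *	nb_deq = vs_dequeue_pkts(vdev, VIRTIO_TXQ, mbuf_pool, pkts,
 *				 MAX_PKT_BURST);
 *	nb_tx = rte_eth_tx_burst(port_id, 0, pkts, nb_deq);
 *	while (nb_tx < nb_deq)
 *		rte_pktmbuf_free(pkts[nb_tx++]);
 */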