/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdint.h>
#include <stdbool.h>
#include <stdlib.h>
#include <linux/virtio_net.h>

#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_vhost.h>

#include "main.h"

/*
 * A very simple vhost-user net driver implementation, without
 * any extra features being enabled, such as TSO and mrg-Rx.
 */

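/*
 * Cache the negotiated features, virtio-net header length, guest memory
 * map and vring metadata for 'dev', so the builtin datapath below can
 * access them directly.
 */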
void
vs_vhost_net_setup(struct vhost_dev *dev)
{
	uint16_t i;
	int vid = dev->vid;
	struct vhost_queue *queue;
	int ret;

	RTE_LOG(INFO, VHOST_CONFIG,
		"setting builtin vhost-user net driver\n");

	rte_vhost_get_negotiated_features(vid, &dev->features);
	if (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF))
		dev->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	else
		dev->hdr_len = sizeof(struct virtio_net_hdr);

	ret = rte_vhost_get_mem_table(vid, &dev->mem);
	if (ret < 0) {
		RTE_LOG(ERR, VHOST_CONFIG, "Failed to get "
			"VM memory layout for device(%d)\n", vid);
		return;
	}

	dev->nr_vrings = rte_vhost_get_vring_num(vid);
	for (i = 0; i < dev->nr_vrings; i++) {
		queue = &dev->queues[i];

		queue->last_used_idx = 0;
		queue->last_avail_idx = 0;
		rte_vhost_get_vhost_vring(vid, i, &queue->vr);
	}
}

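/* Release the guest memory map obtained via rte_vhost_get_mem_table(). */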
void
vs_vhost_net_remove(struct vhost_dev *dev)
{
	free(dev->mem);
}

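/*
 * Copy one mbuf chain into the descriptor chain starting at 'desc_idx',
 * prepending an all-zero virtio-net header (host to guest direction).
 * Returns 0 on success, -1 if the descriptor chain is invalid or too small.
 */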
static __rte_always_inline int
enqueue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,
	    struct rte_mbuf *m, uint16_t desc_idx)
{
	uint32_t desc_avail, desc_offset;
	uint64_t desc_chunck_len;
	uint32_t mbuf_avail, mbuf_offset;
	uint32_t cpy_len;
	struct vring_desc *desc;
	uint64_t desc_addr, desc_gaddr;
	struct virtio_net_hdr virtio_hdr = {0, 0, 0, 0, 0, 0};
	/* A counter to avoid desc dead loop chain */
	uint16_t nr_desc = 1;

	desc = &vr->desc[desc_idx];
	desc_chunck_len = desc->len;
	desc_gaddr = desc->addr;
	desc_addr = rte_vhost_va_from_guest_pa(
			dev->mem, desc_gaddr, &desc_chunck_len);
	/*
	 * Checking of 'desc_addr' placed outside of 'unlikely' macro to avoid
	 * performance issue with some versions of gcc (4.8.4 and 5.3.0) which
	 * otherwise stores offset on the stack instead of in a register.
	 */
	if (unlikely(desc->len < dev->hdr_len) || !desc_addr)
		return -1;

	rte_prefetch0((void *)(uintptr_t)desc_addr);

	/* write virtio-net header */
	if (likely(desc_chunck_len >= dev->hdr_len)) {
		*(struct virtio_net_hdr *)(uintptr_t)desc_addr = virtio_hdr;
		desc_offset = dev->hdr_len;
	} else {
		uint64_t len;
		uint64_t remain = dev->hdr_len;
		uint64_t src = (uint64_t)(uintptr_t)&virtio_hdr, dst;
		uint64_t guest_addr = desc_gaddr;

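		/*
		 * The header straddles a host-contiguous chunk boundary:
		 * copy it piece by piece, translating each guest physical
		 * chunk to a host virtual address.
		 */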
		while (remain) {
			len = remain;
			dst = rte_vhost_va_from_guest_pa(dev->mem,
					guest_addr, &len);
			if (unlikely(!dst || !len))
				return -1;

			rte_memcpy((void *)(uintptr_t)dst,
					(void *)(uintptr_t)src,
					len);

			remain -= len;
			guest_addr += len;
			src += len;
		}

		desc_chunck_len = desc->len - dev->hdr_len;
		desc_gaddr += dev->hdr_len;
		desc_addr = rte_vhost_va_from_guest_pa(
				dev->mem, desc_gaddr,
				&desc_chunck_len);
		if (unlikely(!desc_addr))
			return -1;

		desc_offset = 0;
	}

	desc_avail = desc->len - dev->hdr_len;

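	/*
	 * Copy the packet payload, walking the mbuf chain and the
	 * descriptor chain in lockstep.
	 */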
	mbuf_avail = rte_pktmbuf_data_len(m);
	mbuf_offset = 0;
	while (mbuf_avail != 0 || m->next != NULL) {
		/* done with current mbuf, fetch next */
		if (mbuf_avail == 0) {
			m = m->next;

			mbuf_offset = 0;
			mbuf_avail = rte_pktmbuf_data_len(m);
		}

		/* done with current desc buf, fetch next */
		if (desc_avail == 0) {
			if ((desc->flags & VRING_DESC_F_NEXT) == 0) {
				/* Not enough room left in the vring buffer */
				return -1;
			}
			if (unlikely(desc->next >= vr->size ||
					++nr_desc > vr->size))
				return -1;

			desc = &vr->desc[desc->next];
			desc_chunck_len = desc->len;
			desc_gaddr = desc->addr;
			desc_addr = rte_vhost_va_from_guest_pa(
					dev->mem, desc_gaddr, &desc_chunck_len);
			if (unlikely(!desc_addr))
				return -1;

			desc_offset = 0;
			desc_avail = desc->len;
		} else if (unlikely(desc_chunck_len == 0)) {
			desc_chunck_len = desc_avail;
			desc_gaddr += desc_offset;
			desc_addr = rte_vhost_va_from_guest_pa(dev->mem,
					desc_gaddr,
					&desc_chunck_len);
			if (unlikely(!desc_addr))
				return -1;

			desc_offset = 0;
		}

		cpy_len = RTE_MIN(desc_chunck_len, mbuf_avail);
		rte_memcpy((void *)((uintptr_t)(desc_addr + desc_offset)),
			rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
			cpy_len);

		mbuf_avail -= cpy_len;
		mbuf_offset += cpy_len;
		desc_avail -= cpy_len;
		desc_offset += cpy_len;
		desc_chunck_len -= cpy_len;
	}

	return 0;
}

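/*
 * Enqueue a burst of packets into vring 'queue_id': reserve available
 * descriptors, copy each mbuf into its descriptor chain, then publish the
 * used ring index and notify the guest.
 */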
uint16_t
vs_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
		struct rte_mbuf **pkts, uint32_t count)
{
	struct vhost_queue *queue;
	struct rte_vhost_vring *vr;
	uint16_t avail_idx, free_entries, start_idx;
	uint16_t desc_indexes[MAX_PKT_BURST];
	uint16_t used_idx;
	uint32_t i;

	queue = &dev->queues[queue_id];
	vr = &queue->vr;

	avail_idx = rte_atomic_load_explicit((uint16_t __rte_atomic *)&vr->avail->idx,
		rte_memory_order_acquire);
	start_idx = queue->last_used_idx;
	free_entries = avail_idx - start_idx;
	count = RTE_MIN(count, free_entries);
	count = RTE_MIN(count, (uint32_t)MAX_PKT_BURST);
	if (count == 0)
		return 0;

	/* Retrieve all of the desc indexes first to avoid caching issues. */
	rte_prefetch0(&vr->avail->ring[start_idx & (vr->size - 1)]);
	for (i = 0; i < count; i++) {
		used_idx = (start_idx + i) & (vr->size - 1);
		desc_indexes[i] = vr->avail->ring[used_idx];
		vr->used->ring[used_idx].id = desc_indexes[i];
		vr->used->ring[used_idx].len = pkts[i]->pkt_len +
			dev->hdr_len;
	}

	rte_prefetch0(&vr->desc[desc_indexes[0]]);
	for (i = 0; i < count; i++) {
		uint16_t desc_idx = desc_indexes[i];
		int err;

		err = enqueue_pkt(dev, vr, pkts[i], desc_idx);
		if (unlikely(err)) {
			used_idx = (start_idx + i) & (vr->size - 1);
			vr->used->ring[used_idx].len = dev->hdr_len;
		}

		if (i + 1 < count)
			rte_prefetch0(&vr->desc[desc_indexes[i + 1]]);
	}

	rte_atomic_fetch_add_explicit((uint16_t __rte_atomic *)&vr->used->idx, count,
		rte_memory_order_release);
	queue->last_used_idx += count;

	rte_vhost_vring_call(dev->vid, queue_id);

	return count;
}

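/* Enqueue callback of the builtin net driver: forwards to vs_enqueue_pkts(). */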
uint16_t
builtin_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
		struct rte_mbuf **pkts, uint32_t count)
{
	return vs_enqueue_pkts(dev, queue_id, pkts, count);
}

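/*
 * Copy the packet held by the descriptor chain starting at 'desc_idx' into
 * the mbuf 'm', skipping the virtio-net header and chaining extra mbufs from
 * 'mbuf_pool' when the packet does not fit in a single one.
 * Returns 0 on success, -1 on a malformed chain or mbuf allocation failure.
 */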
static __rte_always_inline int
dequeue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,
	    struct rte_mbuf *m, uint16_t desc_idx,
	    struct rte_mempool *mbuf_pool)
{
	struct vring_desc *desc;
	uint64_t desc_addr, desc_gaddr;
	uint32_t desc_avail, desc_offset;
	uint64_t desc_chunck_len;
	uint32_t mbuf_avail, mbuf_offset;
	uint32_t cpy_len;
	struct rte_mbuf *cur = m, *prev = m;
	/* A counter to avoid desc dead loop chain */
	uint32_t nr_desc = 1;

	desc = &vr->desc[desc_idx];
	if (unlikely((desc->len < dev->hdr_len)) ||
			(desc->flags & VRING_DESC_F_INDIRECT))
		return -1;

	desc_chunck_len = desc->len;
	desc_gaddr = desc->addr;
	desc_addr = rte_vhost_va_from_guest_pa(
			dev->mem, desc_gaddr, &desc_chunck_len);
	if (unlikely(!desc_addr))
		return -1;

	/*
	 * We support neither ANY_LAYOUT nor VERSION_1, meaning a Tx packet
	 * from the guest must have at least 2 desc buffers: the first for
	 * storing the header and the others for storing the data.
	 *
	 * And since we don't support TSO, we can simply skip the header.
	 */
	desc = &vr->desc[desc->next];
	desc_chunck_len = desc->len;
	desc_gaddr = desc->addr;
	desc_addr = rte_vhost_va_from_guest_pa(
			dev->mem, desc_gaddr, &desc_chunck_len);
	if (unlikely(!desc_addr))
		return -1;
	rte_prefetch0((void *)(uintptr_t)desc_addr);

	desc_offset = 0;
	desc_avail = desc->len;
	nr_desc += 1;

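	/*
	 * Copy the payload from the descriptor chain into the mbuf chain,
	 * allocating a new mbuf whenever the current one fills up.
	 */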
	mbuf_offset = 0;
	mbuf_avail = m->buf_len - RTE_PKTMBUF_HEADROOM;
	while (1) {
		cpy_len = RTE_MIN(desc_chunck_len, mbuf_avail);
		rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
						   mbuf_offset),
			(void *)((uintptr_t)(desc_addr + desc_offset)),
			cpy_len);

		mbuf_avail -= cpy_len;
		mbuf_offset += cpy_len;
		desc_avail -= cpy_len;
		desc_offset += cpy_len;
		desc_chunck_len -= cpy_len;

		/* This desc buffer reaches its end, get the next one */
		if (desc_avail == 0) {
			if ((desc->flags & VRING_DESC_F_NEXT) == 0)
				break;

			if (unlikely(desc->next >= vr->size ||
					++nr_desc > vr->size))
				return -1;
			desc = &vr->desc[desc->next];

			desc_chunck_len = desc->len;
			desc_gaddr = desc->addr;
			desc_addr = rte_vhost_va_from_guest_pa(
					dev->mem, desc_gaddr, &desc_chunck_len);
			if (unlikely(!desc_addr))
				return -1;
			rte_prefetch0((void *)(uintptr_t)desc_addr);

			desc_offset = 0;
			desc_avail = desc->len;
		} else if (unlikely(desc_chunck_len == 0)) {
			desc_chunck_len = desc_avail;
			desc_gaddr += desc_offset;
			desc_addr = rte_vhost_va_from_guest_pa(dev->mem,
					desc_gaddr,
					&desc_chunck_len);
			if (unlikely(!desc_addr))
				return -1;

			desc_offset = 0;
		}

		/*
		 * This mbuf reaches its end, get a new one
		 * to hold more data.
		 */
		if (mbuf_avail == 0) {
			cur = rte_pktmbuf_alloc(mbuf_pool);
			if (unlikely(cur == NULL)) {
				RTE_LOG(ERR, VHOST_DATA, "Failed to "
					"allocate memory for mbuf.\n");
				return -1;
			}

			prev->next = cur;
			prev->data_len = mbuf_offset;
			m->nb_segs += 1;
			m->pkt_len += mbuf_offset;
			prev = cur;

			mbuf_offset = 0;
			mbuf_avail = cur->buf_len - RTE_PKTMBUF_HEADROOM;
		}
	}

	prev->data_len = mbuf_offset;
	m->pkt_len += mbuf_offset;

	return 0;
}

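/*
 * Drain up to 'count' packets from vring 'queue_id' into freshly allocated
 * mbufs, then publish the used entries and notify the guest.
 */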
static uint16_t
vs_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
{
	struct vhost_queue *queue;
	struct rte_vhost_vring *vr;
	uint32_t desc_indexes[MAX_PKT_BURST];
	uint32_t used_idx;
	uint32_t i = 0;
	uint16_t free_entries;
	uint16_t avail_idx;

	queue = &dev->queues[queue_id];
	vr = &queue->vr;

	free_entries = rte_atomic_load_explicit((uint16_t __rte_atomic *)&vr->avail->idx,
		rte_memory_order_acquire) - queue->last_avail_idx;
	if (free_entries == 0)
		return 0;

	/* Prefetch available and used ring */
	avail_idx = queue->last_avail_idx & (vr->size - 1);
	used_idx = queue->last_used_idx & (vr->size - 1);
	rte_prefetch0(&vr->avail->ring[avail_idx]);
	rte_prefetch0(&vr->used->ring[used_idx]);

	count = RTE_MIN(count, MAX_PKT_BURST);
	count = RTE_MIN(count, free_entries);

	if (unlikely(count == 0))
		return 0;

	/*
	 * Retrieve all of the head indexes first and pre-update used entries
	 * to avoid caching issues.
	 */
	for (i = 0; i < count; i++) {
		avail_idx = (queue->last_avail_idx + i) & (vr->size - 1);
		used_idx = (queue->last_used_idx + i) & (vr->size - 1);
		desc_indexes[i] = vr->avail->ring[avail_idx];

		vr->used->ring[used_idx].id = desc_indexes[i];
		vr->used->ring[used_idx].len = 0;
	}

	/* Prefetch descriptor index. */
	rte_prefetch0(&vr->desc[desc_indexes[0]]);
	for (i = 0; i < count; i++) {
		int err;

		if (likely(i + 1 < count))
			rte_prefetch0(&vr->desc[desc_indexes[i + 1]]);

		pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
		if (unlikely(pkts[i] == NULL)) {
			RTE_LOG(ERR, VHOST_DATA,
				"Failed to allocate memory for mbuf.\n");
			break;
		}

		err = dequeue_pkt(dev, vr, pkts[i], desc_indexes[i], mbuf_pool);
		if (unlikely(err)) {
			rte_pktmbuf_free(pkts[i]);
			break;
		}
	}

	queue->last_avail_idx += i;
	queue->last_used_idx += i;

	rte_atomic_fetch_add_explicit((uint16_t __rte_atomic *)&vr->used->idx, i,
		rte_memory_order_acq_rel);

	rte_vhost_vring_call(dev->vid, queue_id);

	return i;
}

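/* Dequeue callback of the builtin net driver: forwards to vs_dequeue_pkts(). */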
uint16_t
builtin_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
{
	return vs_dequeue_pkts(dev, queue_id, mbuf_pool, pkts, count);
}