/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */
#include <stdint.h>

#include <rte_mbuf.h>

#include "virtqueue.h"
#include "virtio_logs.h"
#include "virtio_pci.h"
#include "virtio_rxtx_simple.h"

/*
 * Two kinds of mbuf need cleaning:
 * 1) mbufs that have been consumed by the backend but not yet used by virtio.
 * 2) mbufs that have not been consumed by the backend.
 */
struct rte_mbuf *
virtqueue_detach_unused(struct virtqueue *vq)
{
	struct rte_mbuf *cookie;
	struct virtio_hw *hw;
	uint16_t start, end;
	int type, idx;

	if (vq == NULL)
		return NULL;

	hw = vq->hw;
	type = virtio_get_queue_type(hw, vq->vq_queue_index);
	start = vq->vq_avail_idx & (vq->vq_nentries - 1);
	end = (vq->vq_avail_idx + vq->vq_free_cnt) & (vq->vq_nentries - 1);

	for (idx = 0; idx < vq->vq_nentries; idx++) {
		if (hw->use_simple_rx && type == VTNET_RQ) {
			/*
			 * Vectorized Rx path: slots inside the free window
			 * [start, end) reference no mbuf, so skip them.
			 * The two tests handle the unwrapped and wrapped
			 * layouts of the window respectively.
			 */
			if (start <= end && idx >= start && idx < end)
				continue;
			if (start > end && (idx >= start || idx < end))
				continue;
			cookie = vq->sw_ring[idx];
			if (cookie != NULL) {
				vq->sw_ring[idx] = NULL;
				return cookie;
			}
		} else {
			cookie = vq->vq_descx[idx].cookie;
			if (cookie != NULL) {
				vq->vq_descx[idx].cookie = NULL;
				return cookie;
			}
		}
	}

	return NULL;
}
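
/*
 * Illustrative sketch (not part of the original file): because
 * virtqueue_detach_unused() returns at most one mbuf per call, a
 * teardown path would typically drain the queue in a loop. The helper
 * name below is hypothetical.
 */
static inline void
virtqueue_drain_unused_sketch(struct virtqueue *vq)
{
	struct rte_mbuf *mbuf;

	/* Detach and free every mbuf still referenced by the ring. */
	while ((mbuf = virtqueue_detach_unused(vq)) != NULL)
		rte_pktmbuf_free(mbuf);
}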

/* Flush used descriptors of a packed virtqueue. */
static void
virtqueue_rxvq_flush_packed(struct virtqueue *vq)
{
	struct vq_desc_extra *dxp;
	uint16_t i;

	struct vring_packed_desc *descs = vq->vq_packed.ring.desc;
	int cnt = 0;

	i = vq->vq_used_cons_idx;
	while (desc_is_used(&descs[i], vq) && cnt++ < vq->vq_nentries) {
		dxp = &vq->vq_descx[descs[i].id];
		if (dxp->cookie != NULL) {
			rte_pktmbuf_free(dxp->cookie);
			dxp->cookie = NULL;
		}
		vq->vq_free_cnt++;
		vq->vq_used_cons_idx++;
		if (vq->vq_used_cons_idx >= vq->vq_nentries) {
			/* The ring wrapped: flip the used wrap counter. */
			vq->vq_used_cons_idx -= vq->vq_nentries;
			vq->vq_packed.used_wrap_counter ^= 1;
		}
		i = vq->vq_used_cons_idx;
	}
}
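
/*
 * Reference sketch, not the driver's definition: a packed descriptor
 * counts as used when its AVAIL and USED flag bits both match the
 * ring's current used wrap counter. The real check is desc_is_used()
 * in the driver headers; this minimal version omits any memory
 * ordering the real one may apply, and its name is hypothetical.
 */
static inline int
desc_is_used_sketch(const struct vring_packed_desc *desc,
		const struct virtqueue *vq)
{
	uint16_t flags = desc->flags;
	int avail = !!(flags & VRING_PACKED_DESC_F_AVAIL);
	int used = !!(flags & VRING_PACKED_DESC_F_USED);

	return avail == used && used == (int)vq->vq_packed.used_wrap_counter;
}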

/* Flush the elements in the used ring of a split virtqueue. */
static void
virtqueue_rxvq_flush_split(struct virtqueue *vq)
{
	struct virtnet_rx *rxq = &vq->rxq;
	struct virtio_hw *hw = vq->hw;
	struct vring_used_elem *uep;
	struct vq_desc_extra *dxp;
	uint16_t used_idx, desc_idx;
	uint16_t nb_used, i;

	nb_used = VIRTQUEUE_NUSED(vq);

	for (i = 0; i < nb_used; i++) {
		used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
		uep = &vq->vq_split.ring.used->ring[used_idx];
		if (hw->use_simple_rx) {
			/* Vectorized Rx: mbufs are tracked in sw_ring. */
			desc_idx = used_idx;
			rte_pktmbuf_free(vq->sw_ring[desc_idx]);
			vq->vq_free_cnt++;
		} else if (hw->use_inorder_rx) {
			/* In-order Rx: descriptors are used sequentially. */
			desc_idx = (uint16_t)uep->id;
			dxp = &vq->vq_descx[desc_idx];
			if (dxp->cookie != NULL) {
				rte_pktmbuf_free(dxp->cookie);
				dxp->cookie = NULL;
			}
			vq_ring_free_inorder(vq, desc_idx, 1);
		} else {
			/* Default path: free the whole descriptor chain. */
			desc_idx = (uint16_t)uep->id;
			dxp = &vq->vq_descx[desc_idx];
			if (dxp->cookie != NULL) {
				rte_pktmbuf_free(dxp->cookie);
				dxp->cookie = NULL;
			}
			vq_ring_free_chain(vq, desc_idx);
		}
		vq->vq_used_cons_idx++;
	}

	if (hw->use_simple_rx) {
		/* Refill the ring in bulk for the vectorized Rx path. */
		while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
			virtio_rxq_rearm_vec(rxq);
			if (virtqueue_kick_prepare(vq))
				virtqueue_notify(vq);
		}
	}
}
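
/*
 * Reference sketch (hypothetical name): VIRTQUEUE_NUSED() above is the
 * distance between the device's used index and the driver's consumed
 * index. Free-running uint16_t arithmetic makes the subtraction wrap
 * correctly even when the indexes overflow.
 */
static inline uint16_t
virtqueue_nused_sketch(const struct virtqueue *vq)
{
	return (uint16_t)(vq->vq_split.ring.used->idx - vq->vq_used_cons_idx);
}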

/* Flush the elements in the used ring. */
void
virtqueue_rxvq_flush(struct virtqueue *vq)
{
	struct virtio_hw *hw = vq->hw;

	if (vtpci_packed_queue(hw))
		virtqueue_rxvq_flush_packed(vq);
	else
		virtqueue_rxvq_flush_split(vq);
}

int
virtqueue_rxvq_reset_packed(struct virtqueue *vq)
{
	int size = vq->vq_nentries;
	struct vq_desc_extra *dxp;
	struct virtnet_rx *rxvq;
	uint16_t desc_idx;

	vq->vq_used_cons_idx = 0;
	vq->vq_desc_head_idx = 0;
	vq->vq_avail_idx = 0;
	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
	vq->vq_free_cnt = vq->vq_nentries;

	vq->vq_packed.used_wrap_counter = 1;
	vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
	vq->vq_packed.event_flags_shadow = 0;
	/* Rx descriptors must be device-writable. */
	vq->vq_packed.cached_flags |= VRING_DESC_F_WRITE;

	rxvq = &vq->rxq;
	memset(rxvq->mz->addr, 0, rxvq->mz->len);

	/* Free any mbufs still attached to descriptor metadata. */
	for (desc_idx = 0; desc_idx < vq->vq_nentries; desc_idx++) {
		dxp = &vq->vq_descx[desc_idx];
		if (dxp->cookie != NULL) {
			rte_pktmbuf_free(dxp->cookie);
			dxp->cookie = NULL;
		}
	}

	vring_desc_init_packed(vq, size);

	return 0;
}

int
virtqueue_txvq_reset_packed(struct virtqueue *vq)
{
	int size = vq->vq_nentries;
	struct vq_desc_extra *dxp;
	struct virtnet_tx *txvq;
	uint16_t desc_idx;

	vq->vq_used_cons_idx = 0;
	vq->vq_desc_head_idx = 0;
	vq->vq_avail_idx = 0;
	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
	vq->vq_free_cnt = vq->vq_nentries;

	vq->vq_packed.used_wrap_counter = 1;
	vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
	vq->vq_packed.event_flags_shadow = 0;

	txvq = &vq->txq;
	memset(txvq->mz->addr, 0, txvq->mz->len);
	/* Also clear the region holding the virtio-net headers. */
	memset(txvq->virtio_net_hdr_mz->addr, 0,
		txvq->virtio_net_hdr_mz->len);

	/* Free any mbufs still attached to descriptor metadata. */
	for (desc_idx = 0; desc_idx < vq->vq_nentries; desc_idx++) {
		dxp = &vq->vq_descx[desc_idx];
		if (dxp->cookie != NULL) {
			rte_pktmbuf_free(dxp->cookie);
			dxp->cookie = NULL;
		}
	}

	vring_desc_init_packed(vq, size);

	return 0;
}
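
/*
 * Observation, sketched as a hypothetical helper: the two reset
 * functions above share the same index rewind, wrap-state reset and
 * mbuf cleanup. A common helper could factor that out as below; the
 * file keeps the Rx and Tx paths separate, so this is illustrative
 * only.
 */
static inline void
virtqueue_reset_packed_common_sketch(struct virtqueue *vq)
{
	struct vq_desc_extra *dxp;
	uint16_t desc_idx;

	/* Rewind all ring indexes and mark every descriptor free. */
	vq->vq_used_cons_idx = 0;
	vq->vq_desc_head_idx = 0;
	vq->vq_avail_idx = 0;
	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
	vq->vq_free_cnt = vq->vq_nentries;

	/* Restore the initial packed-ring wrap state. */
	vq->vq_packed.used_wrap_counter = 1;
	vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
	vq->vq_packed.event_flags_shadow = 0;

	/* Free any mbufs still referenced by descriptor metadata. */
	for (desc_idx = 0; desc_idx < vq->vq_nentries; desc_idx++) {
		dxp = &vq->vq_descx[desc_idx];
		if (dxp->cookie != NULL) {
			rte_pktmbuf_free(dxp->cookie);
			dxp->cookie = NULL;
		}
	}

	vring_desc_init_packed(vq, vq->vq_nentries);
}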