xref: /dpdk/drivers/net/virtio/virtio_cvq.c (revision 1af8b0b2747fe6c6267fa7bedb602e569742362e)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 * Copyright(c) 2022 Red Hat, Inc.
 */

#include <unistd.h>

#include <rte_common.h>
#include <rte_eal.h>
#include <rte_errno.h>

#include "virtio_cvq.h"
#include "virtqueue.h"

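/*
 * Submit a control command on a packed virtqueue: one descriptor for the
 * control header, one per data segment and a final device-writable
 * descriptor for the ack byte, then busy-wait until the device marks the
 * head descriptor as used.
 */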
static struct virtio_pmd_ctrl *
virtio_send_command_packed(struct virtnet_ctl *cvq,
			   struct virtio_pmd_ctrl *ctrl,
			   int *dlen, int pkt_num)
{
	struct virtqueue *vq = virtnet_cq_to_vq(cvq);
	int head;
	struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
	struct virtio_pmd_ctrl *result;
	uint16_t flags;
	int sum = 0;
	int nb_descs = 0;
	int k;

	/*
	 * Format is enforced in qemu code:
	 * One TX packet for header;
	 * At least one TX packet per argument;
	 * One RX packet for ACK.
	 */
	head = vq->vq_avail_idx;
	flags = vq->vq_packed.cached_flags;
	desc[head].addr = cvq->hdr_mem;
	desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
	vq->vq_free_cnt--;
	nb_descs++;
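	/*
	 * Crossing the end of the ring toggles the cached
	 * VRING_PACKED_DESC_F_AVAIL_USED bits, which track the packed
	 * ring's wrap counter for the descriptors written next.
	 */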
	if (++vq->vq_avail_idx >= vq->vq_nentries) {
		vq->vq_avail_idx -= vq->vq_nentries;
		vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
	}

	for (k = 0; k < pkt_num; k++) {
		desc[vq->vq_avail_idx].addr = cvq->hdr_mem
			+ sizeof(struct virtio_net_ctrl_hdr)
			+ sizeof(ctrl->status) + sizeof(uint8_t) * sum;
		desc[vq->vq_avail_idx].len = dlen[k];
		desc[vq->vq_avail_idx].flags = VRING_DESC_F_NEXT |
			vq->vq_packed.cached_flags;
		sum += dlen[k];
		vq->vq_free_cnt--;
		nb_descs++;
		if (++vq->vq_avail_idx >= vq->vq_nentries) {
			vq->vq_avail_idx -= vq->vq_nentries;
			vq->vq_packed.cached_flags ^=
				VRING_PACKED_DESC_F_AVAIL_USED;
		}
	}

	desc[vq->vq_avail_idx].addr = cvq->hdr_mem
		+ sizeof(struct virtio_net_ctrl_hdr);
	desc[vq->vq_avail_idx].len = sizeof(ctrl->status);
	desc[vq->vq_avail_idx].flags = VRING_DESC_F_WRITE |
		vq->vq_packed.cached_flags;
	vq->vq_free_cnt--;
	nb_descs++;
	if (++vq->vq_avail_idx >= vq->vq_nentries) {
		vq->vq_avail_idx -= vq->vq_nentries;
		vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
	}

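	/*
	 * The head descriptor's flags are written last (with a release
	 * store when weak barriers are in use) so the device only sees
	 * the chain once every other descriptor is fully populated.
	 */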
	virtqueue_store_flags_packed(&desc[head], VRING_DESC_F_NEXT | flags,
			vq->hw->weak_barriers);

	virtio_wmb(vq->hw->weak_barriers);
	cvq->notify_queue(vq, cvq->notify_cookie);

	/* wait for used desc in virtqueue
	 * desc_is_used has a load-acquire or rte_io_rmb inside
	 */
	while (!desc_is_used(&desc[head], vq))
		usleep(100);

	/* now get used descriptors */
	vq->vq_free_cnt += nb_descs;
	vq->vq_used_cons_idx += nb_descs;
	if (vq->vq_used_cons_idx >= vq->vq_nentries) {
		vq->vq_used_cons_idx -= vq->vq_nentries;
		vq->vq_packed.used_wrap_counter ^= 1;
	}

	PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d", vq->vq_free_cnt);
	PMD_INIT_LOG(DEBUG, "vq->vq_avail_idx=%d", vq->vq_avail_idx);
	PMD_INIT_LOG(DEBUG, "vq->vq_used_cons_idx=%d", vq->vq_used_cons_idx);
	PMD_INIT_LOG(DEBUG, "vq->vq_packed.cached_flags=0x%x", vq->vq_packed.cached_flags);
	PMD_INIT_LOG(DEBUG, "vq->vq_packed.used_wrap_counter=%d", vq->vq_packed.used_wrap_counter);

	result = cvq->hdr_mz->addr;
	return result;
}

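/*
 * Submit a control command on a split virtqueue: chain the header, data
 * and status descriptors through the free list, publish the head on the
 * avail ring, then poll the used ring and return the chain to the free
 * list once the device has consumed it.
 */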
static struct virtio_pmd_ctrl *
virtio_send_command_split(struct virtnet_ctl *cvq,
			  struct virtio_pmd_ctrl *ctrl,
			  int *dlen, int pkt_num)
{
	struct virtio_pmd_ctrl *result;
	struct virtqueue *vq = virtnet_cq_to_vq(cvq);
	uint32_t head, i;
	int k, sum = 0;

	head = vq->vq_desc_head_idx;

	/*
	 * Format is enforced in qemu code:
	 * One TX packet for header;
	 * At least one TX packet per argument;
	 * One RX packet for ACK.
	 */
	vq->vq_split.ring.desc[head].flags = VRING_DESC_F_NEXT;
	vq->vq_split.ring.desc[head].addr = cvq->hdr_mem;
	vq->vq_split.ring.desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
	vq->vq_free_cnt--;
	i = vq->vq_split.ring.desc[head].next;

	for (k = 0; k < pkt_num; k++) {
		vq->vq_split.ring.desc[i].flags = VRING_DESC_F_NEXT;
		vq->vq_split.ring.desc[i].addr = cvq->hdr_mem
			+ sizeof(struct virtio_net_ctrl_hdr)
			+ sizeof(ctrl->status) + sizeof(uint8_t) * sum;
		vq->vq_split.ring.desc[i].len = dlen[k];
		sum += dlen[k];
		vq->vq_free_cnt--;
		i = vq->vq_split.ring.desc[i].next;
	}

	vq->vq_split.ring.desc[i].flags = VRING_DESC_F_WRITE;
	vq->vq_split.ring.desc[i].addr = cvq->hdr_mem
			+ sizeof(struct virtio_net_ctrl_hdr);
	vq->vq_split.ring.desc[i].len = sizeof(ctrl->status);
	vq->vq_free_cnt--;

	vq->vq_desc_head_idx = vq->vq_split.ring.desc[i].next;

	vq_update_avail_ring(vq, head);
	vq_update_avail_idx(vq);

	PMD_INIT_LOG(DEBUG, "vq->vq_queue_index = %d", vq->vq_queue_index);

	cvq->notify_queue(vq, cvq->notify_cookie);

	while (virtqueue_nused(vq) == 0)
		usleep(100);

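	/*
	 * Drain the used ring: walk each completed descriptor chain and
	 * link it back onto the free list headed by vq_desc_head_idx.
	 */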
	while (virtqueue_nused(vq)) {
		uint32_t idx, desc_idx, used_idx;
		struct vring_used_elem *uep;

		used_idx = (uint32_t)(vq->vq_used_cons_idx
				& (vq->vq_nentries - 1));
		uep = &vq->vq_split.ring.used->ring[used_idx];
		idx = (uint32_t)uep->id;
		desc_idx = idx;

		while (vq->vq_split.ring.desc[desc_idx].flags &
				VRING_DESC_F_NEXT) {
			desc_idx = vq->vq_split.ring.desc[desc_idx].next;
			vq->vq_free_cnt++;
		}

		vq->vq_split.ring.desc[desc_idx].next = vq->vq_desc_head_idx;
		vq->vq_desc_head_idx = idx;

		vq->vq_used_cons_idx++;
		vq->vq_free_cnt++;
	}

	PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d", vq->vq_free_cnt);
	PMD_INIT_LOG(DEBUG, "vq->vq_desc_head_idx=%d", vq->vq_desc_head_idx);

	result = cvq->hdr_mz->addr;
	return result;
}

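/*
 * Send a control command to the device: copy the command into the control
 * queue's header memzone, dispatch to the packed or split implementation
 * and return the status byte written back by the device (or -1 if the
 * control queue is missing or lacks enough free descriptors).
 */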
int
virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl, int *dlen, int pkt_num)
{
	virtio_net_ctrl_ack status = ~0;
	struct virtio_pmd_ctrl *result;
	struct virtqueue *vq;

	ctrl->status = status;

	if (!cvq) {
		PMD_INIT_LOG(ERR, "Control queue is not supported.");
		return -1;
	}

	rte_spinlock_lock(&cvq->lock);
	vq = virtnet_cq_to_vq(cvq);

	PMD_INIT_LOG(DEBUG, "vq->vq_desc_head_idx = %d, status = %d, "
		"vq->hw->cvq = %p vq = %p",
		vq->vq_desc_head_idx, status, vq->hw->cvq, vq);

	if (vq->vq_free_cnt < pkt_num + 2 || pkt_num < 1) {
		rte_spinlock_unlock(&cvq->lock);
		return -1;
	}

	memcpy(cvq->hdr_mz->addr, ctrl, sizeof(struct virtio_pmd_ctrl));

	if (virtio_with_packed_queue(vq->hw))
		result = virtio_send_command_packed(cvq, ctrl, dlen, pkt_num);
	else
		result = virtio_send_command_split(cvq, ctrl, dlen, pkt_num);

	rte_spinlock_unlock(&cvq->lock);
	return result->status;
}
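
/*
 * Illustrative caller sketch, not part of this file: roughly how the
 * driver builds and issues a control command, modeled on the multiqueue
 * setup in virtio_ethdev.c. 'hw' and 'nb_queues' are assumed context.
 *
 *	struct virtio_pmd_ctrl ctrl;
 *	int dlen[1];
 *	int ret;
 *
 *	ctrl.hdr.class = VIRTIO_NET_CTRL_MQ;
 *	ctrl.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET;
 *	memcpy(ctrl.data, &nb_queues, sizeof(uint16_t));
 *	dlen[0] = sizeof(uint16_t);
 *
 *	ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
 *	if (ret)
 *		PMD_INIT_LOG(ERR, "Multiqueue configuration failed");
 */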