xref: /dpdk/lib/vhost/virtio_net_ctrl.c (revision a8ca598cd8e696b4135f04cdd86a93b12fd5642a)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2023 Red Hat, Inc.
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "iotlb.h"
#include "vhost.h"
#include "virtio_net_ctrl.h"

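/*
 * Control request header, as defined by the VIRTIO specification:
 * a class byte and a command byte, followed by command-specific data.
 * The guest terminates the chain with a separate device-writable ack byte.
 */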
struct virtio_net_ctrl {
	uint8_t class;
	uint8_t command;
	uint8_t command_data[];
};

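/*
 * Bookkeeping for one popped control request: the reassembled request,
 * the head index and descriptor count to recycle through the used ring,
 * and the host virtual address of the guest's ack byte.
 */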
struct virtio_net_ctrl_elem {
	struct virtio_net_ctrl *ctrl_req;
	uint16_t head_idx;
	uint16_t n_descs;
	uint8_t *desc_ack;
};

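/*
 * Pop a single request off the control virtqueue (split ring only) and
 * linearize it into a freshly allocated buffer.
 * Returns 1 if a request was popped, 0 if the ring is empty and -1 on
 * error; faulty elements are consumed so they are not picked up again.
 */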
static int
virtio_net_ctrl_pop(struct virtio_net *dev, struct vhost_virtqueue *cvq,
		struct virtio_net_ctrl_elem *ctrl_elem)
	__rte_shared_locks_required(&cvq->iotlb_lock)
{
	uint16_t avail_idx, desc_idx, n_descs = 0;
	uint64_t desc_len, desc_addr, desc_iova, data_len = 0;
	uint8_t *ctrl_req;
	struct vring_desc *descs;

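	/* The acquire load synchronizes with the guest publishing new descriptors. */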
	avail_idx = __atomic_load_n(&cvq->avail->idx, __ATOMIC_ACQUIRE);
	if (avail_idx == cvq->last_avail_idx) {
		VHOST_LOG_CONFIG(dev->ifname, DEBUG, "Control queue empty\n");
		return 0;
	}

	desc_idx = cvq->avail->ring[cvq->last_avail_idx];
	if (desc_idx >= cvq->size) {
		VHOST_LOG_CONFIG(dev->ifname, ERR, "Out of range desc index, dropping\n");
		goto err;
	}

	ctrl_elem->head_idx = desc_idx;

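	/*
	 * An indirect descriptor points to a table of descriptors stored in
	 * guest memory; map the whole table before walking the chain.
	 */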
	if (cvq->desc[desc_idx].flags & VRING_DESC_F_INDIRECT) {
		desc_len = cvq->desc[desc_idx].len;
		desc_iova = cvq->desc[desc_idx].addr;

		descs = (struct vring_desc *)(uintptr_t)vhost_iova_to_vva(dev, cvq,
					desc_iova, &desc_len, VHOST_ACCESS_RO);
		if (!descs || desc_len != cvq->desc[desc_idx].len) {
			VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to map ctrl indirect descs\n");
			goto err;
		}

		desc_idx = 0;
	} else {
		descs = cvq->desc;
	}

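	/*
	 * First pass: validate the chain layout and accumulate the request
	 * size. Readable descriptors carry the request, and a single writable
	 * one-byte descriptor must end the chain to carry the ack.
	 */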
	while (1) {
		desc_len = descs[desc_idx].len;
		desc_iova = descs[desc_idx].addr;

		n_descs++;

		if (descs[desc_idx].flags & VRING_DESC_F_WRITE) {
			if (ctrl_elem->desc_ack) {
				VHOST_LOG_CONFIG(dev->ifname, ERR,
						"Unexpected ctrl chain layout\n");
				goto err;
			}

			if (desc_len != sizeof(uint8_t)) {
				VHOST_LOG_CONFIG(dev->ifname, ERR,
						"Invalid ack size for ctrl req, dropping\n");
				goto err;
			}

			ctrl_elem->desc_ack = (uint8_t *)(uintptr_t)vhost_iova_to_vva(dev, cvq,
					desc_iova, &desc_len, VHOST_ACCESS_WO);
			if (!ctrl_elem->desc_ack || desc_len != sizeof(uint8_t)) {
				VHOST_LOG_CONFIG(dev->ifname, ERR,
						"Failed to map ctrl ack descriptor\n");
				goto err;
			}
		} else {
			if (ctrl_elem->desc_ack) {
				VHOST_LOG_CONFIG(dev->ifname, ERR,
						"Unexpected ctrl chain layout\n");
				goto err;
			}

			data_len += desc_len;
		}

		if (!(descs[desc_idx].flags & VRING_DESC_F_NEXT))
			break;

		desc_idx = descs[desc_idx].next;
	}

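	/*
	 * Rewind to the head to compute the used ring credit: an indirect
	 * chain counts as a single descriptor from the guest's perspective.
	 */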
	desc_idx = ctrl_elem->head_idx;

	if (cvq->desc[desc_idx].flags & VRING_DESC_F_INDIRECT)
		ctrl_elem->n_descs = 1;
	else
		ctrl_elem->n_descs = n_descs;

	if (!ctrl_elem->desc_ack) {
		VHOST_LOG_CONFIG(dev->ifname, ERR, "Missing ctrl ack descriptor\n");
		goto err;
	}

	if (data_len < sizeof(ctrl_elem->ctrl_req->class) + sizeof(ctrl_elem->ctrl_req->command)) {
		VHOST_LOG_CONFIG(dev->ifname, ERR, "Invalid control header size\n");
		goto err;
	}

	ctrl_elem->ctrl_req = malloc(data_len);
	if (!ctrl_elem->ctrl_req) {
		VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to alloc ctrl request\n");
		goto err;
	}

	ctrl_req = (uint8_t *)ctrl_elem->ctrl_req;

	if (cvq->desc[desc_idx].flags & VRING_DESC_F_INDIRECT) {
		desc_len = cvq->desc[desc_idx].len;
		desc_iova = cvq->desc[desc_idx].addr;

		descs = (struct vring_desc *)(uintptr_t)vhost_iova_to_vva(dev, cvq,
					desc_iova, &desc_len, VHOST_ACCESS_RO);
		if (!descs || desc_len != cvq->desc[desc_idx].len) {
			VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to map ctrl indirect descs\n");
			goto free_err;
		}

		desc_idx = 0;
	} else {
		descs = cvq->desc;
	}

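	/*
	 * Second pass: linearize the readable descriptors into the request
	 * buffer, stopping at the writable ack descriptor.
	 */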
	while (!(descs[desc_idx].flags & VRING_DESC_F_WRITE)) {
		desc_len = descs[desc_idx].len;
		desc_iova = descs[desc_idx].addr;

		desc_addr = vhost_iova_to_vva(dev, cvq, desc_iova, &desc_len, VHOST_ACCESS_RO);
		if (!desc_addr || desc_len < descs[desc_idx].len) {
			VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to map ctrl descriptor\n");
			goto free_err;
		}

		memcpy(ctrl_req, (void *)(uintptr_t)desc_addr, desc_len);
		ctrl_req += desc_len;

		if (!(descs[desc_idx].flags & VRING_DESC_F_NEXT))
			break;

		desc_idx = descs[desc_idx].next;
	}

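	/* Consume the element: advance last_avail_idx with manual wrap-around. */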
	cvq->last_avail_idx++;
	if (cvq->last_avail_idx >= cvq->size)
		cvq->last_avail_idx -= cvq->size;

	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
		vhost_avail_event(cvq) = cvq->last_avail_idx;

	return 1;

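	/*
	 * Error paths also consume the faulty element so that the device does
	 * not retry it forever.
	 */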
free_err:
	free(ctrl_elem->ctrl_req);
err:
	cvq->last_avail_idx++;
	if (cvq->last_avail_idx >= cvq->size)
		cvq->last_avail_idx -= cvq->size;

	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
		vhost_avail_event(cvq) = cvq->last_avail_idx;

	return -1;
}

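/*
 * Process a control request and return the ack value to be written back to
 * the guest (VIRTIO_NET_OK or VIRTIO_NET_ERR). Only the multiqueue
 * VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET command is supported for now.
 */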
static uint8_t
virtio_net_ctrl_handle_req(struct virtio_net *dev, struct virtio_net_ctrl *ctrl_req)
{
	uint8_t ret = VIRTIO_NET_ERR;

	if (ctrl_req->class == VIRTIO_NET_CTRL_MQ &&
			ctrl_req->command == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
		uint16_t queue_pairs;
		uint32_t i;

		queue_pairs = *(uint16_t *)(uintptr_t)ctrl_req->command_data;
		VHOST_LOG_CONFIG(dev->ifname, INFO, "Ctrl req: MQ %u queue pairs\n", queue_pairs);
		ret = VIRTIO_NET_OK;

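		/*
		 * Data vrings are interleaved (Rx0, Tx0, Rx1, Tx1, ...), so the
		 * first 2 * queue_pairs rings stay enabled and all others are
		 * disabled, notifying the application of every state change.
		 */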
		for (i = 0; i < dev->nr_vring; i++) {
			struct vhost_virtqueue *vq = dev->virtqueue[i];
			bool enable;

			if (vq == dev->cvq)
				continue;

			if (i < queue_pairs * 2)
				enable = true;
			else
				enable = false;

			vq->enabled = enable;
			if (dev->notify_ops->vring_state_changed)
				dev->notify_ops->vring_state_changed(dev->vid, i, enable);
		}
	}

	return ret;
}

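/*
 * Push the processed element to the used ring, notify the guest, and free
 * the request buffer allocated by virtio_net_ctrl_pop().
 */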
static int
virtio_net_ctrl_push(struct virtio_net *dev, struct virtio_net_ctrl_elem *ctrl_elem)
{
	struct vhost_virtqueue *cvq = dev->cvq;
	struct vring_used_elem *used_elem;

	used_elem = &cvq->used->ring[cvq->last_used_idx];
	used_elem->id = ctrl_elem->head_idx;
	used_elem->len = ctrl_elem->n_descs;

	cvq->last_used_idx++;
	if (cvq->last_used_idx >= cvq->size)
		cvq->last_used_idx -= cvq->size;

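	/* The release store pairs with the guest's acquire load of the used index. */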
	__atomic_store_n(&cvq->used->idx, cvq->last_used_idx, __ATOMIC_RELEASE);

	vhost_vring_call_split(dev, dev->cvq);

	free(ctrl_elem->ctrl_req);

	return 0;
}

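/*
 * Service the control virtqueue: pop, handle and push requests until the
 * ring is drained, writing each ack directly into guest memory.
 * Returns 0 on success, -1 on error. Packed ring layout is not supported.
 */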
int
virtio_net_ctrl_handle(struct virtio_net *dev)
{
	int ret = 0;

	if (dev->features & (1ULL << VIRTIO_F_RING_PACKED)) {
		VHOST_LOG_CONFIG(dev->ifname, ERR, "Packed ring not supported yet\n");
		return -1;
	}

	if (!dev->cvq) {
		VHOST_LOG_CONFIG(dev->ifname, ERR, "Missing control queue\n");
		return -1;
	}

	rte_rwlock_read_lock(&dev->cvq->access_lock);
	vhost_user_iotlb_rd_lock(dev->cvq);

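	/* Process one request at a time until the ring is drained or an error occurs. */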
	while (1) {
		struct virtio_net_ctrl_elem ctrl_elem;

		memset(&ctrl_elem, 0, sizeof(struct virtio_net_ctrl_elem));

		ret = virtio_net_ctrl_pop(dev, dev->cvq, &ctrl_elem);
		if (ret <= 0)
			break;

		*ctrl_elem.desc_ack = virtio_net_ctrl_handle_req(dev, ctrl_elem.ctrl_req);

		ret = virtio_net_ctrl_push(dev, &ctrl_elem);
		if (ret < 0)
			break;
	}

	vhost_user_iotlb_rd_unlock(dev->cvq);
	rte_rwlock_read_unlock(&dev->cvq->access_lock);

	return ret;
}
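
/*
 * Usage sketch (hypothetical, for illustration only): a backend such as
 * VDUSE is expected to call virtio_net_ctrl_handle() when the control
 * queue kick eventfd fires, along these lines:
 *
 *	static void
 *	cvq_kick_handler(int fd, void *priv)
 *	{
 *		struct virtio_net *dev = priv;
 *		uint64_t buf;
 *
 *		if (read(fd, &buf, sizeof(buf)) < 0)
 *			return;
 *
 *		if (virtio_net_ctrl_handle(dev))
 *			VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to handle ctrl request\n");
 *	}
 *
 * cvq_kick_handler and its registration are hypothetical; only
 * virtio_net_ctrl_handle() is provided by this file.
 */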