xref: /dpdk/lib/vhost/virtio_net_ctrl.c (revision 070db97e017b7ed9a5320b2f624f05562a632bd3)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2023 Red Hat, Inc.
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "iotlb.h"
#include "vhost.h"
#include "virtio_net_ctrl.h"

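/*
 * Control command layout, as defined by the VirtIO specification: a
 * class/command header followed by command-specific data. The one-byte ack
 * closing every command lives in its own device-writable descriptor, so it
 * is not part of this structure.
 */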
struct virtio_net_ctrl {
	uint8_t class;
	uint8_t command;
	uint8_t command_data[];
};

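/*
 * A request popped from the control virtqueue: the copied command, the index
 * of the chain's head descriptor, the descriptor count to report in the used
 * ring, and the mapped guest buffer where the ack byte must be written.
 */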
struct virtio_net_ctrl_elem {
	struct virtio_net_ctrl *ctrl_req;
	uint16_t head_idx;
	uint16_t n_descs;
	uint8_t *desc_ack;
};

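/*
 * Pop a single request from the split control virtqueue.
 *
 * The descriptor chain is expected to consist of one or more read-only
 * descriptors carrying the request, terminated by a one-byte write-only ack
 * descriptor. The request is copied into a freshly allocated buffer so it
 * can be parsed from contiguous memory. The chain is consumed even when it
 * is malformed, so a bad request cannot stall the queue.
 *
 * Returns 1 if a request was popped, 0 if the queue is empty, -1 on error.
 */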
static int
virtio_net_ctrl_pop(struct virtio_net *dev, struct vhost_virtqueue *cvq,
		struct virtio_net_ctrl_elem *ctrl_elem)
	__rte_shared_locks_required(&cvq->iotlb_lock)
{
	uint16_t avail_idx, desc_idx, n_descs = 0;
	uint64_t desc_len, desc_addr, desc_iova, data_len = 0;
	uint8_t *ctrl_req;
	struct vring_desc *descs;

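	/*
	 * The acquire load pairs with the driver's release store of the avail
	 * index, so the descriptors it exposes are visible before being read.
	 */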
	avail_idx = rte_atomic_load_explicit((unsigned short __rte_atomic *)&cvq->avail->idx,
		rte_memory_order_acquire);
	if (avail_idx == cvq->last_avail_idx) {
		VHOST_CONFIG_LOG(dev->ifname, DEBUG, "Control queue empty");
		return 0;
	}

	desc_idx = cvq->avail->ring[cvq->last_avail_idx];
	if (desc_idx >= cvq->size) {
		VHOST_CONFIG_LOG(dev->ifname, ERR, "Out of range desc index, dropping");
		goto err;
	}

	ctrl_elem->head_idx = desc_idx;

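	/*
	 * An indirect descriptor references a separate table of descriptors;
	 * map that table and walk it instead of the main ring.
	 */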
	if (cvq->desc[desc_idx].flags & VRING_DESC_F_INDIRECT) {
		desc_len = cvq->desc[desc_idx].len;
		desc_iova = cvq->desc[desc_idx].addr;

		descs = (struct vring_desc *)(uintptr_t)vhost_iova_to_vva(dev, cvq,
					desc_iova, &desc_len, VHOST_ACCESS_RO);
		if (!descs || desc_len != cvq->desc[desc_idx].len) {
			VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to map ctrl indirect descs");
			goto err;
		}

		desc_idx = 0;
	} else {
		descs = cvq->desc;
	}

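	/*
	 * First pass: validate the chain layout (read-only request descriptors
	 * followed by a single one-byte write-only ack descriptor) and sum up
	 * the request length.
	 */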
	while (1) {
		desc_len = descs[desc_idx].len;
		desc_iova = descs[desc_idx].addr;

		n_descs++;

		if (descs[desc_idx].flags & VRING_DESC_F_WRITE) {
			if (ctrl_elem->desc_ack) {
				VHOST_CONFIG_LOG(dev->ifname, ERR,
						"Unexpected ctrl chain layout");
				goto err;
			}

			if (desc_len != sizeof(uint8_t)) {
				VHOST_CONFIG_LOG(dev->ifname, ERR,
						"Invalid ack size for ctrl req, dropping");
				goto err;
			}

			ctrl_elem->desc_ack = (uint8_t *)(uintptr_t)vhost_iova_to_vva(dev, cvq,
					desc_iova, &desc_len, VHOST_ACCESS_WO);
			if (!ctrl_elem->desc_ack || desc_len != sizeof(uint8_t)) {
				VHOST_CONFIG_LOG(dev->ifname, ERR,
						"Failed to map ctrl ack descriptor");
				goto err;
			}
		} else {
			if (ctrl_elem->desc_ack) {
				VHOST_CONFIG_LOG(dev->ifname, ERR,
						"Unexpected ctrl chain layout");
				goto err;
			}

			data_len += desc_len;
		}

		if (!(descs[desc_idx].flags & VRING_DESC_F_NEXT))
			break;

		desc_idx = descs[desc_idx].next;
	}

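	/*
	 * For an indirect chain only the head descriptor is consumed from the
	 * ring, so a single entry is credited to the used ring; otherwise the
	 * whole chain is.
	 */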
	desc_idx = ctrl_elem->head_idx;

	if (cvq->desc[desc_idx].flags & VRING_DESC_F_INDIRECT)
		ctrl_elem->n_descs = 1;
	else
		ctrl_elem->n_descs = n_descs;

	if (!ctrl_elem->desc_ack) {
		VHOST_CONFIG_LOG(dev->ifname, ERR, "Missing ctrl ack descriptor");
		goto err;
	}

	if (data_len < sizeof(ctrl_elem->ctrl_req->class) + sizeof(ctrl_elem->ctrl_req->command)) {
		VHOST_CONFIG_LOG(dev->ifname, ERR, "Invalid control header size");
		goto err;
	}

	ctrl_elem->ctrl_req = malloc(data_len);
	if (!ctrl_elem->ctrl_req) {
		VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to alloc ctrl request");
		goto err;
	}

	ctrl_req = (uint8_t *)ctrl_elem->ctrl_req;

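	/*
	 * Second pass: walk the read-only descriptors again and linearize the
	 * request into the freshly allocated buffer.
	 */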
	if (cvq->desc[desc_idx].flags & VRING_DESC_F_INDIRECT) {
		desc_len = cvq->desc[desc_idx].len;
		desc_iova = cvq->desc[desc_idx].addr;

		descs = (struct vring_desc *)(uintptr_t)vhost_iova_to_vva(dev, cvq,
					desc_iova, &desc_len, VHOST_ACCESS_RO);
		if (!descs || desc_len != cvq->desc[desc_idx].len) {
			VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to map ctrl indirect descs");
			goto free_err;
		}

		desc_idx = 0;
	} else {
		descs = cvq->desc;
	}

	while (!(descs[desc_idx].flags & VRING_DESC_F_WRITE)) {
		desc_len = descs[desc_idx].len;
		desc_iova = descs[desc_idx].addr;

		desc_addr = vhost_iova_to_vva(dev, cvq, desc_iova, &desc_len, VHOST_ACCESS_RO);
		if (!desc_addr || desc_len < descs[desc_idx].len) {
			VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to map ctrl descriptor");
			goto free_err;
		}

		memcpy(ctrl_req, (void *)(uintptr_t)desc_addr, desc_len);
		ctrl_req += desc_len;

		if (!(descs[desc_idx].flags & VRING_DESC_F_NEXT))
			break;

		desc_idx = descs[desc_idx].next;
	}

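	/* Consume the chain: advance last_avail_idx, wrapping around if needed. */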
	cvq->last_avail_idx++;
	if (cvq->last_avail_idx >= cvq->size)
		cvq->last_avail_idx -= cvq->size;
	vhost_virtqueue_reconnect_log_split(cvq);

	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
		vhost_avail_event(cvq) = cvq->last_avail_idx;

	return 1;

free_err:
	free(ctrl_elem->ctrl_req);
err:
	/* Consume malformed chains too, so they are not picked up again. */
	cvq->last_avail_idx++;
	if (cvq->last_avail_idx >= cvq->size)
		cvq->last_avail_idx -= cvq->size;
	vhost_virtqueue_reconnect_log_split(cvq);

	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
		vhost_avail_event(cvq) = cvq->last_avail_idx;

	return -1;
}

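/*
 * Handle a single control request and return the ack value to report to the
 * guest. Only VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET is implemented: the first
 * queue_pairs * 2 virtqueues are enabled, the remaining ones disabled, and
 * the application is notified of every state change. Any other request is
 * acked with VIRTIO_NET_ERR.
 */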
static uint8_t
virtio_net_ctrl_handle_req(struct virtio_net *dev, struct virtio_net_ctrl *ctrl_req)
{
	uint8_t ret = VIRTIO_NET_ERR;

	if (ctrl_req->class == VIRTIO_NET_CTRL_MQ &&
			ctrl_req->command == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
		uint16_t queue_pairs;
		uint32_t i;

		queue_pairs = *(uint16_t *)(uintptr_t)ctrl_req->command_data;
		VHOST_CONFIG_LOG(dev->ifname, INFO, "Ctrl req: MQ %u queue pairs", queue_pairs);
		ret = VIRTIO_NET_OK;

		for (i = 0; i < dev->nr_vring; i++) {
			struct vhost_virtqueue *vq = dev->virtqueue[i];
			bool enable;

			if (vq == dev->cvq)
				continue;

			/* Each queue pair is an RX/TX virtqueue couple. */
			if (i < queue_pairs * 2)
				enable = true;
			else
				enable = false;

			vq->enabled = enable;
			if (dev->notify_ops->vring_state_changed)
				dev->notify_ops->vring_state_changed(dev->vid, i, enable);
		}
	}

	return ret;
}

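/*
 * Push a handled request to the used ring and notify the guest. The used
 * element carries the chain's head index, the used index is published with
 * release semantics so the guest sees a fully written entry, and the request
 * buffer allocated by the pop step is freed.
 */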
static int
virtio_net_ctrl_push(struct virtio_net *dev, struct virtio_net_ctrl_elem *ctrl_elem)
{
	struct vhost_virtqueue *cvq = dev->cvq;
	struct vring_used_elem *used_elem;

	used_elem = &cvq->used->ring[cvq->last_used_idx];
	used_elem->id = ctrl_elem->head_idx;
	used_elem->len = ctrl_elem->n_descs;

	cvq->last_used_idx++;
	if (cvq->last_used_idx >= cvq->size)
		cvq->last_used_idx -= cvq->size;

	rte_atomic_store_explicit((unsigned short __rte_atomic *)&cvq->used->idx,
		cvq->last_used_idx, rte_memory_order_release);

	vhost_vring_call_split(dev, dev->cvq);

	free(ctrl_elem->ctrl_req);

	return 0;
}

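/*
 * Entry point, called when the control queue is kicked (for instance from
 * the VDUSE event handler): drain the control virtqueue and handle every
 * pending request under the control queue's access and IOTLB read locks.
 * Only split rings are supported for now.
 */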
int
virtio_net_ctrl_handle(struct virtio_net *dev)
{
	int ret = 0;

	if (dev->features & (1ULL << VIRTIO_F_RING_PACKED)) {
		VHOST_CONFIG_LOG(dev->ifname, ERR, "Packed ring not supported yet");
		return -1;
	}

	if (!dev->cvq) {
		VHOST_CONFIG_LOG(dev->ifname, ERR, "Missing control queue");
		return -1;
	}

	rte_rwlock_read_lock(&dev->cvq->access_lock);
	vhost_user_iotlb_rd_lock(dev->cvq);

	while (1) {
		struct virtio_net_ctrl_elem ctrl_elem;

		memset(&ctrl_elem, 0, sizeof(struct virtio_net_ctrl_elem));

		ret = virtio_net_ctrl_pop(dev, dev->cvq, &ctrl_elem);
		if (ret <= 0)
			break;

		*ctrl_elem.desc_ack = virtio_net_ctrl_handle_req(dev, ctrl_elem.ctrl_req);

		ret = virtio_net_ctrl_push(dev, &ctrl_elem);
		if (ret < 0)
			break;
	}

	vhost_user_iotlb_rd_unlock(dev->cvq);
	rte_rwlock_read_unlock(&dev->cvq->access_lock);

	return ret;
}
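
/*
 * Usage sketch (hypothetical caller, not part of this file), assuming a
 * backend that is woken up when the guest kicks the control queue:
 *
 *	static void
 *	control_queue_event(struct virtio_net *dev)
 *	{
 *		if (virtio_net_ctrl_handle(dev))
 *			VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to handle ctrl requests");
 *	}
 */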