/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2023 Red Hat, Inc.
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "iotlb.h"
#include "vhost.h"
#include "virtio_net_ctrl.h"

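/*
 * Control request header as seen on the control virtqueue:
 * a class/command pair followed by command-specific data.
 */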
struct virtio_net_ctrl {
	uint8_t class;
	uint8_t command;
	uint8_t command_data[];
};

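/*
 * Bookkeeping for one popped control request: the copied request, the
 * chain head index and descriptor count to put back in the used ring,
 * and the mapped guest-writable ack byte.
 */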
struct virtio_net_ctrl_elem {
	struct virtio_net_ctrl *ctrl_req;
	uint16_t head_idx;
	uint16_t n_descs;
	uint8_t *desc_ack;
};

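/*
 * Dequeue the next control request from the control virtqueue.
 *
 * The descriptor chain is validated, the device-readable part is copied into
 * a freshly allocated ctrl_elem->ctrl_req buffer and the device-writable ack
 * descriptor is mapped so the caller can report the request status.
 *
 * Returns 1 if a request was popped, 0 if the queue is empty, -1 on error.
 */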
static int
virtio_net_ctrl_pop(struct virtio_net *dev, struct vhost_virtqueue *cvq,
		struct virtio_net_ctrl_elem *ctrl_elem)
	__rte_shared_locks_required(&cvq->iotlb_lock)
{
	uint16_t avail_idx, desc_idx, n_descs = 0;
	uint64_t desc_len, desc_addr, desc_iova, data_len = 0;
	uint8_t *ctrl_req;
	struct vring_desc *descs;

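	/* Acquire-load the guest's avail index; nothing to do if we already caught up. */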
	avail_idx = rte_atomic_load_explicit((unsigned short __rte_atomic *)&cvq->avail->idx,
		rte_memory_order_acquire);
	if (avail_idx == cvq->last_avail_idx) {
		VHOST_CONFIG_LOG(dev->ifname, DEBUG, "Control queue empty");
		return 0;
	}

	desc_idx = cvq->avail->ring[cvq->last_avail_idx];
	if (desc_idx >= cvq->size) {
		VHOST_CONFIG_LOG(dev->ifname, ERR, "Out of range desc index, dropping");
		goto err;
	}

	ctrl_elem->head_idx = desc_idx;

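	/* Indirect chain: map the descriptor table it points to and walk that instead. */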
	if (cvq->desc[desc_idx].flags & VRING_DESC_F_INDIRECT) {
		desc_len = cvq->desc[desc_idx].len;
		desc_iova = cvq->desc[desc_idx].addr;

		descs = (struct vring_desc *)(uintptr_t)vhost_iova_to_vva(dev, cvq,
					desc_iova, &desc_len, VHOST_ACCESS_RO);
		if (!descs || desc_len != cvq->desc[desc_idx].len) {
			VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to map ctrl indirect descs");
			goto err;
		}

		desc_idx = 0;
	} else {
		descs = cvq->desc;
	}

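	/*
	 * First pass: walk the whole chain to validate its layout, accumulate
	 * the request size and map the single writable ack descriptor.
	 */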
	while (1) {
		desc_len = descs[desc_idx].len;
		desc_iova = descs[desc_idx].addr;

		n_descs++;

		if (descs[desc_idx].flags & VRING_DESC_F_WRITE) {
			if (ctrl_elem->desc_ack) {
				VHOST_CONFIG_LOG(dev->ifname, ERR,
						"Unexpected ctrl chain layout");
				goto err;
			}

			if (desc_len != sizeof(uint8_t)) {
				VHOST_CONFIG_LOG(dev->ifname, ERR,
						"Invalid ack size for ctrl req, dropping");
				goto err;
			}

			ctrl_elem->desc_ack = (uint8_t *)(uintptr_t)vhost_iova_to_vva(dev, cvq,
					desc_iova, &desc_len, VHOST_ACCESS_WO);
			if (!ctrl_elem->desc_ack || desc_len != sizeof(uint8_t)) {
				VHOST_CONFIG_LOG(dev->ifname, ERR,
						"Failed to map ctrl ack descriptor");
				goto err;
			}
		} else {
			if (ctrl_elem->desc_ack) {
				VHOST_CONFIG_LOG(dev->ifname, ERR,
						"Unexpected ctrl chain layout");
				goto err;
			}

			data_len += desc_len;
		}

		if (!(descs[desc_idx].flags & VRING_DESC_F_NEXT))
			break;

		desc_idx = descs[desc_idx].next;
	}

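	/*
	 * Rewind to the chain head: a chain placed behind an indirect
	 * descriptor consumes a single entry in the used ring.
	 */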
	desc_idx = ctrl_elem->head_idx;

	if (cvq->desc[desc_idx].flags & VRING_DESC_F_INDIRECT)
		ctrl_elem->n_descs = 1;
	else
		ctrl_elem->n_descs = n_descs;

	if (!ctrl_elem->desc_ack) {
		VHOST_CONFIG_LOG(dev->ifname, ERR, "Missing ctrl ack descriptor");
		goto err;
	}

	if (data_len < sizeof(ctrl_elem->ctrl_req->class) + sizeof(ctrl_elem->ctrl_req->command)) {
		VHOST_CONFIG_LOG(dev->ifname, ERR, "Invalid control header size");
		goto err;
	}

	ctrl_elem->ctrl_req = malloc(data_len);
	if (!ctrl_elem->ctrl_req) {
		VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to alloc ctrl request");
		goto err;
	}

	ctrl_req = (uint8_t *)ctrl_elem->ctrl_req;

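	/*
	 * Second pass: re-resolve the descriptor table (direct or indirect)
	 * and copy the request out of guest memory.
	 */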
	if (cvq->desc[desc_idx].flags & VRING_DESC_F_INDIRECT) {
		desc_len = cvq->desc[desc_idx].len;
		desc_iova = cvq->desc[desc_idx].addr;

		descs = (struct vring_desc *)(uintptr_t)vhost_iova_to_vva(dev, cvq,
					desc_iova, &desc_len, VHOST_ACCESS_RO);
		if (!descs || desc_len != cvq->desc[desc_idx].len) {
			VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to map ctrl indirect descs");
			goto free_err;
		}

		desc_idx = 0;
	} else {
		descs = cvq->desc;
	}

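	/* Copy the readable descriptors; the writable ack descriptor ends the chain. */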
	while (!(descs[desc_idx].flags & VRING_DESC_F_WRITE)) {
		desc_len = descs[desc_idx].len;
		desc_iova = descs[desc_idx].addr;

		desc_addr = vhost_iova_to_vva(dev, cvq, desc_iova, &desc_len, VHOST_ACCESS_RO);
		if (!desc_addr || desc_len < descs[desc_idx].len) {
			VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to map ctrl descriptor");
			goto free_err;
		}

		memcpy(ctrl_req, (void *)(uintptr_t)desc_addr, desc_len);
		ctrl_req += desc_len;

		if (!(descs[desc_idx].flags & VRING_DESC_F_NEXT))
			break;

		desc_idx = descs[desc_idx].next;
	}

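	/* Consume the avail entry; with EVENT_IDX, also update the avail event index. */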
	cvq->last_avail_idx++;
	if (cvq->last_avail_idx >= cvq->size)
		cvq->last_avail_idx -= cvq->size;

	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
		vhost_avail_event(cvq) = cvq->last_avail_idx;

	return 1;

free_err:
	free(ctrl_elem->ctrl_req);
err:
	cvq->last_avail_idx++;
	if (cvq->last_avail_idx >= cvq->size)
		cvq->last_avail_idx -= cvq->size;

	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
		vhost_avail_event(cvq) = cvq->last_avail_idx;

	return -1;
}

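/*
 * Process a single control request.
 *
 * Only VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET is handled: the first queue_pairs
 * RX/TX pairs are enabled, the remaining data virtqueues are disabled, and
 * the application is notified through vring_state_changed().
 *
 * Returns VIRTIO_NET_OK on success, VIRTIO_NET_ERR otherwise.
 */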
static uint8_t
virtio_net_ctrl_handle_req(struct virtio_net *dev, struct virtio_net_ctrl *ctrl_req)
{
	uint8_t ret = VIRTIO_NET_ERR;

	if (ctrl_req->class == VIRTIO_NET_CTRL_MQ &&
			ctrl_req->command == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
		uint16_t queue_pairs;
		uint32_t i;

		queue_pairs = *(uint16_t *)(uintptr_t)ctrl_req->command_data;
		VHOST_CONFIG_LOG(dev->ifname, INFO, "Ctrl req: MQ %u queue pairs", queue_pairs);
		ret = VIRTIO_NET_OK;

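		/* Enable the first queue_pairs pairs of data queues, disable the rest. */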
		for (i = 0; i < dev->nr_vring; i++) {
			struct vhost_virtqueue *vq = dev->virtqueue[i];
			bool enable;

			if (vq == dev->cvq)
				continue;

			if (i < queue_pairs * 2)
				enable = true;
			else
				enable = false;

			vq->enabled = enable;
			if (dev->notify_ops->vring_state_changed)
				dev->notify_ops->vring_state_changed(dev->vid, i, enable);
		}
	}

	return ret;
}

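/*
 * Return the processed request to the guest: fill a used ring entry with the
 * chain head index and descriptor count, publish the new used index with
 * release semantics and interrupt the guest if needed.
 */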
static int
virtio_net_ctrl_push(struct virtio_net *dev, struct virtio_net_ctrl_elem *ctrl_elem)
{
	struct vhost_virtqueue *cvq = dev->cvq;
	struct vring_used_elem *used_elem;

	used_elem = &cvq->used->ring[cvq->last_used_idx];
	used_elem->id = ctrl_elem->head_idx;
	used_elem->len = ctrl_elem->n_descs;

	cvq->last_used_idx++;
	if (cvq->last_used_idx >= cvq->size)
		cvq->last_used_idx -= cvq->size;

	rte_atomic_store_explicit((unsigned short __rte_atomic *)&cvq->used->idx,
		cvq->last_used_idx, rte_memory_order_release);

	vhost_vring_call_split(dev, dev->cvq);

	free(ctrl_elem->ctrl_req);

	return 0;
}

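/*
 * Handle all pending requests on the control virtqueue.
 *
 * Only split rings are supported for now. The control queue access lock and
 * the IOTLB lock are held for the whole processing loop.
 *
 * Returns 0 once the queue is drained, -1 on error.
 */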
int
virtio_net_ctrl_handle(struct virtio_net *dev)
{
	int ret = 0;

	if (dev->features & (1ULL << VIRTIO_F_RING_PACKED)) {
		VHOST_CONFIG_LOG(dev->ifname, ERR, "Packed ring not supported yet");
		return -1;
	}

	if (!dev->cvq) {
		VHOST_CONFIG_LOG(dev->ifname, ERR, "missing control queue");
		return -1;
	}

	rte_rwlock_read_lock(&dev->cvq->access_lock);
	vhost_user_iotlb_rd_lock(dev->cvq);

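	/* Pop, handle and push back requests until the ring is empty or an error occurs. */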
	while (1) {
		struct virtio_net_ctrl_elem ctrl_elem;

		memset(&ctrl_elem, 0, sizeof(struct virtio_net_ctrl_elem));

		ret = virtio_net_ctrl_pop(dev, dev->cvq, &ctrl_elem);
		if (ret <= 0)
			break;

		*ctrl_elem.desc_ack = virtio_net_ctrl_handle_req(dev, ctrl_elem.ctrl_req);

		ret = virtio_net_ctrl_push(dev, &ctrl_elem);
		if (ret < 0)
			break;
	}

	vhost_user_iotlb_rd_unlock(dev->cvq);
	rte_rwlock_read_unlock(&dev->cvq->access_lock);

	return ret;
}