xref: /dpdk/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c (revision 68a03efeed657e6e05f281479b33b51102797e15)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_io.h>

#include <mlx5_common.h>

#include "mlx5_vdpa_utils.h"
#include "mlx5_vdpa.h"


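/*
 * Kick-relay interrupt handler, invoked by the interrupt thread whenever
 * the guest writes to the kickfd of a virtq. It drains the eventfd counter
 * (an 8-byte read) and rings the device doorbell by writing the virtq index
 * to the doorbell page, so the device starts processing the ring. On the
 * first kick it also tries to map the host notifier into the guest, so
 * that later kicks can go straight to the device and bypass this relay.
 */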
static void
mlx5_vdpa_virtq_handler(void *cb_arg)
{
	struct mlx5_vdpa_virtq *virtq = cb_arg;
	struct mlx5_vdpa_priv *priv = virtq->priv;
	uint64_t buf;
	int nbytes;

	do {
		nbytes = read(virtq->intr_handle.fd, &buf, 8);
		if (nbytes < 0) {
			if (errno == EINTR ||
			    errno == EWOULDBLOCK ||
			    errno == EAGAIN)
				continue;
			DRV_LOG(ERR, "Failed to read kickfd of virtq %d: %s",
				virtq->index, strerror(errno));
		}
		break;
	} while (1);
	rte_write32(virtq->index, priv->virtq_db_addr);
	if (virtq->notifier_state == MLX5_VDPA_NOTIFIER_STATE_DISABLED) {
		if (rte_vhost_host_notifier_ctrl(priv->vid, virtq->index, true))
			virtq->notifier_state = MLX5_VDPA_NOTIFIER_STATE_ERR;
		else
			virtq->notifier_state =
					       MLX5_VDPA_NOTIFIER_STATE_ENABLED;
		DRV_LOG(INFO, "Virtq %u notifier state is %s.", virtq->index,
			virtq->notifier_state ==
				MLX5_VDPA_NOTIFIER_STATE_ENABLED ? "enabled" :
								    "disabled");
	}
	DRV_LOG(DEBUG, "Ring virtq %u doorbell.", virtq->index);
}

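/*
 * Release all resources of a single virtq: unregister the kickfd interrupt
 * callback (retrying while the callback may still be running), stop and
 * destroy the virtq DevX object, deregister and free the UMEM buffers, and
 * destroy the event QP if one was created.
 */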
static int
mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq)
{
	unsigned int i;
	int retries = MLX5_VDPA_INTR_RETRIES;
	int ret = -EAGAIN;

	if (virtq->intr_handle.fd != -1) {
		while (retries-- && ret == -EAGAIN) {
			ret = rte_intr_callback_unregister(&virtq->intr_handle,
							mlx5_vdpa_virtq_handler,
							virtq);
			if (ret == -EAGAIN) {
				DRV_LOG(DEBUG, "Try again to unregister fd %d "
					"of virtq %d interrupt, retries = %d.",
					virtq->intr_handle.fd,
					(int)virtq->index, retries);
				usleep(MLX5_VDPA_INTR_RETRIES_USEC);
			}
		}
		virtq->intr_handle.fd = -1;
	}
	if (virtq->virtq) {
		ret = mlx5_vdpa_virtq_stop(virtq->priv, virtq->index);
		if (ret)
			DRV_LOG(WARNING, "Failed to stop virtq %d.",
				virtq->index);
		claim_zero(mlx5_devx_cmd_destroy(virtq->virtq));
	}
	virtq->virtq = NULL;
	for (i = 0; i < RTE_DIM(virtq->umems); ++i) {
		if (virtq->umems[i].obj)
			claim_zero(mlx5_glue->devx_umem_dereg
							 (virtq->umems[i].obj));
		if (virtq->umems[i].buf)
			rte_free(virtq->umems[i].buf);
	}
	memset(&virtq->umems, 0, sizeof(virtq->umems));
	if (virtq->eqp.fw_qp)
		mlx5_vdpa_event_qp_destroy(&virtq->eqp);
	virtq->notifier_state = MLX5_VDPA_NOTIFIER_STATE_DISABLED;
	return 0;
}

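/*
 * Release all virtqs and the shared objects created by
 * mlx5_vdpa_virtqs_prepare(): per-virtq resources and counters, the TIS
 * objects (one per LAG port), the transport domain and the doorbell
 * mapping, leaving the private structure ready for a new prepare cycle.
 */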
void
mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv)
{
	int i;
	struct mlx5_vdpa_virtq *virtq;

	for (i = 0; i < priv->nr_virtqs; i++) {
		virtq = &priv->virtqs[i];
		mlx5_vdpa_virtq_unset(virtq);
		if (virtq->counters)
			claim_zero(mlx5_devx_cmd_destroy(virtq->counters));
	}
	for (i = 0; i < priv->num_lag_ports; i++) {
		if (priv->tiss[i]) {
			claim_zero(mlx5_devx_cmd_destroy(priv->tiss[i]));
			priv->tiss[i] = NULL;
		}
	}
	if (priv->td) {
		claim_zero(mlx5_devx_cmd_destroy(priv->td));
		priv->td = NULL;
	}
	if (priv->virtq_db_addr) {
		claim_zero(munmap(priv->virtq_db_addr, priv->var->length));
		priv->virtq_db_addr = NULL;
	}
	priv->features = 0;
	memset(priv->virtqs, 0, sizeof(*virtq) * priv->nr_virtqs);
	priv->nr_virtqs = 0;
}

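/*
 * Modify the state of a virtq DevX object: RDY when state is non-zero,
 * SUSPEND otherwise.
 */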
int
mlx5_vdpa_virtq_modify(struct mlx5_vdpa_virtq *virtq, int state)
{
	struct mlx5_devx_virtq_attr attr = {
			.type = MLX5_VIRTQ_MODIFY_TYPE_STATE,
			.state = state ? MLX5_VIRTQ_STATE_RDY :
					 MLX5_VIRTQ_STATE_SUSPEND,
			.queue_index = virtq->index,
	};

	return mlx5_devx_cmd_modify_virtq(virtq->virtq, &attr);
}

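/*
 * Suspend a running virtq and sync the vhost library with the hardware
 * indexes by querying the device (see mlx5_vdpa_virtq_query()). A virtq
 * that is already stopped is left untouched.
 */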
int
mlx5_vdpa_virtq_stop(struct mlx5_vdpa_priv *priv, int index)
{
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
	int ret;

	if (virtq->stopped)
		return 0;
	ret = mlx5_vdpa_virtq_modify(virtq, 0);
	if (ret)
		return -1;
	virtq->stopped = true;
	DRV_LOG(DEBUG, "vid %u virtq %u was stopped.", priv->vid, index);
	return mlx5_vdpa_virtq_query(priv, index);
}

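/*
 * Query the hardware available and used indexes of a virtq and push them
 * back to the vhost library as the new vring base, so that a later restart
 * or live migration resumes from the device state.
 */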
int
mlx5_vdpa_virtq_query(struct mlx5_vdpa_priv *priv, int index)
{
	struct mlx5_devx_virtq_attr attr = {0};
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
	int ret;

	if (mlx5_devx_cmd_query_virtq(virtq->virtq, &attr)) {
		DRV_LOG(ERR, "Failed to query virtq %d.", index);
		return -1;
	}
	DRV_LOG(INFO, "Query vid %d vring %d: hw_available_index=%d, "
		"hw_used_index=%d", priv->vid, index,
		attr.hw_available_index, attr.hw_used_index);
	ret = rte_vhost_set_vring_base(priv->vid, index,
				       attr.hw_available_index,
				       attr.hw_used_index);
	if (ret) {
		DRV_LOG(ERR, "Failed to set virtq %d base.", index);
		return -1;
	}
	if (attr.state == MLX5_VIRTQ_STATE_ERROR)
		DRV_LOG(WARNING, "vid %d vring %d hw error=%hhu",
			priv->vid, index, attr.error_type);
	return 0;
}

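/*
 * Translate a host virtual address (HVA) to a guest physical address (GPA)
 * by scanning the vhost memory regions:
 * gpa = hva - reg->host_user_addr + reg->guest_phys_addr.
 * A return value of 0 means the address is not covered by any region and
 * is treated as a translation failure by the callers.
 */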
static uint64_t
mlx5_vdpa_hva_to_gpa(struct rte_vhost_memory *mem, uint64_t hva)
{
	struct rte_vhost_mem_region *reg;
	uint32_t i;
	uint64_t gpa = 0;

	for (i = 0; i < mem->nregions; i++) {
		reg = &mem->regions[i];
		if (hva >= reg->host_user_addr &&
		    hva < reg->host_user_addr + reg->size) {
			gpa = hva - reg->host_user_addr + reg->guest_phys_addr;
			break;
		}
	}
	return gpa;
}

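/*
 * Create and start one virtq:
 * - fetch the vring addresses and fds from the vhost library,
 * - translate the negotiated virtio features into DevX virtq attributes,
 * - create the event QP for the completion path unless the guest polls,
 * - allocate and register the three UMEMs required by the device,
 * - translate the descriptor/used/available ring HVAs to GPAs (split ring),
 * - create the virtq object, move it to RDY, register the kickfd handler
 *   and subscribe to the virtq error event.
 */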
static int
mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)
{
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
	struct rte_vhost_vring vq;
	struct mlx5_devx_virtq_attr attr = {0};
	uint64_t gpa;
	int ret;
	unsigned int i;
	uint16_t last_avail_idx;
	uint16_t last_used_idx;
	uint16_t event_num = MLX5_EVENT_TYPE_OBJECT_CHANGE;
	uint64_t cookie;

	ret = rte_vhost_get_vhost_vring(priv->vid, index, &vq);
	if (ret)
		return -1;
	virtq->index = index;
	virtq->vq_size = vq.size;
	attr.tso_ipv4 = !!(priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO4));
	attr.tso_ipv6 = !!(priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO6));
	attr.tx_csum = !!(priv->features & (1ULL << VIRTIO_NET_F_CSUM));
	attr.rx_csum = !!(priv->features & (1ULL << VIRTIO_NET_F_GUEST_CSUM));
	attr.virtio_version_1_0 = !!(priv->features & (1ULL <<
							VIRTIO_F_VERSION_1));
	attr.type = (priv->features & (1ULL << VIRTIO_F_RING_PACKED)) ?
			MLX5_VIRTQ_TYPE_PACKED : MLX5_VIRTQ_TYPE_SPLIT;
	/*
	 * Event QPs are needed unless the guest works in poll mode (no
	 * callfd) and the device capability allows the no-MSIX event mode.
	 */
	attr.event_mode = vq.callfd != -1 || !(priv->caps.event_mode & (1 <<
					       MLX5_VIRTQ_EVENT_MODE_NO_MSIX)) ?
						      MLX5_VIRTQ_EVENT_MODE_QP :
						  MLX5_VIRTQ_EVENT_MODE_NO_MSIX;
	if (attr.event_mode == MLX5_VIRTQ_EVENT_MODE_QP) {
		ret = mlx5_vdpa_event_qp_create(priv, vq.size, vq.callfd,
						&virtq->eqp);
		if (ret) {
			DRV_LOG(ERR, "Failed to create event QPs for virtq %d.",
				index);
			return -1;
		}
		attr.qp_id = virtq->eqp.fw_qp->id;
	} else {
		DRV_LOG(INFO, "Virtq %d works in poll mode, no need for event"
			" QPs and the event mechanism.", index);
	}
	if (priv->caps.queue_counters_valid) {
		if (!virtq->counters)
			virtq->counters = mlx5_devx_cmd_create_virtio_q_counters
								(priv->ctx);
		if (!virtq->counters) {
			DRV_LOG(ERR, "Failed to create virtq counters for virtq"
				" %d.", index);
			goto error;
		}
		attr.counters_obj_id = virtq->counters->id;
	}
	/* Setup 3 UMEMs for each virtq. */
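	/*
	 * The required size of each UMEM is advertised by the device as a
	 * linear function of the queue size: size = a * q_size + b, where
	 * the a/b pairs come from the queried device capabilities.
	 */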
	for (i = 0; i < RTE_DIM(virtq->umems); ++i) {
		virtq->umems[i].size = priv->caps.umems[i].a * vq.size +
							  priv->caps.umems[i].b;
		virtq->umems[i].buf = rte_zmalloc(__func__,
						  virtq->umems[i].size, 4096);
		if (!virtq->umems[i].buf) {
			DRV_LOG(ERR, "Cannot allocate umem %d memory for virtq"
				" %u.", i, index);
			goto error;
		}
		virtq->umems[i].obj = mlx5_glue->devx_umem_reg(priv->ctx,
							virtq->umems[i].buf,
							virtq->umems[i].size,
							IBV_ACCESS_LOCAL_WRITE);
		if (!virtq->umems[i].obj) {
			DRV_LOG(ERR, "Failed to register umem %d for virtq %u.",
				i, index);
			goto error;
		}
		attr.umems[i].id = virtq->umems[i].obj->umem_id;
		attr.umems[i].offset = 0;
		attr.umems[i].size = virtq->umems[i].size;
	}
	if (attr.type == MLX5_VIRTQ_TYPE_SPLIT) {
		gpa = mlx5_vdpa_hva_to_gpa(priv->vmem,
					   (uint64_t)(uintptr_t)vq.desc);
		if (!gpa) {
			DRV_LOG(ERR, "Failed to get descriptor ring GPA.");
			goto error;
		}
		attr.desc_addr = gpa;
		gpa = mlx5_vdpa_hva_to_gpa(priv->vmem,
					   (uint64_t)(uintptr_t)vq.used);
		if (!gpa) {
			DRV_LOG(ERR, "Failed to get GPA for used ring.");
			goto error;
		}
		attr.used_addr = gpa;
		gpa = mlx5_vdpa_hva_to_gpa(priv->vmem,
					   (uint64_t)(uintptr_t)vq.avail);
		if (!gpa) {
			DRV_LOG(ERR, "Failed to get GPA for available ring.");
			goto error;
		}
		attr.available_addr = gpa;
	}
	ret = rte_vhost_get_vring_base(priv->vid, index, &last_avail_idx,
				 &last_used_idx);
	if (ret) {
		last_avail_idx = 0;
		last_used_idx = 0;
		DRV_LOG(WARNING, "Couldn't get vring base, indexes are set to 0.");
	} else {
		DRV_LOG(INFO, "vid %d: Init last_avail_idx=%d, last_used_idx=%d for "
				"virtq %d.", priv->vid, last_avail_idx,
				last_used_idx, index);
	}
	attr.hw_available_index = last_avail_idx;
	attr.hw_used_index = last_used_idx;
	attr.q_size = vq.size;
	attr.mkey = priv->gpa_mkey_index;
	attr.tis_id = priv->tiss[(index / 2) % priv->num_lag_ports]->id;
	attr.queue_index = index;
	attr.pd = priv->pdn;
	attr.hw_latency_mode = priv->hw_latency_mode;
	attr.hw_max_latency_us = priv->hw_max_latency_us;
	attr.hw_max_pending_comp = priv->hw_max_pending_comp;
	virtq->virtq = mlx5_devx_cmd_create_virtq(priv->ctx, &attr);
	virtq->priv = priv;
	if (!virtq->virtq)
		goto error;
	claim_zero(rte_vhost_enable_guest_notification(priv->vid, index, 1));
	if (mlx5_vdpa_virtq_modify(virtq, 1))
		goto error;
	rte_write32(virtq->index, priv->virtq_db_addr);
	/* Set up the kickfd interrupt relay to the device doorbell. */
	virtq->intr_handle.fd = vq.kickfd;
	if (virtq->intr_handle.fd == -1) {
		DRV_LOG(WARNING, "Virtq %d kickfd is invalid.", index);
	} else {
		virtq->intr_handle.type = RTE_INTR_HANDLE_EXT;
		if (rte_intr_callback_register(&virtq->intr_handle,
					       mlx5_vdpa_virtq_handler,
					       virtq)) {
			virtq->intr_handle.fd = -1;
			DRV_LOG(ERR, "Failed to register virtq %d interrupt.",
				index);
			goto error;
		} else {
			DRV_LOG(DEBUG, "Register fd %d interrupt for virtq %d.",
				virtq->intr_handle.fd, index);
		}
	}
	/* Subscribe virtq error event. */
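	/*
	 * The 64-bit event cookie packs the virtq generation in the upper
	 * 32 bits and the virtq index in the lower 32 bits (version =
	 * cookie >> 32, index = (uint32_t)cookie), so the error-event
	 * handler can recognize and drop stale events belonging to a
	 * previous incarnation of the virtq.
	 */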
	virtq->version++;
	cookie = ((uint64_t)virtq->version << 32) + index;
	ret = mlx5_glue->devx_subscribe_devx_event(priv->err_chnl,
						   virtq->virtq->obj,
						   sizeof(event_num),
						   &event_num, cookie);
	if (ret) {
		DRV_LOG(ERR, "Failed to subscribe device %d virtq %d error event.",
			priv->vid, index);
		rte_errno = errno;
		goto error;
	}
	virtq->stopped = false;
	DRV_LOG(DEBUG, "vid %u virtq %u was created successfully.", priv->vid,
		index);
	return 0;
error:
	mlx5_vdpa_virtq_unset(virtq);
	return -1;
}

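/*
 * Check the negotiated virtio features against the device capabilities:
 * every feature the guest negotiated (packed ring, TSO4/6, TX/RX checksum,
 * virtio 1.0) must have been reported by the HW/driver, otherwise the
 * configuration is rejected with -ENOTSUP.
 */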
static int
mlx5_vdpa_features_validate(struct mlx5_vdpa_priv *priv)
{
	if (priv->features & (1ULL << VIRTIO_F_RING_PACKED)) {
		if (!(priv->caps.virtio_queue_type & (1 <<
						     MLX5_VIRTQ_TYPE_PACKED))) {
			DRV_LOG(ERR, "Failed to configure PACKED mode for vdev "
				"%d - it was not reported by HW/driver"
				" capability.", priv->vid);
			return -ENOTSUP;
		}
	}
	if (priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO4)) {
		if (!priv->caps.tso_ipv4) {
			DRV_LOG(ERR, "Failed to enable TSO4 for vdev %d - TSO4"
				" was not reported by HW/driver capability.",
				priv->vid);
			return -ENOTSUP;
		}
	}
	if (priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO6)) {
		if (!priv->caps.tso_ipv6) {
			DRV_LOG(ERR, "Failed to enable TSO6 for vdev %d - TSO6"
				" was not reported by HW/driver capability.",
				priv->vid);
			return -ENOTSUP;
		}
	}
	if (priv->features & (1ULL << VIRTIO_NET_F_CSUM)) {
		if (!priv->caps.tx_csum) {
			DRV_LOG(ERR, "Failed to enable CSUM for vdev %d - CSUM"
				" was not reported by HW/driver capability.",
				priv->vid);
			return -ENOTSUP;
		}
	}
	if (priv->features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
		if (!priv->caps.rx_csum) {
			DRV_LOG(ERR, "Failed to enable GUEST CSUM for vdev %d -"
				" GUEST CSUM was not reported by HW/driver "
				"capability.", priv->vid);
			return -ENOTSUP;
		}
	}
	if (priv->features & (1ULL << VIRTIO_F_VERSION_1)) {
		if (!priv->caps.virtio_version_1_0) {
			DRV_LOG(ERR, "Failed to enable version 1 for vdev %d -"
				" version 1 was not reported by HW/driver"
				" capability.", priv->vid);
			return -ENOTSUP;
		}
	}
	return 0;
}

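/*
 * Prepare all virtqs for the given vhost device: validate the negotiated
 * features, map the doorbell page advertised by the VAR, create the
 * transport domain and one TIS per LAG port (TX affinity i + 1), then set
 * up every enabled virtq. Any failure rolls everything back through
 * mlx5_vdpa_virtqs_release().
 */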
int
mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)
{
	struct mlx5_devx_tis_attr tis_attr = {0};
	uint32_t i;
	uint16_t nr_vring = rte_vhost_get_vring_num(priv->vid);
	int ret = rte_vhost_get_negotiated_features(priv->vid, &priv->features);

	if (ret || mlx5_vdpa_features_validate(priv)) {
		DRV_LOG(ERR, "Failed to configure negotiated features.");
		return -1;
	}
	if (nr_vring > priv->caps.max_num_virtio_queues * 2) {
		DRV_LOG(ERR, "Do not support more than %d virtqs (%d).",
			(int)priv->caps.max_num_virtio_queues * 2,
			(int)nr_vring);
		return -1;
	}
	/* Always map the entire page. */
	priv->virtq_db_addr = mmap(NULL, priv->var->length, PROT_READ |
				   PROT_WRITE, MAP_SHARED, priv->ctx->cmd_fd,
				   priv->var->mmap_off);
	if (priv->virtq_db_addr == MAP_FAILED) {
		DRV_LOG(ERR, "Failed to map doorbell page, errno %u.", errno);
		priv->virtq_db_addr = NULL;
		goto error;
	} else {
		DRV_LOG(DEBUG, "VAR address of doorbell mapping is %p.",
			priv->virtq_db_addr);
	}
	priv->td = mlx5_devx_cmd_create_td(priv->ctx);
	if (!priv->td) {
		DRV_LOG(ERR, "Failed to create transport domain.");
		goto error;
	}
	tis_attr.transport_domain = priv->td->id;
	for (i = 0; i < priv->num_lag_ports; i++) {
		/* 0 is auto affinity, non-zero value to propose port. */
		tis_attr.lag_tx_port_affinity = i + 1;
		priv->tiss[i] = mlx5_devx_cmd_create_tis(priv->ctx, &tis_attr);
		if (!priv->tiss[i]) {
			DRV_LOG(ERR, "Failed to create TIS %u.", i);
			goto error;
		}
	}
	priv->nr_virtqs = nr_vring;
	for (i = 0; i < nr_vring; i++)
		if (priv->virtqs[i].enable && mlx5_vdpa_virtq_setup(priv, i))
			goto error;
	return 0;
error:
	mlx5_vdpa_virtqs_release(priv);
	return -1;
}

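/*
 * Check whether the vhost configuration of a virtq changed behind the
 * driver's back (ring size, kickfd or callfd). Returns 1 when the virtq
 * must be recreated, 0 when it is unchanged and -1 on query failure.
 */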
static int
mlx5_vdpa_virtq_is_modified(struct mlx5_vdpa_priv *priv,
			    struct mlx5_vdpa_virtq *virtq)
{
	struct rte_vhost_vring vq;
	int ret = rte_vhost_get_vhost_vring(priv->vid, virtq->index, &vq);

	if (ret)
		return -1;
	if (vq.size != virtq->vq_size || vq.kickfd != virtq->intr_handle.fd)
		return 1;
	if (virtq->eqp.cq.cq_obj.cq) {
		if (vq.callfd != virtq->eqp.cq.callfd)
			return 1;
	} else if (vq.callfd != -1) {
		return 1;
	}
	return 0;
}

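/*
 * Enable or disable a virtq at run time. Before the device is configured
 * only the flag is recorded. Enabling an already-enabled virtq recreates
 * it only if its vhost configuration was modified; RX steering is updated
 * whenever a receive queue changes state.
 */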
int
mlx5_vdpa_virtq_enable(struct mlx5_vdpa_priv *priv, int index, int enable)
{
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
	int ret;

	DRV_LOG(INFO, "Update virtq %d status %sable -> %sable.", index,
		virtq->enable ? "en" : "dis", enable ? "en" : "dis");
	if (!priv->configured) {
		virtq->enable = !!enable;
		return 0;
	}
	if (virtq->enable == !!enable) {
		if (!enable)
			return 0;
		ret = mlx5_vdpa_virtq_is_modified(priv, virtq);
		if (ret < 0) {
			DRV_LOG(ERR, "Virtq %d modify check failed.", index);
			return -1;
		}
		if (ret == 0)
			return 0;
		DRV_LOG(INFO, "Virtq %d was modified, recreate it.", index);
	}
	if (virtq->virtq) {
		virtq->enable = 0;
		if (is_virtq_recvq(virtq->index, priv->nr_virtqs)) {
			ret = mlx5_vdpa_steer_update(priv);
			if (ret)
				DRV_LOG(WARNING, "Failed to disable steering "
					"for virtq %d.", index);
		}
		mlx5_vdpa_virtq_unset(virtq);
	}
	if (enable) {
		ret = mlx5_vdpa_virtq_setup(priv, index);
		if (ret) {
			DRV_LOG(ERR, "Failed to setup virtq %d.", index);
			return ret;
		}
		virtq->enable = 1;
		if (is_virtq_recvq(virtq->index, priv->nr_virtqs)) {
			ret = mlx5_vdpa_steer_update(priv);
			if (ret)
				DRV_LOG(WARNING, "Failed to enable steering "
					"for virtq %d.", index);
		}
	}
	return 0;
}

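/*
 * Read the virtq hardware counters and fill up to n entries of the stats
 * array, reporting each value relative to the snapshot taken by the last
 * mlx5_vdpa_virtq_stats_reset() call. Returns the number of filled
 * entries, or a negative value on failure.
 */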
int
mlx5_vdpa_virtq_stats_get(struct mlx5_vdpa_priv *priv, int qid,
			  struct rte_vdpa_stat *stats, unsigned int n)
{
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[qid];
	struct mlx5_devx_virtio_q_couners_attr attr = {0};
	int ret;

	if (!virtq->counters) {
		DRV_LOG(ERR, "Failed to read virtq %d statistics - virtq "
			"is invalid.", qid);
		return -EINVAL;
	}
	ret = mlx5_devx_cmd_query_virtio_q_counters(virtq->counters, &attr);
	if (ret) {
		DRV_LOG(ERR, "Failed to read virtq %d stats from HW.", qid);
		return ret;
	}
	ret = (int)RTE_MIN(n, (unsigned int)MLX5_VDPA_STATS_MAX);
	if (ret == MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS)
		return ret;
	stats[MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS] = (struct rte_vdpa_stat) {
		.id = MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS,
		.value = attr.received_desc - virtq->reset.received_desc,
	};
	if (ret == MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS)
		return ret;
	stats[MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS] = (struct rte_vdpa_stat) {
		.id = MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS,
		.value = attr.completed_desc - virtq->reset.completed_desc,
	};
	if (ret == MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS)
		return ret;
	stats[MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS] = (struct rte_vdpa_stat) {
		.id = MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS,
		.value = attr.bad_desc_errors - virtq->reset.bad_desc_errors,
	};
	if (ret == MLX5_VDPA_STATS_EXCEED_MAX_CHAIN)
		return ret;
	stats[MLX5_VDPA_STATS_EXCEED_MAX_CHAIN] = (struct rte_vdpa_stat) {
		.id = MLX5_VDPA_STATS_EXCEED_MAX_CHAIN,
		.value = attr.exceed_max_chain - virtq->reset.exceed_max_chain,
	};
	if (ret == MLX5_VDPA_STATS_INVALID_BUFFER)
		return ret;
	stats[MLX5_VDPA_STATS_INVALID_BUFFER] = (struct rte_vdpa_stat) {
		.id = MLX5_VDPA_STATS_INVALID_BUFFER,
		.value = attr.invalid_buffer - virtq->reset.invalid_buffer,
	};
	if (ret == MLX5_VDPA_STATS_COMPLETION_ERRORS)
		return ret;
	stats[MLX5_VDPA_STATS_COMPLETION_ERRORS] = (struct rte_vdpa_stat) {
		.id = MLX5_VDPA_STATS_COMPLETION_ERRORS,
		.value = attr.error_cqes - virtq->reset.error_cqes,
	};
	return ret;
}

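/*
 * "Reset" the virtq statistics by snapshotting the current hardware
 * counters into virtq->reset; subsequent mlx5_vdpa_virtq_stats_get() calls
 * report values relative to this snapshot.
 */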
int
mlx5_vdpa_virtq_stats_reset(struct mlx5_vdpa_priv *priv, int qid)
{
	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[qid];
	int ret;

	if (!virtq->counters) {
		DRV_LOG(ERR, "Failed to reset virtq %d statistics - virtq "
			"is invalid.", qid);
		return -EINVAL;
	}
	ret = mlx5_devx_cmd_query_virtio_q_counters(virtq->counters,
						    &virtq->reset);
	if (ret)
		DRV_LOG(ERR, "Failed to read virtq %d reset stats from HW.",
			qid);
	return ret;
}
635