xref: /dpdk/drivers/vdpa/mlx5/mlx5_vdpa_event.c (revision 32fbcf3139fbff04651b3fe173e9f3457f105221)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2019 Mellanox Technologies, Ltd
3  */
4 #include <unistd.h>
5 #include <stdint.h>
6 #include <sched.h>
7 #include <fcntl.h>
8 #include <sys/eventfd.h>
9 
10 #include <rte_malloc.h>
11 #include <rte_memory.h>
12 #include <rte_errno.h>
13 #include <rte_lcore.h>
14 #include <rte_atomic.h>
15 #include <rte_common.h>
16 #include <rte_io.h>
17 #include <rte_alarm.h>
18 
19 #include <mlx5_common.h>
20 #include <mlx5_common_os.h>
21 #include <mlx5_common_devx.h>
22 #include <mlx5_glue.h>
23 
24 #include "mlx5_vdpa_utils.h"
25 #include "mlx5_vdpa.h"
26 
27 
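/*
 * Time window, in seconds, used by the error interrupt handler below when
 * deciding whether a failing virtq should be retried or given up on.
 */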
28 #define MLX5_VDPA_ERROR_TIME_SEC 3u
29 
30 void
31 mlx5_vdpa_event_qp_global_release(struct mlx5_vdpa_priv *priv)
32 {
33 	mlx5_devx_uar_release(&priv->uar);
34 #ifdef HAVE_IBV_DEVX_EVENT
35 	if (priv->eventc) {
36 		mlx5_os_devx_destroy_event_channel(priv->eventc);
37 		priv->eventc = NULL;
38 	}
39 #endif
40 }
41 
42 /* Prepare all the global resources for all the event objects. */
43 int
44 mlx5_vdpa_event_qp_global_prepare(struct mlx5_vdpa_priv *priv)
45 {
46 	priv->eventc = mlx5_os_devx_create_event_channel(priv->cdev->ctx,
47 			   MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
48 	if (!priv->eventc) {
49 		rte_errno = errno;
50 		DRV_LOG(ERR, "Failed to create event channel %d.",
51 			rte_errno);
52 		goto error;
53 	}
54 	if (mlx5_devx_uar_prepare(priv->cdev, &priv->uar) != 0) {
55 		DRV_LOG(ERR, "Failed to allocate UAR.");
56 		goto error;
57 	}
58 	return 0;
59 error:
60 	mlx5_vdpa_event_qp_global_release(priv);
61 	return -1;
62 }
63 
64 static void
65 mlx5_vdpa_cq_destroy(struct mlx5_vdpa_cq *cq)
66 {
67 	mlx5_devx_cq_destroy(&cq->cq_obj);
68 	memset(cq, 0, sizeof(*cq));
69 }
70 
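/*
 * Arm the CQ so that the next completion raises an event on the DevX event
 * channel. The doorbell value carries the arm sequence number and the masked
 * consumer index in its high word and the CQ ID in its low word.
 */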
71 static inline void __rte_unused
72 mlx5_vdpa_cq_arm(struct mlx5_vdpa_priv *priv, struct mlx5_vdpa_cq *cq)
73 {
74 	uint32_t arm_sn = cq->arm_sn << MLX5_CQ_SQN_OFFSET;
75 	uint32_t cq_ci = cq->cq_ci & MLX5_CI_MASK;
76 	uint32_t doorbell_hi = arm_sn | MLX5_CQ_DBR_CMD_ALL | cq_ci;
77 	uint64_t doorbell = ((uint64_t)doorbell_hi << 32) | cq->cq_obj.cq->id;
78 	uint64_t db_be = rte_cpu_to_be_64(doorbell);
79 
80 	mlx5_doorbell_ring(&priv->uar.cq_db, db_be, doorbell_hi,
81 			   &cq->cq_obj.db_rec[MLX5_CQ_ARM_DB], 0);
82 	cq->arm_sn++;
83 	cq->armed = 1;
84 }
85 
86 static int
87 mlx5_vdpa_cq_create(struct mlx5_vdpa_priv *priv, uint16_t log_desc_n,
88 		int callfd, struct mlx5_vdpa_virtq *virtq)
89 {
90 	struct mlx5_devx_cq_attr attr = {
91 		.use_first_only = 1,
92 		.uar_page_id = mlx5_os_get_devx_uar_page_id(priv->uar.obj),
93 	};
94 	struct mlx5_vdpa_cq *cq = &virtq->eqp.cq;
95 	uint16_t event_nums[1] = {0};
96 	int ret;
97 
98 	ret = mlx5_devx_cq_create(priv->cdev->ctx, &cq->cq_obj, log_desc_n,
99 				  &attr, SOCKET_ID_ANY);
100 	if (ret)
101 		goto error;
102 	cq->cq_ci = 0;
103 	cq->log_desc_n = log_desc_n;
104 	rte_spinlock_init(&cq->sl);
105 	/* Subscribe CQ event to the event channel controlled by the driver. */
106 	ret = mlx5_glue->devx_subscribe_devx_event(priv->eventc,
107 						   cq->cq_obj.cq->obj,
108 						   sizeof(event_nums),
109 						   event_nums,
110 						   (uint64_t)(uintptr_t)virtq);
111 	if (ret) {
112 		DRV_LOG(ERR, "Failed to subscribe CQE event.");
113 		rte_errno = errno;
114 		goto error;
115 	}
116 	cq->callfd = callfd;
117 	/* Initialize the CQE to ones so it is HW-owned at the start. */
118 	cq->cq_obj.cqes[0].op_own = MLX5_CQE_OWNER_MASK;
119 	cq->cq_obj.cqes[0].wqe_counter = rte_cpu_to_be_16(UINT16_MAX);
120 	/* First arming. */
121 	mlx5_vdpa_cq_arm(priv, cq);
122 	return 0;
123 error:
124 	mlx5_vdpa_cq_destroy(cq);
125 	return -1;
126 }
127 
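/*
 * Poll the collapsed CQ (created with use_first_only set): HW overwrites a
 * single CQE, so the number of new completions is derived from that CQE's
 * wqe_counter relative to the event QP producer index, and then the CQ and
 * SW QP doorbell records are updated accordingly.
 */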
128 static inline uint32_t
129 mlx5_vdpa_cq_poll(struct mlx5_vdpa_cq *cq)
130 {
131 	struct mlx5_vdpa_event_qp *eqp =
132 				container_of(cq, struct mlx5_vdpa_event_qp, cq);
133 	const unsigned int cq_size = 1 << cq->log_desc_n;
134 	union {
135 		struct {
136 			uint16_t wqe_counter;
137 			uint8_t rsvd5;
138 			uint8_t op_own;
139 		};
140 		uint32_t word;
141 	} last_word;
142 	uint16_t next_wqe_counter = eqp->qp_pi;
143 	uint16_t cur_wqe_counter;
144 	uint16_t comp;
145 
146 	last_word.word = rte_read32(&cq->cq_obj.cqes[0].wqe_counter);
147 	cur_wqe_counter = rte_be_to_cpu_16(last_word.wqe_counter);
148 	comp = cur_wqe_counter + (uint16_t)1 - next_wqe_counter;
149 	if (comp) {
150 		cq->cq_ci += comp;
151 		MLX5_ASSERT(MLX5_CQE_OPCODE(last_word.op_own) !=
152 			    MLX5_CQE_INVALID);
153 		if (unlikely(!(MLX5_CQE_OPCODE(last_word.op_own) ==
154 			       MLX5_CQE_RESP_ERR ||
155 			       MLX5_CQE_OPCODE(last_word.op_own) ==
156 			       MLX5_CQE_REQ_ERR)))
157 			cq->errors++;
158 		rte_io_wmb();
159 		/* Ring CQ doorbell record. */
160 		cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
161 		eqp->qp_pi += comp;
162 		rte_io_wmb();
163 		/* Ring SW QP doorbell record. */
164 		eqp->sw_qp.db_rec[0] = rte_cpu_to_be_32(eqp->qp_pi + cq_size);
165 	}
166 	return comp;
167 }
168 
169 static void
170 mlx5_vdpa_arm_all_cqs(struct mlx5_vdpa_priv *priv)
171 {
172 	struct mlx5_vdpa_virtq *virtq;
173 	struct mlx5_vdpa_cq *cq;
174 	int i;
175 
176 	for (i = 0; i < priv->nr_virtqs; i++) {
177 		virtq = &priv->virtqs[i];
178 		pthread_mutex_lock(&virtq->virtq_lock);
179 		cq = &priv->virtqs[i].eqp.cq;
180 		if (cq->cq_obj.cq && !cq->armed)
181 			mlx5_vdpa_cq_arm(priv, cq);
182 		pthread_mutex_unlock(&virtq->virtq_lock);
183 	}
184 }
185 
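/*
 * Adaptive polling delay: in dynamic-timer mode the delay grows by event_us
 * when no completion was found and shrinks in proportion to the largest
 * completion burst; a zero delay yields the CPU instead of sleeping.
 */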
186 static void
187 mlx5_vdpa_timer_sleep(struct mlx5_vdpa_priv *priv, uint32_t max)
188 {
189 	if (priv->event_mode == MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER) {
190 		switch (max) {
191 		case 0:
192 			priv->timer_delay_us += priv->event_us;
193 			break;
194 		case 1:
195 			break;
196 		default:
197 			priv->timer_delay_us /= max;
198 			break;
199 		}
200 	}
201 	if (priv->timer_delay_us)
202 		usleep(priv->timer_delay_us);
203 	else
204 		/* Give up the CPU to improve polling thread scheduling. */
205 		sched_yield();
206 }
207 
208 /* Notify the virtio device about new traffic on a specific virtq. */
209 static uint32_t
210 mlx5_vdpa_queue_complete(struct mlx5_vdpa_cq *cq)
211 {
212 	uint32_t comp = 0;
213 
214 	if (cq->cq_obj.cq) {
215 		comp = mlx5_vdpa_cq_poll(cq);
216 		if (comp) {
217 			if (cq->callfd != -1)
218 				eventfd_write(cq->callfd, (eventfd_t)1);
219 			cq->armed = 0;
220 		}
221 	}
222 	return comp;
223 }
224 
225 /* Notify the virtio device about new traffic on any virtq. */
226 static uint32_t
227 mlx5_vdpa_queues_complete(struct mlx5_vdpa_priv *priv)
228 {
229 	struct mlx5_vdpa_virtq *virtq;
230 	struct mlx5_vdpa_cq *cq;
231 	uint32_t max = 0;
232 	uint32_t comp;
233 	int i;
234 
235 	for (i = 0; i < priv->nr_virtqs; i++) {
236 		virtq = &priv->virtqs[i];
237 		pthread_mutex_lock(&virtq->virtq_lock);
238 		cq = &virtq->eqp.cq;
239 		comp = mlx5_vdpa_queue_complete(cq);
240 		pthread_mutex_unlock(&virtq->virtq_lock);
241 		if (comp > max)
242 			max = comp;
243 	}
244 	return max;
245 }
246 
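/*
 * Drain pending completions of one virtq, reset the CQE and the event QP
 * producer index, and re-arm the CQ if it is not already armed.
 */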
247 static void
248 mlx5_vdpa_drain_cq_one(struct mlx5_vdpa_priv *priv,
249 	struct mlx5_vdpa_virtq *virtq)
250 {
251 	struct mlx5_vdpa_cq *cq = &virtq->eqp.cq;
252 
253 	mlx5_vdpa_queue_complete(cq);
254 	if (cq->cq_obj.cq) {
255 		cq->cq_obj.cqes[0].wqe_counter = rte_cpu_to_be_16(UINT16_MAX);
256 		virtq->eqp.qp_pi = 0;
257 		if (!cq->armed)
258 			mlx5_vdpa_cq_arm(priv, cq);
259 	}
260 }
261 
262 void
263 mlx5_vdpa_drain_cq(struct mlx5_vdpa_priv *priv)
264 {
265 	struct mlx5_vdpa_virtq *virtq;
266 	unsigned int i;
267 
268 	for (i = 0; i < priv->caps.max_num_virtio_queues; i++) {
269 		virtq = &priv->virtqs[i];
270 		mlx5_vdpa_drain_cq_one(priv, virtq);
271 	}
272 }
273 
274 /* Wait on the CQ event channel for a completion event. */
275 static struct mlx5_vdpa_virtq *
276 mlx5_vdpa_event_wait(struct mlx5_vdpa_priv *priv __rte_unused)
277 {
278 #ifdef HAVE_IBV_DEVX_EVENT
279 	union {
280 		struct mlx5dv_devx_async_event_hdr event_resp;
281 		uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
282 	} out;
283 	int ret = mlx5_glue->devx_get_event(priv->eventc, &out.event_resp,
284 					    sizeof(out.buf));
285 
286 	if (ret >= 0)
287 		return (struct mlx5_vdpa_virtq *)
288 				(uintptr_t)out.event_resp.cookie;
289 	DRV_LOG(INFO, "Got error in devx_get_event, ret = %d, errno = %d.",
290 		ret, errno);
291 #endif
292 	return NULL;
293 }
294 
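/*
 * Event thread body. In the timer modes it polls all virtq CQs and, after a
 * configurable period without traffic, arms the CQs and blocks on the event
 * channel; in interrupt-only mode it blocks on the event channel and re-arms
 * the CQ whenever completions were found.
 */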
295 static uint32_t
296 mlx5_vdpa_event_handle(void *arg)
297 {
298 	struct mlx5_vdpa_priv *priv = arg;
299 	struct mlx5_vdpa_virtq *virtq;
300 	uint32_t max;
301 
302 	switch (priv->event_mode) {
303 	case MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER:
304 	case MLX5_VDPA_EVENT_MODE_FIXED_TIMER:
305 		priv->timer_delay_us = priv->event_us;
306 		while (1) {
307 			max = mlx5_vdpa_queues_complete(priv);
308 			if (max == 0 && priv->no_traffic_counter++ >=
309 			    priv->no_traffic_max) {
310 				DRV_LOG(DEBUG, "Device %s traffic was stopped.",
311 					priv->vdev->device->name);
312 				mlx5_vdpa_arm_all_cqs(priv);
313 				do {
314 					virtq = mlx5_vdpa_event_wait(priv);
315 					if (virtq == NULL)
316 						break;
317 					pthread_mutex_lock(
318 						&virtq->virtq_lock);
319 					if (mlx5_vdpa_queue_complete(
320 						&virtq->eqp.cq) > 0) {
321 						pthread_mutex_unlock(
322 							&virtq->virtq_lock);
323 						break;
324 					}
325 					pthread_mutex_unlock(
326 						&virtq->virtq_lock);
327 				} while (1);
328 				priv->timer_delay_us = priv->event_us;
329 				priv->no_traffic_counter = 0;
330 			} else if (max != 0) {
331 				priv->no_traffic_counter = 0;
332 			}
333 			mlx5_vdpa_timer_sleep(priv, max);
334 		}
335 		return 0;
336 	case MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT:
337 		do {
338 			virtq = mlx5_vdpa_event_wait(priv);
339 			if (virtq != NULL) {
340 				pthread_mutex_lock(&virtq->virtq_lock);
341 				if (mlx5_vdpa_queue_complete(
342 					&virtq->eqp.cq) > 0)
343 					mlx5_vdpa_cq_arm(priv, &virtq->eqp.cq);
344 				pthread_mutex_unlock(&virtq->virtq_lock);
345 			}
346 		} while (1);
347 		return 0;
348 	default:
349 		return 0;
350 	}
351 }
352 
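/*
 * Device error interrupt handler. The 64-bit event cookie encodes the virtq
 * index in its low 32 bits and the virtq version in its high 32 bits, so
 * stale events targeting a re-created virtq are ignored.
 */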
353 static void
354 mlx5_vdpa_err_interrupt_handler(void *cb_arg __rte_unused)
355 {
356 #ifdef HAVE_IBV_DEVX_EVENT
357 	struct mlx5_vdpa_priv *priv = cb_arg;
358 	union {
359 		struct mlx5dv_devx_async_event_hdr event_resp;
360 		uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
361 	} out;
362 	uint32_t vq_index, i, version;
363 	struct mlx5_vdpa_virtq *virtq;
364 	uint64_t sec;
365 
366 	while (mlx5_glue->devx_get_event(priv->err_chnl, &out.event_resp,
367 					 sizeof(out.buf)) >=
368 				       (ssize_t)sizeof(out.event_resp.cookie)) {
369 		vq_index = out.event_resp.cookie & UINT32_MAX;
370 		version = out.event_resp.cookie >> 32;
371 		if (vq_index >= priv->nr_virtqs) {
372 			DRV_LOG(ERR, "Invalid device %s error event virtq %d.",
373 				priv->vdev->device->name, vq_index);
374 			continue;
375 		}
376 		virtq = &priv->virtqs[vq_index];
377 		pthread_mutex_lock(&virtq->virtq_lock);
378 		if (!virtq->enable || virtq->version != version)
379 			goto unlock;
380 		if (rte_rdtsc() / rte_get_tsc_hz() < MLX5_VDPA_ERROR_TIME_SEC)
381 			goto unlock;
382 		virtq->stopped = 1;
383 		/* Query error info. */
384 		if (mlx5_vdpa_virtq_query(priv, vq_index))
385 			goto log;
386 		/* Disable vq. */
387 		if (mlx5_vdpa_virtq_enable(priv, vq_index, 0)) {
388 			DRV_LOG(ERR, "Failed to disable virtq %d.", vq_index);
389 			goto log;
390 		}
391 		/* Retry unless the error repeated too many times within the last 3 seconds. */
392 		sec = (rte_rdtsc() - virtq->err_time[0]) / rte_get_tsc_hz();
393 		if (sec > MLX5_VDPA_ERROR_TIME_SEC) {
394 			/* Retry. */
395 			if (mlx5_vdpa_virtq_enable(priv, vq_index, 1))
396 				DRV_LOG(ERR, "Failed to enable virtq %d.",
397 					vq_index);
398 			else
399 				DRV_LOG(WARNING, "Recover virtq %d: %u.",
400 					vq_index, ++virtq->n_retry);
401 		} else {
402 			/* Too many errors in a short time, give up. */
403 			DRV_LOG(ERR, "Device %s virtq %d failed to recover.",
404 				priv->vdev->device->name, vq_index);
405 		}
406 log:
407 		/* Shift the error time log and record the current time at its end. */
408 		for (i = 1; i < RTE_DIM(virtq->err_time); i++)
409 			virtq->err_time[i - 1] = virtq->err_time[i];
410 		virtq->err_time[RTE_DIM(virtq->err_time) - 1] = rte_rdtsc();
411 unlock:
412 		pthread_mutex_unlock(&virtq->virtq_lock);
413 	}
414 #endif
415 }
416 
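/*
 * Create the device error event channel, switch its FD to non-blocking mode
 * and register it with the interrupt framework as an external interrupt.
 */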
417 int
418 mlx5_vdpa_err_event_setup(struct mlx5_vdpa_priv *priv)
419 {
420 	int ret;
421 	int flags;
422 
423 	/* Set up the device event channel. */
424 	priv->err_chnl = mlx5_glue->devx_create_event_channel(priv->cdev->ctx,
425 							      0);
426 	if (!priv->err_chnl) {
427 		rte_errno = errno;
428 		DRV_LOG(ERR, "Failed to create device event channel %d.",
429 			rte_errno);
430 		goto error;
431 	}
432 	flags = fcntl(priv->err_chnl->fd, F_GETFL);
433 	ret = fcntl(priv->err_chnl->fd, F_SETFL, flags | O_NONBLOCK);
434 	if (ret) {
435 		rte_errno = errno;
436 		DRV_LOG(ERR, "Failed to change device event channel FD.");
437 		goto error;
438 	}
439 	priv->err_intr_handle =
440 		rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
441 	if (priv->err_intr_handle == NULL) {
442 		DRV_LOG(ERR, "Failed to allocate intr_handle.");
443 		goto error;
444 	}
445 	if (rte_intr_fd_set(priv->err_intr_handle, priv->err_chnl->fd))
446 		goto error;
447 
448 	if (rte_intr_type_set(priv->err_intr_handle, RTE_INTR_HANDLE_EXT))
449 		goto error;
450 
451 	ret = rte_intr_callback_register(priv->err_intr_handle,
452 					 mlx5_vdpa_err_interrupt_handler,
453 					 priv);
454 	if (ret != 0) {
455 		rte_intr_fd_set(priv->err_intr_handle, 0);
456 		DRV_LOG(ERR, "Failed to register error interrupt for device %d.",
457 			priv->vid);
458 		rte_errno = -ret;
459 		goto error;
460 	} else {
461 		DRV_LOG(DEBUG, "Registered error interrupt for device %d.",
462 			priv->vid);
463 	}
464 	return 0;
465 error:
466 	mlx5_vdpa_err_event_unset(priv);
467 	return -1;
468 }
469 
470 void
471 mlx5_vdpa_err_event_unset(struct mlx5_vdpa_priv *priv)
472 {
473 	int retries = MLX5_VDPA_INTR_RETRIES;
474 	int ret = -EAGAIN;
475 
476 	if (!rte_intr_fd_get(priv->err_intr_handle))
477 		return;
478 	while (retries-- && ret == -EAGAIN) {
479 		ret = rte_intr_callback_unregister(priv->err_intr_handle,
480 					    mlx5_vdpa_err_interrupt_handler,
481 					    priv);
482 		if (ret == -EAGAIN) {
483 			DRV_LOG(DEBUG, "Try again to unregister fd %d "
484 				"of error interrupt, retries = %d.",
485 				rte_intr_fd_get(priv->err_intr_handle),
486 				retries);
487 			rte_pause();
488 		}
489 	}
490 	if (priv->err_chnl) {
491 #ifdef HAVE_IBV_DEVX_EVENT
492 		union {
493 			struct mlx5dv_devx_async_event_hdr event_resp;
494 			uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) +
495 				    128];
496 		} out;
497 
498 		/* Clean all pending events. */
499 		while (mlx5_glue->devx_get_event(priv->err_chnl,
500 		       &out.event_resp, sizeof(out.buf)) >=
501 		       (ssize_t)sizeof(out.event_resp.cookie))
502 			;
503 #endif
504 		mlx5_glue->devx_destroy_event_channel(priv->err_chnl);
505 		priv->err_chnl = NULL;
506 	}
507 	rte_intr_instance_free(priv->err_intr_handle);
508 }
509 
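/*
 * Start the completion-event thread if at least one virtq works in event
 * mode, pinning it to the configured event core or, by default, to the CPU
 * set of the main lcore.
 */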
510 int
511 mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv)
512 {
513 	int ret;
514 	rte_thread_attr_t attr;
515 	char name[RTE_THREAD_INTERNAL_NAME_SIZE];
516 
517 	if (!priv->eventc)
518 		/* All virtqs are in poll mode. */
519 		return 0;
520 	ret = rte_thread_attr_init(&attr);
521 	if (ret != 0) {
522 		DRV_LOG(ERR, "Failed to initialize thread attributes");
523 		goto out;
524 	}
525 	if (priv->event_core != -1)
526 		CPU_SET(priv->event_core, &attr.cpuset);
527 	else
528 		attr.cpuset = rte_lcore_cpuset(rte_get_main_lcore());
529 	ret = rte_thread_create(&priv->timer_tid,
530 			&attr, mlx5_vdpa_event_handle, priv);
531 	if (ret != 0) {
532 		DRV_LOG(ERR, "Failed to create timer thread.");
533 		goto out;
534 	}
535 	snprintf(name, sizeof(name), "vmlx5-%d", priv->vid);
536 	rte_thread_set_prefixed_name(priv->timer_tid, name);
537 out:
538 	if (ret != 0)
539 		return -1;
540 	return 0;
541 }
542 
543 void
544 mlx5_vdpa_cqe_event_unset(struct mlx5_vdpa_priv *priv)
545 {
546 	struct mlx5_vdpa_virtq *virtq;
547 	int i;
548 
549 	if (priv->timer_tid.opaque_id != 0) {
550 		pthread_cancel((pthread_t)priv->timer_tid.opaque_id);
551 		rte_thread_join(priv->timer_tid, NULL);
552 		/* The mutexes may stay locked after the event thread is cancelled; reinitialize them. */
553 		for (i = 0; i < priv->nr_virtqs; i++) {
554 			virtq = &priv->virtqs[i];
555 			pthread_mutex_init(&virtq->virtq_lock, NULL);
556 		}
557 	}
558 	priv->timer_tid.opaque_id = 0;
559 }
560 
561 void
562 mlx5_vdpa_event_qp_destroy(struct mlx5_vdpa_event_qp *eqp)
563 {
564 	mlx5_devx_qp_destroy(&eqp->sw_qp);
565 	if (eqp->fw_qp)
566 		claim_zero(mlx5_devx_cmd_destroy(eqp->fw_qp));
567 	mlx5_vdpa_cq_destroy(&eqp->cq);
568 	memset(eqp, 0, sizeof(*eqp));
569 }
570 
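/*
 * Move the FW QP and the SW QP, connected back to back, through the
 * RST -> INIT -> RTR -> RTS state sequence.
 */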
571 static int
572 mlx5_vdpa_qps2rts(struct mlx5_vdpa_event_qp *eqp)
573 {
574 	if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RST2INIT_QP,
575 					  eqp->sw_qp.qp->id)) {
576 		DRV_LOG(ERR, "Failed to modify FW QP to INIT state(%u).",
577 			rte_errno);
578 		return -1;
579 	}
580 	if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp.qp,
581 			MLX5_CMD_OP_RST2INIT_QP, eqp->fw_qp->id)) {
582 		DRV_LOG(ERR, "Failed to modify SW QP to INIT state(%u).",
583 			rte_errno);
584 		return -1;
585 	}
586 	if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_INIT2RTR_QP,
587 					  eqp->sw_qp.qp->id)) {
588 		DRV_LOG(ERR, "Failed to modify FW QP to RTR state(%u).",
589 			rte_errno);
590 		return -1;
591 	}
592 	if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp.qp,
593 			MLX5_CMD_OP_INIT2RTR_QP, eqp->fw_qp->id)) {
594 		DRV_LOG(ERR, "Failed to modify SW QP to RTR state(%u).",
595 			rte_errno);
596 		return -1;
597 	}
598 	if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RTR2RTS_QP,
599 					  eqp->sw_qp.qp->id)) {
600 		DRV_LOG(ERR, "Failed to modify FW QP to RTS state(%u).",
601 			rte_errno);
602 		return -1;
603 	}
604 	if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp.qp, MLX5_CMD_OP_RTR2RTS_QP,
605 					  eqp->fw_qp->id)) {
606 		DRV_LOG(ERR, "Failed to modify SW QP to RTS state(%u).",
607 			rte_errno);
608 		return -1;
609 	}
610 	return 0;
611 }
612 
613 int
614 mlx5_vdpa_qps2rst2rts(struct mlx5_vdpa_event_qp *eqp)
615 {
616 	if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_QP_2RST,
617 					  eqp->sw_qp.qp->id)) {
618 		DRV_LOG(ERR, "Failed to modify FW QP to RST state(%u).",
619 			rte_errno);
620 		return -1;
621 	}
622 	if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp.qp,
623 			MLX5_CMD_OP_QP_2RST, eqp->fw_qp->id)) {
624 		DRV_LOG(ERR, "Failed to modify SW QP to RST state(%u).",
625 			rte_errno);
626 		return -1;
627 	}
628 	return mlx5_vdpa_qps2rts(eqp);
629 }
630 
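/*
 * Prepare the event QP and its CQ for a virtq. Objects of a matching size
 * are reused (optionally after a reset back to RST and up to RTS again);
 * otherwise the CQ, the FW QP and the receive-only SW QP are re-created.
 */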
631 int
632 mlx5_vdpa_event_qp_prepare(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
633 	int callfd, struct mlx5_vdpa_virtq *virtq, bool reset)
634 {
635 	struct mlx5_vdpa_event_qp *eqp = &virtq->eqp;
636 	struct mlx5_devx_qp_attr attr = {0};
637 	uint16_t log_desc_n = rte_log2_u32(desc_n);
638 	uint32_t ret;
639 
640 	if (eqp->cq.cq_obj.cq != NULL && log_desc_n == eqp->cq.log_desc_n) {
641 		/* Reuse existing resources. */
642 		eqp->cq.callfd = callfd;
643 		mlx5_vdpa_drain_cq_one(priv, virtq);
644 		/* FW sets the event QP to the error state on queue destroy. */
645 		if (reset && !mlx5_vdpa_qps2rst2rts(eqp))
646 			rte_write32(rte_cpu_to_be_32(RTE_BIT32(log_desc_n)),
647 					&eqp->sw_qp.db_rec[0]);
648 		return 0;
649 	}
650 	if (eqp->fw_qp)
651 		mlx5_vdpa_event_qp_destroy(eqp);
652 	if (mlx5_vdpa_cq_create(priv, log_desc_n, callfd, virtq) ||
653 		!eqp->cq.cq_obj.cq)
654 		return -1;
655 	attr.pd = priv->cdev->pdn;
656 	attr.ts_format =
657 		mlx5_ts_format_conv(priv->cdev->config.hca_attr.qp_ts_format);
658 	eqp->fw_qp = mlx5_devx_cmd_create_qp(priv->cdev->ctx, &attr);
659 	if (!eqp->fw_qp) {
660 		DRV_LOG(ERR, "Failed to create FW QP(%u).", rte_errno);
661 		goto error;
662 	}
663 	attr.uar_index = mlx5_os_get_devx_uar_page_id(priv->uar.obj);
664 	attr.cqn = eqp->cq.cq_obj.cq->id;
665 	attr.num_of_receive_wqes = RTE_BIT32(log_desc_n);
666 	attr.log_rq_stride = rte_log2_u32(MLX5_WSEG_SIZE);
667 	attr.num_of_send_wqbbs = 0; /* No SQ is needed. */
668 	attr.ts_format =
669 		mlx5_ts_format_conv(priv->cdev->config.hca_attr.qp_ts_format);
670 	ret = mlx5_devx_qp_create(priv->cdev->ctx, &(eqp->sw_qp),
671 				  attr.num_of_receive_wqes * MLX5_WSEG_SIZE,
672 				  &attr, SOCKET_ID_ANY);
673 	if (ret) {
674 		DRV_LOG(ERR, "Failed to create SW QP(%u).", rte_errno);
675 		goto error;
676 	}
677 	if (mlx5_vdpa_qps2rts(eqp))
678 		goto error;
679 	eqp->qp_pi = 0;
680 	/* First ringing. */
681 	if (eqp->sw_qp.db_rec)
682 		rte_write32(rte_cpu_to_be_32(RTE_BIT32(log_desc_n)),
683 			&eqp->sw_qp.db_rec[0]);
684 	return 0;
685 error:
686 	mlx5_vdpa_event_qp_destroy(eqp);
687 	return -1;
688 }
689 
690