xref: /dpdk/drivers/vdpa/mlx5/mlx5_vdpa_event.c (revision 32fbcf3139fbff04651b3fe173e9f3457f105221)
18395927cSMatan Azrad /* SPDX-License-Identifier: BSD-3-Clause
28395927cSMatan Azrad  * Copyright 2019 Mellanox Technologies, Ltd
38395927cSMatan Azrad  */
48395927cSMatan Azrad #include <unistd.h>
58395927cSMatan Azrad #include <stdint.h>
641b5a7a8SThomas Monjalon #include <sched.h>
78395927cSMatan Azrad #include <fcntl.h>
8d76a17f7SMatan Azrad #include <sys/eventfd.h>
98395927cSMatan Azrad 
108395927cSMatan Azrad #include <rte_malloc.h>
110e41abd1SMichael Baum #include <rte_memory.h>
128395927cSMatan Azrad #include <rte_errno.h>
138395927cSMatan Azrad #include <rte_lcore.h>
148395927cSMatan Azrad #include <rte_atomic.h>
158395927cSMatan Azrad #include <rte_common.h>
168395927cSMatan Azrad #include <rte_io.h>
17a9dd7275SMatan Azrad #include <rte_alarm.h>
188395927cSMatan Azrad 
198395927cSMatan Azrad #include <mlx5_common.h>
2098174626STal Shnaiderman #include <mlx5_common_os.h>
210e41abd1SMichael Baum #include <mlx5_common_devx.h>
220474419bSXueming Li #include <mlx5_glue.h>
238395927cSMatan Azrad 
248395927cSMatan Azrad #include "mlx5_vdpa_utils.h"
258395927cSMatan Azrad #include "mlx5_vdpa.h"
268395927cSMatan Azrad 
278395927cSMatan Azrad 
280474419bSXueming Li #define MLX5_VDPA_ERROR_TIME_SEC 3u
290474419bSXueming Li 
/*
 * Release the per-device resources shared by all event QPs/CQs:
 * the UAR and (when DevX events are compiled in) the CQ event channel.
 * Safe to call on a partially-prepared priv: the channel is only
 * destroyed when it was actually created.
 */
void
mlx5_vdpa_event_qp_global_release(struct mlx5_vdpa_priv *priv)
{
	/* Release the UAR used for CQ arming doorbells. */
	mlx5_devx_uar_release(&priv->uar);
#ifdef HAVE_IBV_DEVX_EVENT
	/* Destroy the DevX event channel, if one was created. */
	if (priv->eventc) {
		mlx5_os_devx_destroy_event_channel(priv->eventc);
		priv->eventc = NULL;
	}
#endif
}
418395927cSMatan Azrad 
/* Prepare all the global resources for all the event objects.*/
int
mlx5_vdpa_event_qp_global_prepare(struct mlx5_vdpa_priv *priv)
{
	/*
	 * The event channel delivers CQ completion events to the event
	 * thread. OMIT_EV_DATA: only the 64-bit cookie registered at
	 * subscription time is returned, not the full event payload.
	 */
	priv->eventc = mlx5_os_devx_create_event_channel(priv->cdev->ctx,
			   MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
	if (!priv->eventc) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create event channel %d.",
			rte_errno);
		goto error;
	}
	/* UAR provides the doorbell registers used to arm the CQs. */
	if (mlx5_devx_uar_prepare(priv->cdev, &priv->uar) != 0) {
		DRV_LOG(ERR, "Failed to allocate UAR.");
		goto error;
	}
	return 0;
error:
	/* Roll back whatever was created; release handles partial state. */
	mlx5_vdpa_event_qp_global_release(priv);
	return -1;
}
638395927cSMatan Azrad 
648395927cSMatan Azrad static void
mlx5_vdpa_cq_destroy(struct mlx5_vdpa_cq * cq)658395927cSMatan Azrad mlx5_vdpa_cq_destroy(struct mlx5_vdpa_cq *cq)
668395927cSMatan Azrad {
670e41abd1SMichael Baum 	mlx5_devx_cq_destroy(&cq->cq_obj);
688395927cSMatan Azrad 	memset(cq, 0, sizeof(*cq));
698395927cSMatan Azrad }
708395927cSMatan Azrad 
/*
 * Arm the CQ so HW raises one completion event on the event channel.
 * Builds the 64-bit arm doorbell value: high word = arm sequence number |
 * "all events" command | masked consumer index, low word = CQ id, then
 * rings it big-endian via the shared UAR.
 */
static inline void __rte_unused
mlx5_vdpa_cq_arm(struct mlx5_vdpa_priv *priv, struct mlx5_vdpa_cq *cq)
{
	uint32_t arm_sn = cq->arm_sn << MLX5_CQ_SQN_OFFSET;
	uint32_t cq_ci = cq->cq_ci & MLX5_CI_MASK;
	uint32_t doorbell_hi = arm_sn | MLX5_CQ_DBR_CMD_ALL | cq_ci;
	uint64_t doorbell = ((uint64_t)doorbell_hi << 32) | cq->cq_obj.cq->id;
	uint64_t db_be = rte_cpu_to_be_64(doorbell);

	/* Updates the arm doorbell record, then writes the UAR register. */
	mlx5_doorbell_ring(&priv->uar.cq_db, db_be, doorbell_hi,
			   &cq->cq_obj.db_rec[MLX5_CQ_ARM_DB], 0);
	/* Sequence number must change on every arm request. */
	cq->arm_sn++;
	cq->armed = 1;
}
858395927cSMatan Azrad 
/*
 * Create the event CQ of one virtq and subscribe it on the device event
 * channel, passing the virtq pointer as the event cookie (returned later
 * by mlx5_vdpa_event_wait()).
 *
 * @return 0 on success, -1 otherwise (rte_errno may be set); partially
 * created resources are destroyed on failure.
 */
static int
mlx5_vdpa_cq_create(struct mlx5_vdpa_priv *priv, uint16_t log_desc_n,
		int callfd, struct mlx5_vdpa_virtq *virtq)
{
	struct mlx5_devx_cq_attr attr = {
		/* HW overwrites CQE[0] only - see mlx5_vdpa_cq_poll(). */
		.use_first_only = 1,
		.uar_page_id = mlx5_os_get_devx_uar_page_id(priv->uar.obj),
	};
	struct mlx5_vdpa_cq *cq = &virtq->eqp.cq;
	uint16_t event_nums[1] = {0};
	int ret;

	ret = mlx5_devx_cq_create(priv->cdev->ctx, &cq->cq_obj, log_desc_n,
				  &attr, SOCKET_ID_ANY);
	if (ret)
		goto error;
	cq->cq_ci = 0;
	cq->log_desc_n = log_desc_n;
	rte_spinlock_init(&cq->sl);
	/* Subscribe CQ event to the event channel controlled by the driver. */
	ret = mlx5_glue->devx_subscribe_devx_event(priv->eventc,
							cq->cq_obj.cq->obj,
						   sizeof(event_nums),
						   event_nums,
						   (uint64_t)(uintptr_t)virtq);
	if (ret) {
		DRV_LOG(ERR, "Failed to subscribe CQE event.");
		rte_errno = errno;
		goto error;
	}
	cq->callfd = callfd;
	/* Init CQ to ones to be in HW owner in the start. */
	cq->cq_obj.cqes[0].op_own = MLX5_CQE_OWNER_MASK;
	cq->cq_obj.cqes[0].wqe_counter = rte_cpu_to_be_16(UINT16_MAX);
	/* First arming. */
	mlx5_vdpa_cq_arm(priv, cq);
	return 0;
error:
	mlx5_vdpa_cq_destroy(cq);
	return -1;
}
1278395927cSMatan Azrad 
/*
 * Poll the single-CQE event CQ and return the number of new completions.
 * Because the CQ was created with use_first_only, HW keeps rewriting
 * CQE[0]; progress is tracked by the wqe_counter field, not by owner-bit
 * scanning. On progress, refill the SW QP (post "comp" receive slots) and
 * update both doorbell records.
 */
static inline uint32_t
mlx5_vdpa_cq_poll(struct mlx5_vdpa_cq *cq)
{
	struct mlx5_vdpa_event_qp *eqp =
				container_of(cq, struct mlx5_vdpa_event_qp, cq);
	const unsigned int cq_size = 1 << cq->log_desc_n;
	/* Overlay: read wqe_counter + op_own of CQE[0] in one 32-bit load. */
	union {
		struct {
			uint16_t wqe_counter;
			uint8_t rsvd5;
			uint8_t op_own;
		};
		uint32_t word;
	} last_word;
	uint16_t next_wqe_counter = eqp->qp_pi;
	uint16_t cur_wqe_counter;
	uint16_t comp;

	last_word.word = rte_read32(&cq->cq_obj.cqes[0].wqe_counter);
	cur_wqe_counter = rte_be_to_cpu_16(last_word.wqe_counter);
	/* Completions since last poll, in mod-2^16 arithmetic. */
	comp = cur_wqe_counter + (uint16_t)1 - next_wqe_counter;
	if (comp) {
		/* Cache consumer index for the next arm operation. */
		cq->cq_ci += comp;
		MLX5_ASSERT(MLX5_CQE_OPCODE(last_word.op_own) !=
			    MLX5_CQE_INVALID);
		/*
		 * NOTE(review): errors++ fires when the opcode is NOT
		 * RESP_ERR/REQ_ERR, i.e. the condition looks inverted versus
		 * the counter name - confirm which CQE opcode this event QP
		 * is expected to produce before changing it.
		 */
		if (unlikely(!(MLX5_CQE_OPCODE(last_word.op_own) ==
			       MLX5_CQE_RESP_ERR ||
			       MLX5_CQE_OPCODE(last_word.op_own) ==
			       MLX5_CQE_REQ_ERR)))
			cq->errors++;
		/* Barrier: CQE read must complete before DB record update. */
		rte_io_wmb();
		/* Ring CQ doorbell record. */
		cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
		eqp->qp_pi += comp;
		/* Barrier: CQ DB must be visible before SW QP DB. */
		rte_io_wmb();
		/* Ring SW QP doorbell record. */
		eqp->sw_qp.db_rec[0] = rte_cpu_to_be_32(eqp->qp_pi + cq_size);
	}
	return comp;
}
168a9dd7275SMatan Azrad 
169a9dd7275SMatan Azrad static void
mlx5_vdpa_arm_all_cqs(struct mlx5_vdpa_priv * priv)170a9dd7275SMatan Azrad mlx5_vdpa_arm_all_cqs(struct mlx5_vdpa_priv *priv)
171a9dd7275SMatan Azrad {
172057f7d20SLi Zhang 	struct mlx5_vdpa_virtq *virtq;
173a9dd7275SMatan Azrad 	struct mlx5_vdpa_cq *cq;
174a9dd7275SMatan Azrad 	int i;
175a9dd7275SMatan Azrad 
176a9dd7275SMatan Azrad 	for (i = 0; i < priv->nr_virtqs; i++) {
177057f7d20SLi Zhang 		virtq = &priv->virtqs[i];
178057f7d20SLi Zhang 		pthread_mutex_lock(&virtq->virtq_lock);
179a9dd7275SMatan Azrad 		cq = &priv->virtqs[i].eqp.cq;
1800e41abd1SMichael Baum 		if (cq->cq_obj.cq && !cq->armed)
181a9dd7275SMatan Azrad 			mlx5_vdpa_cq_arm(priv, cq);
182057f7d20SLi Zhang 		pthread_mutex_unlock(&virtq->virtq_lock);
183a9dd7275SMatan Azrad 	}
184a9dd7275SMatan Azrad }
185a9dd7275SMatan Azrad 
186edc6391eSMatan Azrad static void
mlx5_vdpa_timer_sleep(struct mlx5_vdpa_priv * priv,uint32_t max)187edc6391eSMatan Azrad mlx5_vdpa_timer_sleep(struct mlx5_vdpa_priv *priv, uint32_t max)
188edc6391eSMatan Azrad {
189edc6391eSMatan Azrad 	if (priv->event_mode == MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER) {
190edc6391eSMatan Azrad 		switch (max) {
191edc6391eSMatan Azrad 		case 0:
192edc6391eSMatan Azrad 			priv->timer_delay_us += priv->event_us;
193edc6391eSMatan Azrad 			break;
194edc6391eSMatan Azrad 		case 1:
195edc6391eSMatan Azrad 			break;
196edc6391eSMatan Azrad 		default:
197edc6391eSMatan Azrad 			priv->timer_delay_us /= max;
198edc6391eSMatan Azrad 			break;
199edc6391eSMatan Azrad 		}
200edc6391eSMatan Azrad 	}
201c9a189f4SXueming Li 	if (priv->timer_delay_us)
202edc6391eSMatan Azrad 		usleep(priv->timer_delay_us);
203b7fa0bf4SMatan Azrad 	else
204b7fa0bf4SMatan Azrad 		/* Give-up CPU to improve polling threads scheduling. */
20541b5a7a8SThomas Monjalon 		sched_yield();
206edc6391eSMatan Azrad }
207edc6391eSMatan Azrad 
20899f9d799SMatan Azrad /* Notify virtio device for specific virtq new traffic. */
20999f9d799SMatan Azrad static uint32_t
mlx5_vdpa_queue_complete(struct mlx5_vdpa_cq * cq)21099f9d799SMatan Azrad mlx5_vdpa_queue_complete(struct mlx5_vdpa_cq *cq)
211a9dd7275SMatan Azrad {
21299f9d799SMatan Azrad 	uint32_t comp = 0;
213a9dd7275SMatan Azrad 
21499f9d799SMatan Azrad 	if (cq->cq_obj.cq) {
21599f9d799SMatan Azrad 		comp = mlx5_vdpa_cq_poll(cq);
216a9dd7275SMatan Azrad 		if (comp) {
217a9dd7275SMatan Azrad 			if (cq->callfd != -1)
21899f9d799SMatan Azrad 				eventfd_write(cq->callfd, (eventfd_t)1);
21999f9d799SMatan Azrad 			cq->armed = 0;
22099f9d799SMatan Azrad 		}
22199f9d799SMatan Azrad 	}
22299f9d799SMatan Azrad 	return comp;
22399f9d799SMatan Azrad }
22499f9d799SMatan Azrad 
22599f9d799SMatan Azrad /* Notify virtio device for any virtq new traffic. */
22699f9d799SMatan Azrad static uint32_t
mlx5_vdpa_queues_complete(struct mlx5_vdpa_priv * priv)22799f9d799SMatan Azrad mlx5_vdpa_queues_complete(struct mlx5_vdpa_priv *priv)
22899f9d799SMatan Azrad {
229057f7d20SLi Zhang 	struct mlx5_vdpa_virtq *virtq;
230057f7d20SLi Zhang 	struct mlx5_vdpa_cq *cq;
23199f9d799SMatan Azrad 	uint32_t max = 0;
232057f7d20SLi Zhang 	uint32_t comp;
233057f7d20SLi Zhang 	int i;
23499f9d799SMatan Azrad 
23599f9d799SMatan Azrad 	for (i = 0; i < priv->nr_virtqs; i++) {
236057f7d20SLi Zhang 		virtq = &priv->virtqs[i];
237057f7d20SLi Zhang 		pthread_mutex_lock(&virtq->virtq_lock);
238057f7d20SLi Zhang 		cq = &virtq->eqp.cq;
239057f7d20SLi Zhang 		comp = mlx5_vdpa_queue_complete(cq);
240057f7d20SLi Zhang 		pthread_mutex_unlock(&virtq->virtq_lock);
241edc6391eSMatan Azrad 		if (comp > max)
242edc6391eSMatan Azrad 			max = comp;
243a9dd7275SMatan Azrad 	}
24499f9d799SMatan Azrad 	return max;
2458395927cSMatan Azrad }
2468395927cSMatan Azrad 
/*
 * Drain pending completions of one virtq event CQ, then reset its state
 * (CQE wqe_counter back to the "no completion yet" sentinel, QP producer
 * index to 0) and re-arm it if needed.
 */
static void
mlx5_vdpa_drain_cq_one(struct mlx5_vdpa_priv *priv,
	struct mlx5_vdpa_virtq *virtq)
{
	struct mlx5_vdpa_cq *cq = &virtq->eqp.cq;

	/* Consume whatever is pending and notify the guest. */
	mlx5_vdpa_queue_complete(cq);
	if (cq->cq_obj.cq) {
		/* Same initial values as mlx5_vdpa_cq_create(). */
		cq->cq_obj.cqes[0].wqe_counter = rte_cpu_to_be_16(UINT16_MAX);
		virtq->eqp.qp_pi = 0;
		if (!cq->armed)
			mlx5_vdpa_cq_arm(priv, cq);
	}
}
261*32fbcf31SYajun Wu 
262*32fbcf31SYajun Wu void
mlx5_vdpa_drain_cq(struct mlx5_vdpa_priv * priv)263*32fbcf31SYajun Wu mlx5_vdpa_drain_cq(struct mlx5_vdpa_priv *priv)
264*32fbcf31SYajun Wu {
265*32fbcf31SYajun Wu 	struct mlx5_vdpa_virtq *virtq;
266*32fbcf31SYajun Wu 	unsigned int i;
267*32fbcf31SYajun Wu 
268*32fbcf31SYajun Wu 	for (i = 0; i < priv->caps.max_num_virtio_queues; i++) {
269*32fbcf31SYajun Wu 		virtq = &priv->virtqs[i];
270*32fbcf31SYajun Wu 		mlx5_vdpa_drain_cq_one(priv, virtq);
271*32fbcf31SYajun Wu 	}
27224969c7bSYajun Wu }
27324969c7bSYajun Wu 
/* Wait on all CQs channel for completion event. */
static struct mlx5_vdpa_virtq *
mlx5_vdpa_event_wait(struct mlx5_vdpa_priv *priv __rte_unused)
{
#ifdef HAVE_IBV_DEVX_EVENT
	union {
		struct mlx5dv_devx_async_event_hdr event_resp;
		uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
	} out;
	/* Blocks until an event arrives on the device event channel. */
	int ret = mlx5_glue->devx_get_event(priv->eventc, &out.event_resp,
					    sizeof(out.buf));

	/*
	 * The cookie is the virtq pointer registered by
	 * mlx5_vdpa_cq_create() at subscription time.
	 */
	if (ret >= 0)
		return (struct mlx5_vdpa_virtq *)
				(uintptr_t)out.event_resp.cookie;
	DRV_LOG(INFO, "Got error in devx_get_event, ret = %d, errno = %d.",
		ret, errno);
#endif
	/* Without DevX event support there is nothing to wait on. */
	return NULL;
}
29499f9d799SMatan Azrad 
/*
 * Event thread main loop (started by mlx5_vdpa_cqe_event_setup()).
 * Timer modes: poll all queues; after no_traffic_max idle iterations arm
 * all CQs and block on the event channel until traffic resumes.
 * Interrupt mode: block on the event channel and re-arm per event.
 * The loops never exit; the thread is terminated by pthread_cancel() in
 * mlx5_vdpa_cqe_event_unset().
 */
static uint32_t
mlx5_vdpa_event_handle(void *arg)
{
	struct mlx5_vdpa_priv *priv = arg;
	struct mlx5_vdpa_virtq *virtq;
	uint32_t max;

	switch (priv->event_mode) {
	case MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER:
	case MLX5_VDPA_EVENT_MODE_FIXED_TIMER:
		priv->timer_delay_us = priv->event_us;
		while (1) {
			max = mlx5_vdpa_queues_complete(priv);
			if (max == 0 && priv->no_traffic_counter++ >=
			    priv->no_traffic_max) {
				DRV_LOG(DEBUG, "Device %s traffic was stopped.",
					priv->vdev->device->name);
				/* Switch to interrupt wait while idle. */
				mlx5_vdpa_arm_all_cqs(priv);
				do {
					virtq = mlx5_vdpa_event_wait(priv);
					if (virtq == NULL)
						break;
					pthread_mutex_lock(
						&virtq->virtq_lock);
					/* Real traffic: back to polling. */
					if (mlx5_vdpa_queue_complete(
						&virtq->eqp.cq) > 0) {
						pthread_mutex_unlock(
							&virtq->virtq_lock);
						break;
					}
					pthread_mutex_unlock(
						&virtq->virtq_lock);
				} while (1);
				/* Restart the adaptive timer from scratch. */
				priv->timer_delay_us = priv->event_us;
				priv->no_traffic_counter = 0;
			} else if (max != 0) {
				priv->no_traffic_counter = 0;
			}
			mlx5_vdpa_timer_sleep(priv, max);
		}
		return 0;
	case MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT:
		do {
			virtq = mlx5_vdpa_event_wait(priv);
			if (virtq != NULL) {
				pthread_mutex_lock(&virtq->virtq_lock);
				/* Re-arm only when completions were seen. */
				if (mlx5_vdpa_queue_complete(
					&virtq->eqp.cq) > 0)
					mlx5_vdpa_cq_arm(priv, &virtq->eqp.cq);
				pthread_mutex_unlock(&virtq->virtq_lock);
			}
		} while (1);
		return 0;
	default:
		return 0;
	}
}
3528395927cSMatan Azrad 
/*
 * Interrupt callback of the device error event channel: drain all pending
 * error events and try to recover each affected virtq by disabling and
 * re-enabling it. Recovery is retried only while errors keep arriving
 * less often than the err_time window allows; otherwise the queue is
 * given up. The cookie encodes virtq index (low 32 bits) and virtq
 * version (high 32 bits) so stale events for re-created queues are
 * ignored.
 */
static void
mlx5_vdpa_err_interrupt_handler(void *cb_arg __rte_unused)
{
#ifdef HAVE_IBV_DEVX_EVENT
	struct mlx5_vdpa_priv *priv = cb_arg;
	union {
		struct mlx5dv_devx_async_event_hdr event_resp;
		uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
	} out;
	uint32_t vq_index, i, version;
	struct mlx5_vdpa_virtq *virtq;
	uint64_t sec;

	/* FD is non-blocking: loop until the channel is drained. */
	while (mlx5_glue->devx_get_event(priv->err_chnl, &out.event_resp,
					 sizeof(out.buf)) >=
				       (ssize_t)sizeof(out.event_resp.cookie)) {
		vq_index = out.event_resp.cookie & UINT32_MAX;
		version = out.event_resp.cookie >> 32;
		if (vq_index >= priv->nr_virtqs) {
			DRV_LOG(ERR, "Invalid device %s error event virtq %d.",
				priv->vdev->device->name, vq_index);
			continue;
		}
		virtq = &priv->virtqs[vq_index];
		pthread_mutex_lock(&virtq->virtq_lock);
		/* Drop stale events of disabled or re-created queues. */
		if (!virtq->enable || virtq->version != version)
			goto unlock;
		/* Ignore errors raised during the first seconds of uptime. */
		if (rte_rdtsc() / rte_get_tsc_hz() < MLX5_VDPA_ERROR_TIME_SEC)
			goto unlock;
		virtq->stopped = 1;
		/* Query error info. */
		if (mlx5_vdpa_virtq_query(priv, vq_index))
			goto log;
		/* Disable vq. */
		if (mlx5_vdpa_virtq_enable(priv, vq_index, 0)) {
			DRV_LOG(ERR, "Failed to disable virtq %d.", vq_index);
			goto log;
		}
		/* Retry if error happens less than N times in 3 seconds. */
		sec = (rte_rdtsc() - virtq->err_time[0]) / rte_get_tsc_hz();
		if (sec > MLX5_VDPA_ERROR_TIME_SEC) {
			/* Retry. */
			if (mlx5_vdpa_virtq_enable(priv, vq_index, 1))
				DRV_LOG(ERR, "Failed to enable virtq %d.",
					vq_index);
			else
				DRV_LOG(WARNING, "Recover virtq %d: %u.",
					vq_index, ++virtq->n_retry);
		} else {
			/* Retry timeout, give up. */
			DRV_LOG(ERR, "Device %s virtq %d failed to recover.",
				priv->vdev->device->name, vq_index);
		}
log:
		/* Shift in current time to error time log end. */
		for (i = 1; i < RTE_DIM(virtq->err_time); i++)
			virtq->err_time[i - 1] = virtq->err_time[i];
		virtq->err_time[RTE_DIM(virtq->err_time) - 1] = rte_rdtsc();
unlock:
		pthread_mutex_unlock(&virtq->virtq_lock);
	}
#endif
}
4160474419bSXueming Li 
4170474419bSXueming Li int
mlx5_vdpa_err_event_setup(struct mlx5_vdpa_priv * priv)4180474419bSXueming Li mlx5_vdpa_err_event_setup(struct mlx5_vdpa_priv *priv)
4190474419bSXueming Li {
4200474419bSXueming Li 	int ret;
4210474419bSXueming Li 	int flags;
4220474419bSXueming Li 
4230474419bSXueming Li 	/* Setup device event channel. */
424662d0dc6SMichael Baum 	priv->err_chnl = mlx5_glue->devx_create_event_channel(priv->cdev->ctx,
425662d0dc6SMichael Baum 							      0);
4260474419bSXueming Li 	if (!priv->err_chnl) {
4270474419bSXueming Li 		rte_errno = errno;
4280474419bSXueming Li 		DRV_LOG(ERR, "Failed to create device event channel %d.",
4290474419bSXueming Li 			rte_errno);
4300474419bSXueming Li 		goto error;
4310474419bSXueming Li 	}
4320474419bSXueming Li 	flags = fcntl(priv->err_chnl->fd, F_GETFL);
4330474419bSXueming Li 	ret = fcntl(priv->err_chnl->fd, F_SETFL, flags | O_NONBLOCK);
4340474419bSXueming Li 	if (ret) {
4355fe068bfSXueming Li 		rte_errno = errno;
4360474419bSXueming Li 		DRV_LOG(ERR, "Failed to change device event channel FD.");
4370474419bSXueming Li 		goto error;
4380474419bSXueming Li 	}
4395fe068bfSXueming Li 	priv->err_intr_handle =
4405fe068bfSXueming Li 		rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
4415fe068bfSXueming Li 	if (priv->err_intr_handle == NULL) {
4425fe068bfSXueming Li 		DRV_LOG(ERR, "Fail to allocate intr_handle");
4435fe068bfSXueming Li 		goto error;
4445fe068bfSXueming Li 	}
445d61138d4SHarman Kalra 	if (rte_intr_fd_set(priv->err_intr_handle, priv->err_chnl->fd))
446d61138d4SHarman Kalra 		goto error;
447d61138d4SHarman Kalra 
448d61138d4SHarman Kalra 	if (rte_intr_type_set(priv->err_intr_handle, RTE_INTR_HANDLE_EXT))
449d61138d4SHarman Kalra 		goto error;
450d61138d4SHarman Kalra 
4515fe068bfSXueming Li 	ret = rte_intr_callback_register(priv->err_intr_handle,
4520474419bSXueming Li 					 mlx5_vdpa_err_interrupt_handler,
4535fe068bfSXueming Li 					 priv);
4545fe068bfSXueming Li 	if (ret != 0) {
455d61138d4SHarman Kalra 		rte_intr_fd_set(priv->err_intr_handle, 0);
4560474419bSXueming Li 		DRV_LOG(ERR, "Failed to register error interrupt for device %d.",
4570474419bSXueming Li 			priv->vid);
4585fe068bfSXueming Li 		rte_errno = -ret;
4590474419bSXueming Li 		goto error;
4600474419bSXueming Li 	} else {
4610474419bSXueming Li 		DRV_LOG(DEBUG, "Registered error interrupt for device%d.",
4620474419bSXueming Li 			priv->vid);
4630474419bSXueming Li 	}
4640474419bSXueming Li 	return 0;
4650474419bSXueming Li error:
4660474419bSXueming Li 	mlx5_vdpa_err_event_unset(priv);
4670474419bSXueming Li 	return -1;
4680474419bSXueming Li }
4690474419bSXueming Li 
/*
 * Tear down the error interrupt: unregister the callback (retrying while
 * the handler may still be running and EAL returns -EAGAIN), drain and
 * destroy the event channel, and free the interrupt handle.
 * A zero interrupt FD means setup never registered - nothing to undo.
 */
void
mlx5_vdpa_err_event_unset(struct mlx5_vdpa_priv *priv)
{
	int retries = MLX5_VDPA_INTR_RETRIES;
	int ret = -EAGAIN;

	if (!rte_intr_fd_get(priv->err_intr_handle))
		return;
	while (retries-- && ret == -EAGAIN) {
		ret = rte_intr_callback_unregister(priv->err_intr_handle,
					    mlx5_vdpa_err_interrupt_handler,
					    priv);
		if (ret == -EAGAIN) {
			DRV_LOG(DEBUG, "Try again to unregister fd %d "
				"of error interrupt, retries = %d.",
				rte_intr_fd_get(priv->err_intr_handle),
				retries);
			rte_pause();
		}
	}
	if (priv->err_chnl) {
#ifdef HAVE_IBV_DEVX_EVENT
		union {
			struct mlx5dv_devx_async_event_hdr event_resp;
			uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) +
				    128];
		} out;

		/* Clean all pending events. */
		while (mlx5_glue->devx_get_event(priv->err_chnl,
		       &out.event_resp, sizeof(out.buf)) >=
		       (ssize_t)sizeof(out.event_resp.cookie))
			;
#endif
		mlx5_glue->devx_destroy_event_channel(priv->err_chnl);
		priv->err_chnl = NULL;
	}
	rte_intr_instance_free(priv->err_intr_handle);
}
5090474419bSXueming Li 
/*
 * Spawn the per-device event thread running mlx5_vdpa_event_handle().
 * The thread is pinned to the configured event_core, or to the main
 * lcore's cpuset when none was given. No-op when all virtqs are in pure
 * poll mode (no event channel).
 *
 * @return 0 on success, -1 on failure.
 */
int
mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv)
{
	int ret;
	rte_thread_attr_t attr;
	char name[RTE_THREAD_INTERNAL_NAME_SIZE];

	if (!priv->eventc)
		/* All virtqs are in poll mode. */
		return 0;
	ret = rte_thread_attr_init(&attr);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to initialize thread attributes");
		goto out;
	}
	/* Pin to the requested core, else inherit the main lcore cpuset. */
	if (priv->event_core != -1)
		CPU_SET(priv->event_core, &attr.cpuset);
	else
		attr.cpuset = rte_lcore_cpuset(rte_get_main_lcore());
	ret = rte_thread_create(&priv->timer_tid,
			&attr, mlx5_vdpa_event_handle, priv);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to create timer thread.");
		goto out;
	}
	/* Name the thread "vmlx5-<vid>" for debugging tools. */
	snprintf(name, sizeof(name), "vmlx5-%d", priv->vid);
	rte_thread_set_prefixed_name(priv->timer_tid, name);
out:
	if (ret != 0)
		return -1;
	return 0;
}
5428395927cSMatan Azrad 
/*
 * Cancel and join the event thread, then re-initialize every virtq mutex:
 * the thread may have been cancelled while holding one of them.
 */
void
mlx5_vdpa_cqe_event_unset(struct mlx5_vdpa_priv *priv)
{
	struct mlx5_vdpa_virtq *virtq;
	int i;

	if (priv->timer_tid.opaque_id != 0) {
		pthread_cancel((pthread_t)priv->timer_tid.opaque_id);
		rte_thread_join(priv->timer_tid, NULL);
		/* The mutex may stay locked after event thread cancel, initiate it. */
		for (i = 0; i < priv->nr_virtqs; i++) {
			virtq = &priv->virtqs[i];
			pthread_mutex_init(&virtq->virtq_lock, NULL);
		}
	}
	priv->timer_tid.opaque_id = 0;
}
5608395927cSMatan Azrad 
/*
 * Destroy one event QP pair and its CQ, then zero the wrapper so
 * mlx5_vdpa_event_qp_prepare() sees a clean state. SW QP first, then FW
 * QP, then the CQ they share.
 */
void
mlx5_vdpa_event_qp_destroy(struct mlx5_vdpa_event_qp *eqp)
{
	mlx5_devx_qp_destroy(&eqp->sw_qp);
	if (eqp->fw_qp)
		claim_zero(mlx5_devx_cmd_destroy(eqp->fw_qp));
	mlx5_vdpa_cq_destroy(&eqp->cq);
	memset(eqp, 0, sizeof(*eqp));
}
5708395927cSMatan Azrad 
/*
 * Walk the loopback-connected FW and SW QPs through the standard state
 * machine RST -> INIT -> RTR -> RTS, FW side first at each step. Each QP
 * is given the peer QP number as the remote for the RTR transition.
 *
 * @return 0 on success, -1 on any failed transition (rte_errno set by
 * the DevX layer).
 */
static int
mlx5_vdpa_qps2rts(struct mlx5_vdpa_event_qp *eqp)
{
	if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RST2INIT_QP,
					  eqp->sw_qp.qp->id)) {
		DRV_LOG(ERR, "Failed to modify FW QP to INIT state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp.qp,
			MLX5_CMD_OP_RST2INIT_QP, eqp->fw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify SW QP to INIT state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_INIT2RTR_QP,
					  eqp->sw_qp.qp->id)) {
		DRV_LOG(ERR, "Failed to modify FW QP to RTR state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp.qp,
			MLX5_CMD_OP_INIT2RTR_QP, eqp->fw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify SW QP to RTR state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RTR2RTS_QP,
					  eqp->sw_qp.qp->id)) {
		DRV_LOG(ERR, "Failed to modify FW QP to RTS state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp.qp, MLX5_CMD_OP_RTR2RTS_QP,
					  eqp->fw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify SW QP to RTS state(%u).",
			rte_errno);
		return -1;
	}
	return 0;
}
6128395927cSMatan Azrad 
/*
 * Reset both QPs of the pair to RST and then bring them back up to RTS.
 * Used when reusing an existing event QP whose state may have been moved
 * to error by a previous queue destroy.
 *
 * @return 0 on success, -1 on any failed transition.
 */
int
mlx5_vdpa_qps2rst2rts(struct mlx5_vdpa_event_qp *eqp)
{
	if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_QP_2RST,
					  eqp->sw_qp.qp->id)) {
		DRV_LOG(ERR, "Failed to modify FW QP to RST state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp.qp,
			MLX5_CMD_OP_QP_2RST, eqp->fw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify SW QP to RST state(%u).",
			rte_errno);
		return -1;
	}
	return mlx5_vdpa_qps2rts(eqp);
}
63024969c7bSYajun Wu 
/*
 * Prepare (create or reuse) the event QP pair + CQ of one virtq.
 * When a CQ of the same size already exists, the resources are reused:
 * the CQ is drained/reset and, if requested, the QPs are cycled through
 * RST back to RTS. Otherwise any old pair is destroyed and a fresh
 * CQ, FW QP and SW QP are created and connected.
 *
 * @param desc_n   Descriptor count; rounded to the next power of two.
 * @param callfd   Guest notification eventfd, or -1 for none.
 * @param reset    Reuse path only: also reset the QP states.
 * @return 0 on success, -1 otherwise with partial state destroyed.
 */
int
mlx5_vdpa_event_qp_prepare(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
	int callfd, struct mlx5_vdpa_virtq *virtq, bool reset)
{
	struct mlx5_vdpa_event_qp *eqp = &virtq->eqp;
	struct mlx5_devx_qp_attr attr = {0};
	uint16_t log_desc_n = rte_log2_u32(desc_n);
	uint32_t ret;

	if (eqp->cq.cq_obj.cq != NULL && log_desc_n == eqp->cq.log_desc_n) {
		/* Reuse existing resources. */
		eqp->cq.callfd = callfd;
		mlx5_vdpa_drain_cq_one(priv, virtq);
		/* FW will set event qp to error state in q destroy. */
		if (reset && !mlx5_vdpa_qps2rst2rts(eqp))
			/* Re-post all receive slots after the QP reset. */
			rte_write32(rte_cpu_to_be_32(RTE_BIT32(log_desc_n)),
					&eqp->sw_qp.db_rec[0]);
		return 0;
	}
	/* Size changed: drop the old pair before re-creating. */
	if (eqp->fw_qp)
		mlx5_vdpa_event_qp_destroy(eqp);
	if (mlx5_vdpa_cq_create(priv, log_desc_n, callfd, virtq) ||
		!eqp->cq.cq_obj.cq)
		return -1;
	attr.pd = priv->cdev->pdn;
	attr.ts_format =
		mlx5_ts_format_conv(priv->cdev->config.hca_attr.qp_ts_format);
	eqp->fw_qp = mlx5_devx_cmd_create_qp(priv->cdev->ctx, &attr);
	if (!eqp->fw_qp) {
		DRV_LOG(ERR, "Failed to create FW QP(%u).", rte_errno);
		goto error;
	}
	attr.uar_index = mlx5_os_get_devx_uar_page_id(priv->uar.obj);
	attr.cqn = eqp->cq.cq_obj.cq->id;
	attr.num_of_receive_wqes = RTE_BIT32(log_desc_n);
	attr.log_rq_stride = rte_log2_u32(MLX5_WSEG_SIZE);
	attr.num_of_send_wqbbs = 0; /* No need SQ. */
	attr.ts_format =
		mlx5_ts_format_conv(priv->cdev->config.hca_attr.qp_ts_format);
	ret = mlx5_devx_qp_create(priv->cdev->ctx, &(eqp->sw_qp),
				  attr.num_of_receive_wqes * MLX5_WSEG_SIZE,
				  &attr, SOCKET_ID_ANY);
	if (ret) {
		DRV_LOG(ERR, "Failed to create SW QP(%u).", rte_errno);
		goto error;
	}
	if (mlx5_vdpa_qps2rts(eqp))
		goto error;
	eqp->qp_pi = 0;
	/* First ringing. */
	if (eqp->sw_qp.db_rec)
		rte_write32(rte_cpu_to_be_32(RTE_BIT32(log_desc_n)),
			&eqp->sw_qp.db_rec[0]);
	return 0;
error:
	mlx5_vdpa_event_qp_destroy(eqp);
	return -1;
}
689057f7d20SLi Zhang 
690