xref: /dpdk/drivers/net/mlx5/linux/mlx5_verbs.c (revision 5a959cbfa68cfd15ca024a2883c6bacd7aa046bb)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <errno.h>
#include <stdbool.h>
#include <string.h>
#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>
#include <sys/queue.h>

#include "mlx5_autoconf.h"

#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ethdev_driver.h>
#include <rte_common.h>

#include <mlx5_glue.h>
#include <mlx5_common.h>
#include <mlx5_common_mr.h>
#include <mlx5_rxtx.h>
#include <mlx5_verbs.h>
#include <mlx5_utils.h>
#include <mlx5_malloc.h>

/**
 * Register a memory region. Given a protection domain pointer, a start
 * address and a length, register the covered memory region.
 *
 * @param[in] pd
 *   Pointer to protection domain context.
 * @param[in] addr
 *   Pointer to memory start address.
 * @param[in] length
 *   Length of the memory to register.
 * @param[out] pmd_mr
 *   pmd_mr struct set with lkey, address, length and pointer to MR object.
 *
 * @return
 *   0 on successful registration, -1 otherwise.
 */
static int
mlx5_reg_mr(void *pd, void *addr, size_t length,
	    struct mlx5_pmd_mr *pmd_mr)
{
	return mlx5_common_verbs_reg_mr(pd, addr, length, pmd_mr);
}

/**
 * Deregister a memory region. Given the mlx5 PMD MR, deregister it.
 *
 * @param[in] pmd_mr
 *   pmd_mr struct set with lkey, address, length and pointer to MR object.
 */
static void
mlx5_dereg_mr(struct mlx5_pmd_mr *pmd_mr)
{
	mlx5_common_verbs_dereg_mr(pmd_mr);
}

/* Verbs operations. */
const struct mlx5_verbs_ops mlx5_verbs_ops = {
	.reg_mr = mlx5_reg_mr,
	.dereg_mr = mlx5_dereg_mr,
};
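
/*
 * Usage sketch (illustrative only, not part of the driver): a caller
 * holding a valid protection domain registers a buffer through the ops
 * table and uses the resulting lkey; "pd", "buf" and "len" below are
 * assumptions for the example.
 *
 * @code
 *	struct mlx5_pmd_mr mr;
 *
 *	if (mlx5_verbs_ops.reg_mr(pd, buf, len, &mr) < 0)
 *		return -1;
 *	// ... reference mr.lkey in posted WQEs ...
 *	mlx5_verbs_ops.dereg_mr(&mr);
 * @endcode
 */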

/**
 * Modify Rx WQ VLAN stripping offload.
 *
 * @param rxq_obj
 *   Rx queue object.
 * @param on
 *   Enable VLAN stripping if non-zero, disable it otherwise.
 *
 * @return
 *   0 on success, non-zero otherwise.
 */
static int
mlx5_rxq_obj_modify_wq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)
{
	uint16_t vlan_offloads = on ? IBV_WQ_FLAGS_CVLAN_STRIPPING : 0;
	struct ibv_wq_attr mod = {
		.attr_mask = IBV_WQ_ATTR_FLAGS,
		.flags_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING,
		.flags = vlan_offloads,
	};

	return mlx5_glue->modify_wq(rxq_obj->wq, &mod);
}

/**
 * Modify the state of the specified WQ: move it to the ready state on
 * start, back to reset on stop.
 *
 * @param rxq_obj
 *   Verbs Rx queue object.
 * @param is_start
 *   True to move the WQ to IBV_WQS_RDY, false to move it to IBV_WQS_RESET.
 *
 * @return
 *   0 on success, the ibv_modify_wq() error code otherwise.
 */
static int
mlx5_ibv_modify_wq(struct mlx5_rxq_obj *rxq_obj, bool is_start)
{
	struct ibv_wq_attr mod = {
		.attr_mask = IBV_WQ_ATTR_STATE,
		.wq_state = is_start ? IBV_WQS_RDY : IBV_WQS_RESET,
	};

	return mlx5_glue->modify_wq(rxq_obj->wq, &mod);
}
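
/*
 * Illustrative call sites (assumed, mirroring how the driver is expected
 * to start and stop an Rx queue through the ops table):
 *
 * @code
 *	mlx5_ibv_modify_wq(rxq_obj, true);	// queue start: RESET -> RDY
 *	mlx5_ibv_modify_wq(rxq_obj, false);	// queue stop:  RDY -> RESET
 * @endcode
 */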
1148bb2410eSOphir Munk 
1156deb19e1SMichael Baum /**
1166deb19e1SMichael Baum  * Create a CQ Verbs object.
1176deb19e1SMichael Baum  *
1186deb19e1SMichael Baum  * @param dev
1196deb19e1SMichael Baum  *   Pointer to Ethernet device.
120675911d0SMichael Baum  * @param idx
121675911d0SMichael Baum  *   Queue index in DPDK Rx queue array.
1226deb19e1SMichael Baum  *
1236deb19e1SMichael Baum  * @return
124675911d0SMichael Baum  *   The Verbs CQ object initialized, NULL otherwise and rte_errno is set.
1256deb19e1SMichael Baum  */
1266deb19e1SMichael Baum static struct ibv_cq *
127675911d0SMichael Baum mlx5_rxq_ibv_cq_create(struct rte_eth_dev *dev, uint16_t idx)
1286deb19e1SMichael Baum {
129675911d0SMichael Baum 	struct mlx5_priv *priv = dev->data->dev_private;
130675911d0SMichael Baum 	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
131675911d0SMichael Baum 	struct mlx5_rxq_ctrl *rxq_ctrl =
132675911d0SMichael Baum 		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
133675911d0SMichael Baum 	struct mlx5_rxq_obj *rxq_obj = rxq_ctrl->obj;
134675911d0SMichael Baum 	unsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data);
1356deb19e1SMichael Baum 	struct {
1366deb19e1SMichael Baum 		struct ibv_cq_init_attr_ex ibv;
1376deb19e1SMichael Baum 		struct mlx5dv_cq_init_attr mlx5;
1386deb19e1SMichael Baum 	} cq_attr;
1396deb19e1SMichael Baum 
1406deb19e1SMichael Baum 	cq_attr.ibv = (struct ibv_cq_init_attr_ex){
1416deb19e1SMichael Baum 		.cqe = cqe_n,
1426deb19e1SMichael Baum 		.channel = rxq_obj->ibv_channel,
1436deb19e1SMichael Baum 		.comp_mask = 0,
1446deb19e1SMichael Baum 	};
1456deb19e1SMichael Baum 	cq_attr.mlx5 = (struct mlx5dv_cq_init_attr){
1466deb19e1SMichael Baum 		.comp_mask = 0,
1476deb19e1SMichael Baum 	};
1486deb19e1SMichael Baum 	if (priv->config.cqe_comp && !rxq_data->hw_timestamp) {
1496deb19e1SMichael Baum 		cq_attr.mlx5.comp_mask |=
1506deb19e1SMichael Baum 				MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
1516deb19e1SMichael Baum #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
1526deb19e1SMichael Baum 		cq_attr.mlx5.cqe_comp_res_format =
1536deb19e1SMichael Baum 				mlx5_rxq_mprq_enabled(rxq_data) ?
1546deb19e1SMichael Baum 				MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX :
1556deb19e1SMichael Baum 				MLX5DV_CQE_RES_FORMAT_HASH;
1566deb19e1SMichael Baum #else
1576deb19e1SMichael Baum 		cq_attr.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
1586deb19e1SMichael Baum #endif
1596deb19e1SMichael Baum 		/*
1606deb19e1SMichael Baum 		 * For vectorized Rx, it must not be doubled in order to
1616deb19e1SMichael Baum 		 * make cq_ci and rq_ci aligned.
1626deb19e1SMichael Baum 		 */
1636deb19e1SMichael Baum 		if (mlx5_rxq_check_vec_support(rxq_data) < 0)
1646deb19e1SMichael Baum 			cq_attr.ibv.cqe *= 2;
1656deb19e1SMichael Baum 	} else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
1666deb19e1SMichael Baum 		DRV_LOG(DEBUG,
1676deb19e1SMichael Baum 			"Port %u Rx CQE compression is disabled for HW"
1686deb19e1SMichael Baum 			" timestamp.",
1696deb19e1SMichael Baum 			dev->data->port_id);
1706deb19e1SMichael Baum 	}
1716deb19e1SMichael Baum #ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
1726deb19e1SMichael Baum 	if (priv->config.cqe_pad) {
1736deb19e1SMichael Baum 		cq_attr.mlx5.comp_mask |= MLX5DV_CQ_INIT_ATTR_MASK_FLAGS;
1746deb19e1SMichael Baum 		cq_attr.mlx5.flags |= MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
1756deb19e1SMichael Baum 	}
1766deb19e1SMichael Baum #endif
1776deb19e1SMichael Baum 	return mlx5_glue->cq_ex_to_cq(mlx5_glue->dv_create_cq(priv->sh->ctx,
1786deb19e1SMichael Baum 							      &cq_attr.ibv,
1796deb19e1SMichael Baum 							      &cq_attr.mlx5));
1806deb19e1SMichael Baum }
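
/*
 * Worked example for the CQE sizing above (numbers are illustrative): with
 * cqe_n == 512 and CQE compression enabled, the scalar Rx path creates the
 * CQ with 1024 entries (cqe *= 2), while the vectorized path keeps 512 so
 * that cq_ci and rq_ci advance in lockstep.
 */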

/**
 * Create a WQ Verbs object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   The Verbs WQ object initialized, NULL otherwise and rte_errno is set.
 */
static struct ibv_wq *
mlx5_rxq_ibv_wq_create(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_rxq_obj *rxq_obj = rxq_ctrl->obj;
	unsigned int wqe_n = 1 << rxq_data->elts_n;
	struct {
		struct ibv_wq_init_attr ibv;
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
		struct mlx5dv_wq_init_attr mlx5;
#endif
	} wq_attr;

	wq_attr.ibv = (struct ibv_wq_init_attr){
		.wq_context = NULL, /* Could be useful in the future. */
		.wq_type = IBV_WQT_RQ,
		/* Max number of outstanding WRs. */
		.max_wr = wqe_n >> rxq_data->sges_n,
		/* Max number of scatter/gather elements in a WR. */
		.max_sge = 1 << rxq_data->sges_n,
		.pd = priv->sh->pd,
		.cq = rxq_obj->ibv_cq,
		/*
		 * comp_mask takes IBV_WQ_INIT_ATTR_* values; the original
		 * IBV_WQ_FLAGS_CVLAN_STRIPPING happened to share value 1.
		 */
		.comp_mask = IBV_WQ_INIT_ATTR_FLAGS,
		.create_flags = (rxq_data->vlan_strip ?
				 IBV_WQ_FLAGS_CVLAN_STRIPPING : 0),
	};
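	/*
	 * Note: max_wr * max_sge always equals the number of ring elements
	 * (wqe_n); e.g. with 512 elements and sges_n == 2 the WQ is created
	 * as 128 WRs of 4 SGEs each (illustrative numbers).
	 */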
	/* By default, FCS (CRC) is stripped by hardware. */
	if (rxq_data->crc_present) {
		wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
		wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
	}
	if (priv->config.hw_padding) {
#if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
		wq_attr.ibv.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
		wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
#elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
		wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_PCI_WRITE_END_PADDING;
		wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
#endif
	}
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
	wq_attr.mlx5 = (struct mlx5dv_wq_init_attr){
		.comp_mask = 0,
	};
	if (mlx5_rxq_mprq_enabled(rxq_data)) {
		struct mlx5dv_striding_rq_init_attr *mprq_attr =
						&wq_attr.mlx5.striding_rq_attrs;

		wq_attr.mlx5.comp_mask |= MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ;
		*mprq_attr = (struct mlx5dv_striding_rq_init_attr){
			.single_stride_log_num_of_bytes = rxq_data->strd_sz_n,
			.single_wqe_log_num_of_strides = rxq_data->strd_num_n,
			.two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT,
		};
	}
	rxq_obj->wq = mlx5_glue->dv_create_wq(priv->sh->ctx, &wq_attr.ibv,
					      &wq_attr.mlx5);
#else
	rxq_obj->wq = mlx5_glue->create_wq(priv->sh->ctx, &wq_attr.ibv);
#endif
	if (rxq_obj->wq) {
		/*
		 * Make sure the numbers of WRs and SGEs match expectations
		 * since a queue cannot allocate more than "desc" buffers.
		 */
		if (wq_attr.ibv.max_wr != (wqe_n >> rxq_data->sges_n) ||
		    wq_attr.ibv.max_sge != (1u << rxq_data->sges_n)) {
			DRV_LOG(ERR,
				"Port %u Rx queue %u requested %u*%u but got"
				" %u*%u WRs*SGEs.",
				dev->data->port_id, idx,
				wqe_n >> rxq_data->sges_n,
				(1 << rxq_data->sges_n),
				wq_attr.ibv.max_wr, wq_attr.ibv.max_sge);
			claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
			rxq_obj->wq = NULL;
			rte_errno = EINVAL;
		}
	}
	return rxq_obj->wq;
}

/**
 * Create the Rx queue Verbs object.
 *
 * Allocates the completion channel (when IRQs are enabled), creates the
 * CQ and the WQ, moves the WQ to the ready state and exposes the ring
 * memory to the datapath.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
	struct mlx5dv_cq cq_info;
	struct mlx5dv_rwq rwq;
	int ret = 0;
	struct mlx5dv_obj obj;

	MLX5_ASSERT(rxq_data);
	MLX5_ASSERT(tmpl);
	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
	priv->verbs_alloc_ctx.obj = rxq_ctrl;
	tmpl->type = MLX5_RXQ_OBJ_TYPE_IBV;
	tmpl->rxq_ctrl = rxq_ctrl;
	if (rxq_ctrl->irq) {
		tmpl->ibv_channel =
				mlx5_glue->create_comp_channel(priv->sh->ctx);
		if (!tmpl->ibv_channel) {
			DRV_LOG(ERR, "Port %u: comp channel creation failure.",
				dev->data->port_id);
			rte_errno = ENOMEM;
			goto error;
		}
		tmpl->fd = ((struct ibv_comp_channel *)(tmpl->ibv_channel))->fd;
	}
	/* Create CQ using Verbs API. */
	tmpl->ibv_cq = mlx5_rxq_ibv_cq_create(dev, idx);
	if (!tmpl->ibv_cq) {
		DRV_LOG(ERR, "Port %u Rx queue %u CQ creation failure.",
			dev->data->port_id, idx);
		rte_errno = ENOMEM;
		goto error;
	}
	obj.cq.in = tmpl->ibv_cq;
	obj.cq.out = &cq_info;
	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ);
	if (ret) {
		rte_errno = ret;
		goto error;
	}
	if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
		DRV_LOG(ERR,
			"Port %u wrong MLX5_CQE_SIZE environment "
			"variable value: it should be set to %u.",
			dev->data->port_id, RTE_CACHE_LINE_SIZE);
		rte_errno = EINVAL;
		goto error;
	}
	/* Fill the rings. */
	rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
	rxq_data->cq_db = cq_info.dbrec;
	rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
	rxq_data->cq_uar = cq_info.cq_uar;
	rxq_data->cqn = cq_info.cqn;
	/* Create WQ (RQ) using Verbs API. */
	tmpl->wq = mlx5_rxq_ibv_wq_create(dev, idx);
	if (!tmpl->wq) {
		DRV_LOG(ERR, "Port %u Rx queue %u WQ creation failure.",
			dev->data->port_id, idx);
		rte_errno = ENOMEM;
		goto error;
	}
	/* Change queue state to ready. */
	ret = mlx5_ibv_modify_wq(tmpl, true);
	if (ret) {
		DRV_LOG(ERR,
			"Port %u Rx queue %u WQ state to IBV_WQS_RDY failed.",
			dev->data->port_id, idx);
		rte_errno = ret;
		goto error;
	}
	obj.rwq.in = tmpl->wq;
	obj.rwq.out = &rwq;
	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_RWQ);
	if (ret) {
		rte_errno = ret;
		goto error;
	}
	rxq_data->wqes = rwq.buf;
	rxq_data->rq_db = rwq.dbrec;
	rxq_data->cq_arm_sn = 0;
	mlx5_rxq_initialize(rxq_data);
	rxq_data->cq_ci = 0;
	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
	rxq_ctrl->wqn = ((struct ibv_wq *)(tmpl->wq))->wq_num;
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	if (tmpl->wq)
		claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
	if (tmpl->ibv_cq)
		claim_zero(mlx5_glue->destroy_cq(tmpl->ibv_cq));
	if (tmpl->ibv_channel)
		claim_zero(mlx5_glue->destroy_comp_channel(tmpl->ibv_channel));
	rte_errno = ret; /* Restore rte_errno. */
	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
	return -rte_errno;
}

/**
 * Release an Rx verbs queue object.
 *
 * @param rxq_obj
 *   Verbs Rx queue object.
 */
static void
mlx5_rxq_ibv_obj_release(struct mlx5_rxq_obj *rxq_obj)
{
	MLX5_ASSERT(rxq_obj);
	MLX5_ASSERT(rxq_obj->wq);
	MLX5_ASSERT(rxq_obj->ibv_cq);
	claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
	claim_zero(mlx5_glue->destroy_cq(rxq_obj->ibv_cq));
	if (rxq_obj->ibv_channel)
		claim_zero(mlx5_glue->destroy_comp_channel
							(rxq_obj->ibv_channel));
}

/**
 * Get event for an Rx verbs queue object.
 *
 * @param rxq_obj
 *   Verbs Rx queue object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rx_ibv_get_event(struct mlx5_rxq_obj *rxq_obj)
{
	struct ibv_cq *ev_cq;
	void *ev_ctx;
	int ret = mlx5_glue->get_cq_event(rxq_obj->ibv_channel,
					  &ev_cq, &ev_ctx);

	if (ret < 0 || ev_cq != rxq_obj->ibv_cq)
		goto exit;
	mlx5_glue->ack_cq_events(rxq_obj->ibv_cq, 1);
	return 0;
exit:
	if (ret < 0)
		rte_errno = errno;
	else
		rte_errno = EINVAL;
	return -rte_errno;
}
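
/*
 * Typical flow (assumed from the surrounding driver design, not shown in
 * this file): the Rx interrupt handler wakes up on rxq_obj->fd, calls this
 * helper to consume and acknowledge the completion event, then re-arms the
 * CQ before waiting again.
 */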

/**
 * Create an indirection table referencing the receive work queues of the
 * given Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param log_n
 *   Log of number of queues in the array.
 * @param ind_tbl
 *   Verbs indirection table object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_ibv_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
		       struct mlx5_ind_table_obj *ind_tbl)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_wq *wq[1 << log_n];
	unsigned int i, j;

	MLX5_ASSERT(ind_tbl);
	for (i = 0; i != ind_tbl->queues_n; ++i) {
		struct mlx5_rxq_data *rxq = (*priv->rxqs)[ind_tbl->queues[i]];
		struct mlx5_rxq_ctrl *rxq_ctrl =
				container_of(rxq, struct mlx5_rxq_ctrl, rxq);

		wq[i] = rxq_ctrl->obj->wq;
	}
	MLX5_ASSERT(i > 0);
	/* Finalise the indirection table by wrapping back to the first WQs. */
	for (j = 0; i != (unsigned int)(1 << log_n); ++j, ++i)
		wq[i] = wq[j];
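	/*
	 * E.g. with 6 queues and log_n == 3 (8 entries), the two remaining
	 * slots are filled with wq[0] and wq[1] so every entry is valid
	 * (illustrative numbers).
	 */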
	ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table(priv->sh->ctx,
					&(struct ibv_rwq_ind_table_init_attr){
						.log_ind_tbl_size = log_n,
						.ind_tbl = wq,
						.comp_mask = 0,
					});
	if (!ind_tbl->ind_table) {
		rte_errno = errno;
		return -rte_errno;
	}
	return 0;
}

/**
 * Destroy the specified indirection table.
 *
 * @param ind_tbl
 *   Indirection table to release.
 */
static void
mlx5_ibv_ind_table_destroy(struct mlx5_ind_table_obj *ind_tbl)
{
	claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
}

/**
 * Create an Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param hrxq
 *   Pointer to Rx Hash queue.
 * @param tunnel
 *   Tunnel type.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_ibv_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
		  int tunnel __rte_unused)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_qp *qp = NULL;
	struct mlx5_ind_table_obj *ind_tbl = hrxq->ind_table;
	const uint8_t *rss_key = hrxq->rss_key;
	uint64_t hash_fields = hrxq->hash_fields;
	int err;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	struct mlx5dv_qp_init_attr qp_init_attr;

	memset(&qp_init_attr, 0, sizeof(qp_init_attr));
	if (tunnel) {
		qp_init_attr.comp_mask =
				       MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
		qp_init_attr.create_flags = MLX5DV_QP_CREATE_TUNNEL_OFFLOADS;
	}
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	if (dev->data->dev_conf.lpbk_mode) {
		/*
		 * Allow packets sent from NIC loopback without source MAC
		 * check.
		 */
		qp_init_attr.comp_mask |=
				MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
		qp_init_attr.create_flags |=
				MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC;
	}
#endif
	qp = mlx5_glue->dv_create_qp
			(priv->sh->ctx,
			 &(struct ibv_qp_init_attr_ex){
				.qp_type = IBV_QPT_RAW_PACKET,
				.comp_mask =
					IBV_QP_INIT_ATTR_PD |
					IBV_QP_INIT_ATTR_IND_TABLE |
					IBV_QP_INIT_ATTR_RX_HASH,
				.rx_hash_conf = (struct ibv_rx_hash_conf){
					.rx_hash_function =
						IBV_RX_HASH_FUNC_TOEPLITZ,
					.rx_hash_key_len = hrxq->rss_key_len,
					.rx_hash_key =
						(void *)(uintptr_t)rss_key,
					.rx_hash_fields_mask = hash_fields,
				},
				.rwq_ind_tbl = ind_tbl->ind_table,
				.pd = priv->sh->pd,
			  },
			  &qp_init_attr);
#else
	qp = mlx5_glue->create_qp_ex
			(priv->sh->ctx,
			 &(struct ibv_qp_init_attr_ex){
				.qp_type = IBV_QPT_RAW_PACKET,
				.comp_mask =
					IBV_QP_INIT_ATTR_PD |
					IBV_QP_INIT_ATTR_IND_TABLE |
					IBV_QP_INIT_ATTR_RX_HASH,
				.rx_hash_conf = (struct ibv_rx_hash_conf){
					.rx_hash_function =
						IBV_RX_HASH_FUNC_TOEPLITZ,
					.rx_hash_key_len = hrxq->rss_key_len,
					.rx_hash_key =
						(void *)(uintptr_t)rss_key,
					.rx_hash_fields_mask = hash_fields,
				},
				.rwq_ind_tbl = ind_tbl->ind_table,
				.pd = priv->sh->pd,
			 });
#endif
	if (!qp) {
		rte_errno = errno;
		goto error;
	}
	hrxq->qp = qp;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
	if (!hrxq->action) {
		rte_errno = errno;
		goto error;
	}
#endif
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	if (qp)
		claim_zero(mlx5_glue->destroy_qp(qp));
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}
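
/*
 * Illustrative hash_fields value (an example, not taken from this file):
 * a TCPv4 hash Rx queue would typically set
 *	IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4 |
 *	IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP
 * so the Toeplitz hash spreads flows across the indirection table.
 */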

/**
 * Destroy a Verbs queue pair.
 *
 * @param hrxq
 *   Hash Rx queue whose QP is to be released.
 */
static void
mlx5_ibv_qp_destroy(struct mlx5_hrxq *hrxq)
{
	claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
}

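/*
 * Rx object operations implemented on top of Verbs. Based on the
 * surrounding driver design, this table is expected to be plugged into
 * priv->obj_ops when DevX-based queue objects are not selected (an
 * assumption, not verified in this file).
 */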
struct mlx5_obj_ops ibv_obj_ops = {
	.rxq_obj_modify_vlan_strip = mlx5_rxq_obj_modify_wq_vlan_strip,
	.rxq_obj_new = mlx5_rxq_ibv_obj_new,
	.rxq_event_get = mlx5_rx_ibv_get_event,
	.rxq_obj_modify = mlx5_ibv_modify_wq,
	.rxq_obj_release = mlx5_rxq_ibv_obj_release,
	.ind_table_new = mlx5_ibv_ind_table_new,
	.ind_table_destroy = mlx5_ibv_ind_table_destroy,
	.hrxq_new = mlx5_ibv_hrxq_new,
	.hrxq_destroy = mlx5_ibv_qp_destroy,
};