/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <errno.h>
#include <stdbool.h>
#include <string.h>
#include <stdint.h>
#include <sys/queue.h>

#include <rte_malloc.h>
#include <rte_common.h>
#include <rte_eal_paging.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_common_devx.h>
#include <mlx5_malloc.h>

#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_tx.h"
#include "mlx5_rx.h"
#include "mlx5_utils.h"
#include "mlx5_devx.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"

/**
 * Modify RQ VLAN stripping offload.
 *
 * @param rxq
 *   Rx queue.
 * @param on
 *   Enable/disable VLAN stripping.
 *
 * @return
 *   0 on success, non-0 otherwise.
 */
static int
mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_priv *rxq, int on)
{
	struct mlx5_devx_modify_rq_attr rq_attr;

	memset(&rq_attr, 0, sizeof(rq_attr));
	rq_attr.rq_state = MLX5_RQC_STATE_RDY;
	rq_attr.state = MLX5_RQC_STATE_RDY;
	rq_attr.vsd = (on ? 0 : 1);
	rq_attr.modify_bitmask = MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD;
	return mlx5_devx_cmd_modify_rq(rxq->devx_rq.rq, &rq_attr);
}

/**
 * Modify RQ using DevX API.
 *
 * @param rxq
 *   DevX Rx queue.
 * @param type
 *   Type of queue state change.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
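 *
 * A minimal usage sketch (mirroring the call sites later in this file):
 *   mlx5_devx_modify_rq(rxq, MLX5_RXQ_MOD_RST2RDY); when the queue is started
 *   mlx5_devx_modify_rq(rxq, MLX5_RXQ_MOD_RDY2RST); when the queue is released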
 */
int
mlx5_devx_modify_rq(struct mlx5_rxq_priv *rxq, uint8_t type)
{
	struct mlx5_devx_modify_rq_attr rq_attr;

	memset(&rq_attr, 0, sizeof(rq_attr));
	switch (type) {
	case MLX5_RXQ_MOD_ERR2RST:
		rq_attr.rq_state = MLX5_RQC_STATE_ERR;
		rq_attr.state = MLX5_RQC_STATE_RST;
		break;
	case MLX5_RXQ_MOD_RST2RDY:
		rq_attr.rq_state = MLX5_RQC_STATE_RST;
		rq_attr.state = MLX5_RQC_STATE_RDY;
		if (rxq->lwm) {
			rq_attr.modify_bitmask |=
				MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_WQ_LWM;
			rq_attr.lwm = rxq->lwm;
		}
		break;
	case MLX5_RXQ_MOD_RDY2ERR:
		rq_attr.rq_state = MLX5_RQC_STATE_RDY;
		rq_attr.state = MLX5_RQC_STATE_ERR;
		break;
	case MLX5_RXQ_MOD_RDY2RST:
		rq_attr.rq_state = MLX5_RQC_STATE_RDY;
		rq_attr.state = MLX5_RQC_STATE_RST;
		break;
	case MLX5_RXQ_MOD_RDY2RDY:
		rq_attr.rq_state = MLX5_RQC_STATE_RDY;
		rq_attr.state = MLX5_RQC_STATE_RDY;
		rq_attr.modify_bitmask |= MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_WQ_LWM;
		rq_attr.lwm = rxq->lwm;
		break;
	default:
		break;
	}
	if (rxq->ctrl->is_hairpin)
		return mlx5_devx_cmd_modify_rq(rxq->ctrl->obj->rq, &rq_attr);
	return mlx5_devx_cmd_modify_rq(rxq->devx_rq.rq, &rq_attr);
}

/**
 * Modify SQ using DevX API.
 *
 * @param obj
 *   DevX Tx queue object.
 * @param type
 *   Type of queue state change.
 * @param dev_port
 *   Unused. Kept so that the prototype matches the Verbs implementation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_txq_devx_modify(struct mlx5_txq_obj *obj, enum mlx5_txq_modify_type type,
		     uint8_t dev_port)
{
	struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
	int ret;

	if (type != MLX5_TXQ_MOD_RST2RDY) {
		/* Change queue state to reset. */
		if (type == MLX5_TXQ_MOD_ERR2RDY)
			msq_attr.sq_state = MLX5_SQC_STATE_ERR;
		else
			msq_attr.sq_state = MLX5_SQC_STATE_RDY;
		msq_attr.state = MLX5_SQC_STATE_RST;
		ret = mlx5_devx_cmd_modify_sq(obj->sq_obj.sq, &msq_attr);
		if (ret) {
			DRV_LOG(ERR, "Cannot change the Tx SQ state to RESET"
				" %s", strerror(errno));
			rte_errno = errno;
			return ret;
		}
	}
	if (type != MLX5_TXQ_MOD_RDY2RST) {
		/* Change queue state to ready. */
		msq_attr.sq_state = MLX5_SQC_STATE_RST;
		msq_attr.state = MLX5_SQC_STATE_RDY;
		ret = mlx5_devx_cmd_modify_sq(obj->sq_obj.sq, &msq_attr);
		if (ret) {
			DRV_LOG(ERR, "Cannot change the Tx SQ state to READY"
				" %s", strerror(errno));
			rte_errno = errno;
			return ret;
		}
	}
	/*
	 * The dev_port argument is only meaningful in the Verbs implementation.
	 * The same function pointer may reference either this function or its
	 * Verbs counterpart, so both must share the same prototype.
	 */
	(void)dev_port;
	return 0;
}

/**
 * Release an Rx DevX queue object.
 *
 * @param rxq
 *   DevX Rx queue.
 */
static void
mlx5_rxq_devx_obj_release(struct mlx5_rxq_priv *rxq)
{
	struct mlx5_rxq_obj *rxq_obj = rxq->ctrl->obj;

	if (rxq_obj == NULL)
		return;
	if (rxq_obj->rxq_ctrl->is_hairpin) {
		if (rxq_obj->rq == NULL)
			return;
		mlx5_devx_modify_rq(rxq, MLX5_RXQ_MOD_RDY2RST);
		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
	} else {
		if (rxq->devx_rq.rq == NULL)
			return;
		mlx5_devx_rq_destroy(&rxq->devx_rq);
		if (rxq->devx_rq.rmp != NULL && rxq->devx_rq.rmp->ref_cnt > 0)
			return;
		mlx5_devx_cq_destroy(&rxq_obj->cq_obj);
		memset(&rxq_obj->cq_obj, 0, sizeof(rxq_obj->cq_obj));
		if (rxq_obj->devx_channel) {
			mlx5_os_devx_destroy_event_channel
						(rxq_obj->devx_channel);
			rxq_obj->devx_channel = NULL;
		}
	}
	rxq->ctrl->started = false;
}

/**
 * Get event for an Rx DevX queue object.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rx_devx_get_event(struct mlx5_rxq_obj *rxq_obj)
{
#ifdef HAVE_IBV_DEVX_EVENT
	union {
		struct mlx5dv_devx_async_event_hdr event_resp;
		uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
	} out;
	int ret = mlx5_glue->devx_get_event(rxq_obj->devx_channel,
					    &out.event_resp,
					    sizeof(out.buf));

	if (ret < 0) {
		rte_errno = errno;
		return -rte_errno;
	}
	if (out.event_resp.cookie != (uint64_t)(uintptr_t)rxq_obj->cq_obj.cq) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	return 0;
#else
	(void)rxq_obj;
	rte_errno = ENOTSUP;
	return -rte_errno;
#endif /* HAVE_IBV_DEVX_EVENT */
}

/**
 * Get an LWM event from the shared context and return the port/Rx queue
 * that triggered it.
 *
 * @param priv
 *   Pointer to the mlx5_priv object.
 * @param rxq_idx [out]
 *   Index of the Rx queue that gets this event.
 * @param port_id [out]
 *   ID of the port that gets this event.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
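 *
 * The port and queue are decoded from the event cookie exactly as in the
 * function body below:
 *   port_id = (cookie >> LWM_COOKIE_PORTID_OFFSET) & LWM_COOKIE_PORTID_MASK;
 *   rxq_idx = (cookie >> LWM_COOKIE_RXQID_OFFSET) & LWM_COOKIE_RXQID_MASK;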
 */
static int
mlx5_rx_devx_get_event_lwm(struct mlx5_priv *priv, int *rxq_idx, int *port_id)
{
#ifdef HAVE_IBV_DEVX_EVENT
	union {
		struct mlx5dv_devx_async_event_hdr event_resp;
		uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
	} out;
	int ret;

	memset(&out, 0, sizeof(out));
	ret = mlx5_glue->devx_get_event(priv->sh->devx_channel_lwm,
					&out.event_resp,
					sizeof(out.buf));
	if (ret < 0) {
		rte_errno = errno;
		DRV_LOG(WARNING, "%s: failed to get DevX LWM event.", __func__);
		return -rte_errno;
	}
	*port_id = (((uint32_t)out.event_resp.cookie) >>
		    LWM_COOKIE_PORTID_OFFSET) & LWM_COOKIE_PORTID_MASK;
	*rxq_idx = (((uint32_t)out.event_resp.cookie) >>
		    LWM_COOKIE_RXQID_OFFSET) & LWM_COOKIE_RXQID_MASK;
	return 0;
#else
	(void)priv;
	(void)rxq_idx;
	(void)port_id;
	rte_errno = ENOTSUP;
	return -rte_errno;
#endif /* HAVE_IBV_DEVX_EVENT */
}

/**
 * Create an RQ object using DevX.
 *
 * @param rxq
 *   Pointer to Rx queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_create_devx_rq_resources(struct mlx5_rxq_priv *rxq)
{
	struct mlx5_priv *priv = rxq->priv;
	struct mlx5_common_device *cdev = priv->sh->cdev;
	struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
	struct mlx5_rxq_data *rxq_data = &rxq->ctrl->rxq;
	struct mlx5_devx_create_rq_attr rq_attr = { 0 };
	uint16_t log_desc_n = rxq_data->elts_n - rxq_data->sges_n;
	uint32_t wqe_size, log_wqe_size;

	/* Fill RQ attributes. */
	rq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE;
	rq_attr.flush_in_error_en = 1;
	rq_attr.vsd = (rxq_data->vlan_strip) ? 0 : 1;
	rq_attr.cqn = rxq_ctrl->obj->cq_obj.cq->id;
	rq_attr.scatter_fcs = (rxq_data->crc_present) ? 1 : 0;
	rq_attr.ts_format =
		mlx5_ts_format_conv(cdev->config.hca_attr.rq_ts_format);
	/* Fill WQ attributes for this RQ. */
	if (mlx5_rxq_mprq_enabled(rxq_data)) {
		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
		/*
		 * Number of strides in each WQE:
		 * 512*2^single_wqe_log_num_of_strides.
		 */
		rq_attr.wq_attr.single_wqe_log_num_of_strides =
			rxq_data->log_strd_num -
			MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
		/* Stride size = (2^single_stride_log_num_of_bytes)*64B. */
		rq_attr.wq_attr.single_stride_log_num_of_bytes =
			rxq_data->log_strd_sz -
			MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
		wqe_size = sizeof(struct mlx5_wqe_mprq);
	} else {
		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
		wqe_size = sizeof(struct mlx5_wqe_data_seg);
	}
	log_wqe_size = log2above(wqe_size) + rxq_data->sges_n;
	wqe_size = 1 << log_wqe_size; /* Round up to power of two. */
	rq_attr.wq_attr.log_wq_stride = log_wqe_size;
	rq_attr.wq_attr.log_wq_sz = log_desc_n;
	rq_attr.wq_attr.end_padding_mode = priv->config.hw_padding ?
					   MLX5_WQ_END_PAD_MODE_ALIGN :
					   MLX5_WQ_END_PAD_MODE_NONE;
	rq_attr.wq_attr.pd = cdev->pdn;
	rq_attr.counter_set_id = priv->counter_set_id;
	rq_attr.delay_drop_en = rxq_data->delay_drop;
	rq_attr.user_index = rte_cpu_to_be_16(priv->dev_data->port_id);
	if (rxq_data->shared) /* Create RMP based RQ. */
		rxq->devx_rq.rmp = &rxq_ctrl->obj->devx_rmp;
	/* Create RQ using DevX API. */
	return mlx5_devx_rq_create(cdev->ctx, &rxq->devx_rq, wqe_size,
				   log_desc_n, &rq_attr, rxq_ctrl->socket);
}

/**
 * Create a DevX CQ object for an Rx queue.
 *
 * @param rxq
 *   Pointer to Rx queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_create_devx_cq_resources(struct mlx5_rxq_priv *rxq)
{
	struct mlx5_devx_cq *cq_obj = NULL;
	struct mlx5_devx_cq_attr cq_attr = { 0 };
	struct mlx5_priv *priv = rxq->priv;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	uint16_t port_id = priv->dev_data->port_id;
	struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
	struct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq;
	unsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data);
	uint32_t log_cqe_n;
	uint16_t event_nums[1] = { 0 };
	int ret = 0;

	if (rxq_ctrl->started)
		return 0;
	if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
	    !rxq_data->lro) {
		cq_attr.cqe_comp_en = 1u;
		rxq_data->mcqe_format = priv->config.cqe_comp_fmt;
		rxq_data->byte_mask = UINT32_MAX;
		switch (priv->config.cqe_comp_fmt) {
		case MLX5_CQE_RESP_FORMAT_HASH:
			/* fallthrough */
		case MLX5_CQE_RESP_FORMAT_CSUM:
			/*
			 * Select CSUM miniCQE format only for non-vectorized
			 * MPRQ Rx burst, use HASH miniCQE format for others.
			 */
			if (mlx5_rxq_check_vec_support(rxq_data) < 0 &&
			    mlx5_rxq_mprq_enabled(rxq_data))
				cq_attr.mini_cqe_res_format =
					MLX5_CQE_RESP_FORMAT_CSUM_STRIDX;
			else
				cq_attr.mini_cqe_res_format =
					MLX5_CQE_RESP_FORMAT_HASH;
			rxq_data->mcqe_format = cq_attr.mini_cqe_res_format;
			break;
		case MLX5_CQE_RESP_FORMAT_FTAG_STRIDX:
			rxq_data->byte_mask = MLX5_LEN_WITH_MARK_MASK;
			/* fallthrough */
		case MLX5_CQE_RESP_FORMAT_CSUM_STRIDX:
			cq_attr.mini_cqe_res_format = priv->config.cqe_comp_fmt;
			break;
		case MLX5_CQE_RESP_FORMAT_L34H_STRIDX:
			cq_attr.mini_cqe_res_format = 0;
			cq_attr.mini_cqe_res_format_ext = 1;
			break;
		}
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is enabled, format %d.",
			port_id, priv->config.cqe_comp_fmt);
		/*
		 * For vectorized Rx, the CQ size must not be doubled, so that
		 * cq_ci and rq_ci stay aligned.
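		 * As an illustration (an assumption for clarity, not part of
		 * the original comment): if mlx5_rxq_cqe_num() returned N,
		 * the scalar datapath with compression enabled allocates a CQ
		 * of 2 * N entries below, while the vectorized datapath keeps
		 * N entries.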
		 */
		if (mlx5_rxq_check_vec_support(rxq_data) < 0)
			cqe_n *= 2;
	} else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is disabled for HW timestamp.",
			port_id);
	} else if (priv->config.cqe_comp && rxq_data->lro) {
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is disabled for LRO.",
			port_id);
	}
	cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->rx_uar.obj);
	log_cqe_n = log2above(cqe_n);
	/* Create CQ using DevX API. */
	ret = mlx5_devx_cq_create(sh->cdev->ctx, &rxq_ctrl->obj->cq_obj,
				  log_cqe_n, &cq_attr, sh->numa_node);
	if (ret)
		return ret;
	cq_obj = &rxq_ctrl->obj->cq_obj;
	rxq_data->cqes = (volatile struct mlx5_cqe (*)[])
						(uintptr_t)cq_obj->cqes;
	rxq_data->cq_db = cq_obj->db_rec;
	rxq_data->uar_data = sh->rx_uar.cq_db;
	rxq_data->cqe_n = log_cqe_n;
	rxq_data->cqn = cq_obj->cq->id;
	rxq_data->cq_ci = 0;
	if (rxq_ctrl->obj->devx_channel) {
		ret = mlx5_os_devx_subscribe_devx_event
					      (rxq_ctrl->obj->devx_channel,
					       cq_obj->cq->obj,
					       sizeof(event_nums),
					       event_nums,
					       (uint64_t)(uintptr_t)cq_obj->cq);
		if (ret) {
			DRV_LOG(ERR, "Failed to subscribe CQ to event channel.");
			ret = errno;
			mlx5_devx_cq_destroy(cq_obj);
			memset(cq_obj, 0, sizeof(*cq_obj));
			rte_errno = ret;
			return -ret;
		}
	}
	return 0;
}

/**
 * Create the Rx hairpin queue object.
 *
 * @param rxq
 *   Pointer to Rx queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
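 *
 * Note: the hairpin data size comes from the log_hp_size devarg when it is
 * set (and is rejected if it exceeds the log_max_hairpin_wq_data_sz
 * capability); otherwise it is capped at MLX5_HAIRPIN_JUMBO_LOG_SIZE, as
 * implemented below.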
 */
static int
mlx5_rxq_obj_hairpin_new(struct mlx5_rxq_priv *rxq)
{
	uint16_t idx = rxq->idx;
	struct mlx5_priv *priv = rxq->priv;
	struct mlx5_hca_attr *hca_attr __rte_unused = &priv->sh->cdev->config.hca_attr;
	struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
	struct mlx5_devx_create_rq_attr unlocked_attr = { 0 };
	struct mlx5_devx_create_rq_attr locked_attr = { 0 };
	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
	uint32_t max_wq_data;

	MLX5_ASSERT(rxq != NULL && rxq->ctrl != NULL && tmpl != NULL);
	tmpl->rxq_ctrl = rxq_ctrl;
	unlocked_attr.hairpin = 1;
	max_wq_data =
		priv->sh->cdev->config.hca_attr.log_max_hairpin_wq_data_sz;
	/* Jumbo frames > 9 KB and multiple packets should fit in the buffer. */
	if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
		if (priv->config.log_hp_size > max_wq_data) {
			DRV_LOG(ERR, "Total data size %u power of 2 is "
				"too large for hairpin.",
				priv->config.log_hp_size);
			rte_errno = ERANGE;
			return -rte_errno;
		}
		unlocked_attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
	} else {
		unlocked_attr.wq_attr.log_hairpin_data_sz =
				(max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
				 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
	}
	/* Set the packets number to the maximum value for performance. */
	unlocked_attr.wq_attr.log_hairpin_num_packets =
			unlocked_attr.wq_attr.log_hairpin_data_sz -
			MLX5_HAIRPIN_QUEUE_STRIDE;
	unlocked_attr.counter_set_id = priv->counter_set_id;
	rxq_ctrl->rxq.delay_drop = priv->config.hp_delay_drop;
	unlocked_attr.delay_drop_en = priv->config.hp_delay_drop;
	unlocked_attr.hairpin_data_buffer_type =
			MLX5_RQC_HAIRPIN_DATA_BUFFER_TYPE_UNLOCKED_INTERNAL_BUFFER;
	if (rxq->hairpin_conf.use_locked_device_memory) {
		/*
		 * It is assumed that configuration is verified against
		 * capabilities during queue setup.
		 */
		MLX5_ASSERT(hca_attr->hairpin_data_buffer_locked);
		rte_memcpy(&locked_attr, &unlocked_attr, sizeof(locked_attr));
		locked_attr.hairpin_data_buffer_type =
				MLX5_RQC_HAIRPIN_DATA_BUFFER_TYPE_LOCKED_INTERNAL_BUFFER;
		tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->cdev->ctx, &locked_attr,
						   rxq_ctrl->socket);
		if (!tmpl->rq && rxq->hairpin_conf.force_memory) {
			DRV_LOG(ERR, "Port %u Rx hairpin queue %u can't create RQ object"
				     " with locked memory buffer",
				     priv->dev_data->port_id, idx);
			return -rte_errno;
		} else if (!tmpl->rq && !rxq->hairpin_conf.force_memory) {
			DRV_LOG(WARNING, "Port %u Rx hairpin queue %u can't create RQ object"
					 " with locked memory buffer. Falling back to unlocked"
					 " device memory.",
					 priv->dev_data->port_id, idx);
			rte_errno = 0;
			goto create_rq_unlocked;
		}
		goto create_rq_set_state;
	}

create_rq_unlocked:
	tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->cdev->ctx, &unlocked_attr,
					   rxq_ctrl->socket);
	if (!tmpl->rq) {
		DRV_LOG(ERR,
			"Port %u Rx hairpin queue %u can't create RQ object.",
			priv->dev_data->port_id, idx);
		rte_errno = errno;
		return -rte_errno;
	}
create_rq_set_state:
	priv->dev_data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
	return 0;
}

/**
 * Create the Rx queue DevX object.
 *
 * @param rxq
 *   Pointer to Rx queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
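 *
 * For a regular (non-hairpin) queue the objects are created in the order
 * used below: an optional DevX event channel (when Rx interrupts are
 * enabled), then the CQ, then the RQ, which is finally moved to the ready
 * state.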
 */
static int
mlx5_rxq_devx_obj_new(struct mlx5_rxq_priv *rxq)
{
	struct mlx5_priv *priv = rxq->priv;
	struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
	struct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq;
	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
	int ret = 0;

	MLX5_ASSERT(rxq_data);
	MLX5_ASSERT(tmpl);
	if (rxq_ctrl->is_hairpin)
		return mlx5_rxq_obj_hairpin_new(rxq);
	tmpl->rxq_ctrl = rxq_ctrl;
	if (rxq_ctrl->irq && !rxq_ctrl->started) {
		int devx_ev_flag =
			MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA;

		tmpl->devx_channel = mlx5_os_devx_create_event_channel
							(priv->sh->cdev->ctx,
							 devx_ev_flag);
		if (!tmpl->devx_channel) {
			rte_errno = errno;
			DRV_LOG(ERR, "Failed to create event channel %d.",
				rte_errno);
			goto error;
		}
		tmpl->fd = mlx5_os_get_devx_channel_fd(tmpl->devx_channel);
	}
	/* Create CQ using DevX API. */
	ret = mlx5_rxq_create_devx_cq_resources(rxq);
	if (ret) {
		DRV_LOG(ERR, "Failed to create CQ.");
		goto error;
	}
	rxq_data->delay_drop = priv->config.std_delay_drop;
	/* Create RQ using DevX API. */
	ret = mlx5_rxq_create_devx_rq_resources(rxq);
	if (ret) {
		DRV_LOG(ERR, "Port %u Rx queue %u RQ creation failure.",
			priv->dev_data->port_id, rxq->idx);
		rte_errno = ENOMEM;
		goto error;
	}
	/* Change queue state to ready. */
	ret = mlx5_devx_modify_rq(rxq, MLX5_RXQ_MOD_RST2RDY);
	if (ret)
		goto error;
	if (!rxq_data->shared) {
		rxq_data->wqes = (void *)(uintptr_t)rxq->devx_rq.wq.umem_buf;
		rxq_data->rq_db = (uint32_t *)(uintptr_t)rxq->devx_rq.wq.db_rec;
	} else if (!rxq_ctrl->started) {
		rxq_data->wqes = (void *)(uintptr_t)tmpl->devx_rmp.wq.umem_buf;
		rxq_data->rq_db =
				(uint32_t *)(uintptr_t)tmpl->devx_rmp.wq.db_rec;
	}
	if (!rxq_ctrl->started) {
		mlx5_rxq_initialize(rxq_data);
		rxq_ctrl->wqn = rxq->devx_rq.rq->id;
	}
	priv->dev_data->rx_queue_state[rxq->idx] = RTE_ETH_QUEUE_STATE_STARTED;
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	mlx5_rxq_devx_obj_release(rxq);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Prepare RQT attribute structure for DevX RQT API.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param log_n
 *   Log of number of queues in the array.
 * @param queues
 *   List of Rx queue indices or NULL, in which case
 *   the attribute is filled with the drop queue ID.
 * @param queues_n
 *   Size of @p queues array or 0 if it is NULL.
 *
 * @return
 *   The initialized RQT attribute object, NULL otherwise and rte_errno is set.
 */
static struct mlx5_devx_rqt_attr *
mlx5_devx_ind_table_create_rqt_attr(struct rte_eth_dev *dev,
				    const unsigned int log_n,
				    const uint16_t *queues,
				    const uint32_t queues_n)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_devx_rqt_attr *rqt_attr = NULL;
	const unsigned int rqt_n = 1 << log_n;
	unsigned int i, j;

	rqt_attr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rqt_attr) +
			       rqt_n * sizeof(uint32_t), 0, SOCKET_ID_ANY);
	if (!rqt_attr) {
		DRV_LOG(ERR, "Port %u cannot allocate RQT resources.",
			dev->data->port_id);
		rte_errno = ENOMEM;
		return NULL;
	}
	rqt_attr->rqt_max_size = priv->sh->dev_cap.ind_table_max_size;
	rqt_attr->rqt_actual_size = rqt_n;
	if (queues == NULL) {
		for (i = 0; i < rqt_n; i++)
			rqt_attr->rq_list[i] =
					priv->drop_queue.rxq->devx_rq.rq->id;
		return rqt_attr;
	}
	for (i = 0; i != queues_n; ++i) {
		if (mlx5_is_external_rxq(dev, queues[i])) {
			struct mlx5_external_rxq *ext_rxq =
					mlx5_ext_rxq_get(dev, queues[i]);

			rqt_attr->rq_list[i] = ext_rxq->hw_id;
		} else {
			struct mlx5_rxq_priv *rxq =
					mlx5_rxq_get(dev, queues[i]);

			MLX5_ASSERT(rxq != NULL);
			if (rxq->ctrl->is_hairpin)
				rqt_attr->rq_list[i] = rxq->ctrl->obj->rq->id;
			else
				rqt_attr->rq_list[i] = rxq->devx_rq.rq->id;
		}
	}
	MLX5_ASSERT(i > 0);
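	/*
	 * Illustration (assumed example, not from the original code): with
	 * three configured queues and log_n = 2 (a four-entry RQT), the list
	 * below becomes { q0, q1, q2, q0 } - the configured queues are
	 * repeated until the RQT is completely filled.
	 */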
	for (j = 0; i != rqt_n; ++j, ++i)
		rqt_attr->rq_list[i] = rqt_attr->rq_list[j];
	return rqt_attr;
}

/**
 * Create RQT using DevX API as a field of indirection table.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param log_n
 *   Log of number of queues in the array.
 * @param ind_tbl
 *   DevX indirection table object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
			struct mlx5_ind_table_obj *ind_tbl)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_devx_rqt_attr *rqt_attr = NULL;
	const uint16_t *queues = dev->data->dev_started ? ind_tbl->queues :
							  NULL;

	MLX5_ASSERT(ind_tbl);
	rqt_attr = mlx5_devx_ind_table_create_rqt_attr(dev, log_n, queues,
						       ind_tbl->queues_n);
	if (!rqt_attr)
		return -rte_errno;
	ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->cdev->ctx, rqt_attr);
	mlx5_free(rqt_attr);
	if (!ind_tbl->rqt) {
		DRV_LOG(ERR, "Port %u cannot create DevX RQT.",
			dev->data->port_id);
		rte_errno = errno;
		return -rte_errno;
	}
	return 0;
}

/**
 * Modify RQT using DevX API as a field of indirection table.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param log_n
 *   Log of number of queues in the array.
 * @param queues
 *   List of Rx queue indices.
 * @param queues_n
 *   Size of @p queues array.
 * @param ind_tbl
 *   DevX indirection table object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_ind_table_modify(struct rte_eth_dev *dev, const unsigned int log_n,
			   const uint16_t *queues, const uint32_t queues_n,
			   struct mlx5_ind_table_obj *ind_tbl)
{
	int ret = 0;
	struct mlx5_devx_rqt_attr *rqt_attr = NULL;

	MLX5_ASSERT(ind_tbl);
	rqt_attr = mlx5_devx_ind_table_create_rqt_attr(dev, log_n,
						       queues,
						       queues_n);
	if (!rqt_attr)
		return -rte_errno;
	ret = mlx5_devx_cmd_modify_rqt(ind_tbl->rqt, rqt_attr);
	mlx5_free(rqt_attr);
	if (ret)
		DRV_LOG(ERR, "Port %u cannot modify DevX RQT.",
			dev->data->port_id);
	return ret;
}

/**
 * Destroy the DevX RQT object.
 *
 * @param ind_tbl
 *   Indirection table to release.
 */
static void
mlx5_devx_ind_table_destroy(struct mlx5_ind_table_obj *ind_tbl)
{
	claim_zero(mlx5_devx_cmd_destroy(ind_tbl->rqt));
}

/**
 * Set TIR attribute struct with relevant input values.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] rss_key
 *   RSS key for the Rx hash queue.
 * @param[in] hash_fields
 *   Verbs protocol hash fields to apply RSS on.
 * @param[in] ind_tbl
 *   Indirection table for TIR. If table queues array is NULL,
 *   a TIR for drop queue is assumed.
 * @param[in] tunnel
 *   Tunnel type.
 * @param[out] tir_attr
 *   Parameters structure for TIR creation/modification.
 */
static void
mlx5_devx_tir_attr_set(struct rte_eth_dev *dev, const uint8_t *rss_key,
		       uint64_t hash_fields,
		       const struct mlx5_ind_table_obj *ind_tbl,
		       int tunnel, struct mlx5_devx_tir_attr *tir_attr)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	bool is_hairpin;
	bool lro = false;
	uint32_t i;

	/* NULL queues designate drop queue. */
	if (ind_tbl->queues == NULL) {
		is_hairpin = priv->drop_queue.rxq->ctrl->is_hairpin;
	} else if (mlx5_is_external_rxq(dev, ind_tbl->queues[0])) {
		/* External RxQ supports neither Hairpin nor LRO. */
		is_hairpin = false;
	} else {
		is_hairpin = mlx5_rxq_is_hairpin(dev, ind_tbl->queues[0]);
		lro = true;
		/* Enable TIR LRO only if all the queues were configured for it. */
		for (i = 0; i < ind_tbl->queues_n; ++i) {
			struct mlx5_rxq_data *rxq_i =
				mlx5_rxq_data_get(dev, ind_tbl->queues[i]);

			if (rxq_i != NULL && !rxq_i->lro) {
				lro = false;
				break;
			}
		}
	}
	memset(tir_attr, 0, sizeof(*tir_attr));
	tir_attr->disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
	tir_attr->rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
	tir_attr->tunneled_offload_en = !!tunnel;
	/* If needed, translate hash_fields bitmap to PRM format. */
	if (hash_fields) {
		struct mlx5_rx_hash_field_select *rx_hash_field_select =
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
			hash_fields & IBV_RX_HASH_INNER ?
				&tir_attr->rx_hash_field_selector_inner :
#endif
				&tir_attr->rx_hash_field_selector_outer;
		/* 1 bit: 0: IPv4, 1: IPv6. */
		rx_hash_field_select->l3_prot_type =
			!!(hash_fields & MLX5_IPV6_IBV_RX_HASH);
		/* 1 bit: 0: TCP, 1: UDP. */
		rx_hash_field_select->l4_prot_type =
			!!(hash_fields & MLX5_UDP_IBV_RX_HASH);
		/* Bitmask which sets which fields to use in RX Hash. */
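		/*
		 * Assumed example for clarity: for an outer IPv4/TCP 4-tuple
		 * hash, the SRC_IP, DST_IP, L4_SPORT and L4_DPORT bits are
		 * set below, while l3_prot_type and l4_prot_type remain 0
		 * (IPv4, TCP).
		 */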
		rx_hash_field_select->selected_fields =
			((!!(hash_fields & MLX5_L3_SRC_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) |
			(!!(hash_fields & MLX5_L3_DST_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP |
			(!!(hash_fields & MLX5_L4_SRC_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT |
			(!!(hash_fields & MLX5_L4_DST_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT |
			(!!(hash_fields & IBV_RX_HASH_IPSEC_SPI)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_IPSEC_SPI;
	}
	if (is_hairpin)
		tir_attr->transport_domain = priv->sh->td->id;
	else
		tir_attr->transport_domain = priv->sh->tdn;
	memcpy(tir_attr->rx_hash_toeplitz_key, rss_key, MLX5_RSS_HASH_KEY_LEN);
	tir_attr->indirect_table = ind_tbl->rqt->id;
	if (dev->data->dev_conf.lpbk_mode)
		tir_attr->self_lb_block = MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
	if (lro) {
		MLX5_ASSERT(priv->sh->config.lro_allowed);
		tir_attr->lro_timeout_period_usecs = priv->config.lro_timeout;
		tir_attr->lro_max_msg_sz = priv->max_lro_msg_size;
		tir_attr->lro_enable_mask =
			MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
			MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
	}
}

/**
 * Create an Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param hrxq
 *   Pointer to Rx Hash queue.
 * @param tunnel
 *   Tunnel type.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
		   int tunnel __rte_unused)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_devx_tir_attr tir_attr = {0};
	int err;

	mlx5_devx_tir_attr_set(dev, hrxq->rss_key, hrxq->hash_fields,
			       hrxq->ind_table, tunnel, &tir_attr);
	hrxq->tir = mlx5_devx_cmd_create_tir(priv->sh->cdev->ctx, &tir_attr);
	if (!hrxq->tir) {
		DRV_LOG(ERR, "Port %u cannot create DevX TIR.",
			dev->data->port_id);
		rte_errno = errno;
		goto error;
	}
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
	if (hrxq->hws_flags) {
		hrxq->action = mlx5dr_action_create_dest_tir
			(priv->dr_ctx,
			 (struct mlx5dr_devx_obj *)hrxq->tir, hrxq->hws_flags);
		if (!hrxq->action)
			goto error;
		return 0;
	}
	if (mlx5_flow_os_create_flow_action_dest_devx_tir(hrxq->tir,
							  &hrxq->action)) {
		rte_errno = errno;
		goto error;
	}
#endif
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	if (hrxq->tir)
		claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Destroy a DevX TIR object.
 *
 * @param hrxq
 *   Hash Rx queue whose TIR is released.
 */
static void
mlx5_devx_tir_destroy(struct mlx5_hrxq *hrxq)
{
	claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
}

/**
 * Modify an Rx Hash queue configuration.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param hrxq
 *   Hash Rx queue to modify.
 * @param rss_key
 *   RSS key for the Rx hash queue.
 * @param hash_fields
 *   Verbs protocol hash fields to apply RSS on.
 * @param[in] ind_tbl
 *   Indirection table for TIR.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
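 *
 * Only the indirection table and the hash configuration are modified here:
 * the modify bitmask is limited to
 * MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_INDIRECT_TABLE and
 * MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_HASH, as set below.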
 */
static int
mlx5_devx_hrxq_modify(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
		      const uint8_t *rss_key,
		      uint64_t hash_fields,
		      const struct mlx5_ind_table_obj *ind_tbl)
{
	struct mlx5_devx_modify_tir_attr modify_tir = {0};

	/*
	 * Fields that are not modified here and therefore remain untested
	 * for modification:
	 * - rx_hash_symmetric is not set in hrxq_new(),
	 * - rx_hash_fn is hard-coded in hrxq_new(),
	 * - lro_xxx is not set after Rx queue setup.
	 */
	if (ind_tbl != hrxq->ind_table)
		modify_tir.modify_bitmask |=
			MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_INDIRECT_TABLE;
	if (hash_fields != hrxq->hash_fields ||
	    memcmp(hrxq->rss_key, rss_key, MLX5_RSS_HASH_KEY_LEN))
		modify_tir.modify_bitmask |=
			MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_HASH;
	mlx5_devx_tir_attr_set(dev, rss_key, hash_fields, ind_tbl,
			       0, /* N/A - tunnel modification unsupported */
			       &modify_tir.tir);
	modify_tir.tirn = hrxq->tir->id;
	if (mlx5_devx_cmd_modify_tir(hrxq->tir, &modify_tir)) {
		DRV_LOG(ERR, "Port %u cannot modify DevX TIR.",
			dev->data->port_id);
		rte_errno = errno;
		return -rte_errno;
	}
	return 0;
}

/**
 * Create a DevX drop Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_devx_obj_drop_create(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int socket_id = dev->device->numa_node;
	struct mlx5_rxq_priv *rxq;
	struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
	struct mlx5_rxq_obj *rxq_obj = NULL;
	int ret;

	/*
	 * Initialize dummy control structures.
	 * They are required to hold pointers for cleanup
	 * and are only accessible via drop queue DevX objects.
	 */
	rxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq), 0, socket_id);
	if (rxq == NULL) {
		DRV_LOG(ERR, "Port %u could not allocate drop queue private.",
			dev->data->port_id);
		rte_errno = ENOMEM;
		goto error;
	}
	rxq_ctrl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq_ctrl),
			       0, socket_id);
	if (rxq_ctrl == NULL) {
		DRV_LOG(ERR, "Port %u could not allocate drop queue control.",
			dev->data->port_id);
		rte_errno = ENOMEM;
		goto error;
	}
	rxq_obj = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq_obj), 0, socket_id);
	if (rxq_obj == NULL) {
		DRV_LOG(ERR, "Port %u could not allocate drop queue object.",
			dev->data->port_id);
		rte_errno = ENOMEM;
		goto error;
	}
	/* Set the CPU socket ID where the rxq_ctrl was allocated. */
	rxq_ctrl->socket = socket_id;
	rxq_obj->rxq_ctrl = rxq_ctrl;
	rxq_ctrl->is_hairpin = false;
	rxq_ctrl->sh = priv->sh;
	rxq_ctrl->obj = rxq_obj;
	rxq->ctrl = rxq_ctrl;
	rxq->priv = priv;
	LIST_INSERT_HEAD(&rxq_ctrl->owners, rxq, owner_entry);
	/* Create CQ using DevX API. */
	ret = mlx5_rxq_create_devx_cq_resources(rxq);
	if (ret != 0) {
		DRV_LOG(ERR, "Port %u drop queue CQ creation failed.",
			dev->data->port_id);
		goto error;
	}
	rxq_ctrl->rxq.delay_drop = 0;
	/* Create RQ using DevX API. */
	ret = mlx5_rxq_create_devx_rq_resources(rxq);
	if (ret != 0) {
		DRV_LOG(ERR, "Port %u drop queue RQ creation failed.",
			dev->data->port_id);
		rte_errno = ENOMEM;
		goto error;
	}
	/* Change queue state to ready. */
	ret = mlx5_devx_modify_rq(rxq, MLX5_RXQ_MOD_RST2RDY);
	if (ret != 0)
		goto error;
	/* Initialize drop queue. */
	priv->drop_queue.rxq = rxq;
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	if (rxq != NULL && rxq->devx_rq.rq != NULL)
		mlx5_devx_rq_destroy(&rxq->devx_rq);
	if (rxq_obj != NULL) {
		if (rxq_obj->cq_obj.cq != NULL)
			mlx5_devx_cq_destroy(&rxq_obj->cq_obj);
		if (rxq_obj->devx_channel)
			mlx5_os_devx_destroy_event_channel
							(rxq_obj->devx_channel);
		mlx5_free(rxq_obj);
	}
	if (rxq_ctrl != NULL)
		mlx5_free(rxq_ctrl);
	if (rxq != NULL)
		mlx5_free(rxq);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Release drop Rx queue resources.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_rxq_devx_obj_drop_release(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_priv *rxq = priv->drop_queue.rxq;
	struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;

	mlx5_rxq_devx_obj_release(rxq);
	mlx5_free(rxq_ctrl->obj);
	mlx5_free(rxq_ctrl);
	mlx5_free(rxq);
	priv->drop_queue.rxq = NULL;
}

/**
 * Release a drop hash Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_devx_drop_action_destroy(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;

	if (hrxq->tir != NULL)
		mlx5_devx_tir_destroy(hrxq);
	if (hrxq->ind_table->ind_table != NULL)
		mlx5_devx_ind_table_destroy(hrxq->ind_table);
	if (priv->drop_queue.rxq->devx_rq.rq != NULL)
		mlx5_rxq_devx_obj_drop_release(dev);
}

/**
 * Create a DevX drop action for Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
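 *
 * The drop action is composed of a dedicated drop Rx queue, an indirection
 * table pointing to it and a TIR on top, created in that order below. When
 * the HW steering flow engine is enabled (dv_flow_en == 2), only the drop
 * Rx queue is created here.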
1143bc5bee02SDmitry Kozlyuk */ 1144bc5bee02SDmitry Kozlyuk static int 1145bc5bee02SDmitry Kozlyuk mlx5_devx_drop_action_create(struct rte_eth_dev *dev) 1146bc5bee02SDmitry Kozlyuk { 1147bc5bee02SDmitry Kozlyuk struct mlx5_priv *priv = dev->data->dev_private; 1148bc5bee02SDmitry Kozlyuk struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq; 1149bc5bee02SDmitry Kozlyuk int ret; 1150bc5bee02SDmitry Kozlyuk 1151bc5bee02SDmitry Kozlyuk ret = mlx5_rxq_devx_obj_drop_create(dev); 1152bc5bee02SDmitry Kozlyuk if (ret != 0) { 1153bc5bee02SDmitry Kozlyuk DRV_LOG(ERR, "Cannot create drop RX queue"); 1154bc5bee02SDmitry Kozlyuk return ret; 1155bc5bee02SDmitry Kozlyuk } 11563a2f674bSSuanming Mou if (priv->sh->config.dv_flow_en == 2) 11573a2f674bSSuanming Mou return 0; 1158bc5bee02SDmitry Kozlyuk /* hrxq->ind_table queues are NULL, drop RX queue ID will be used */ 1159bc5bee02SDmitry Kozlyuk ret = mlx5_devx_ind_table_new(dev, 0, hrxq->ind_table); 1160bc5bee02SDmitry Kozlyuk if (ret != 0) { 1161bc5bee02SDmitry Kozlyuk DRV_LOG(ERR, "Cannot create drop hash RX queue indirection table"); 1162bc5bee02SDmitry Kozlyuk goto error; 1163bc5bee02SDmitry Kozlyuk } 1164bc5bee02SDmitry Kozlyuk ret = mlx5_devx_hrxq_new(dev, hrxq, /* tunnel */ false); 1165bc5bee02SDmitry Kozlyuk if (ret != 0) { 1166bc5bee02SDmitry Kozlyuk DRV_LOG(ERR, "Cannot create drop hash RX queue"); 1167bc5bee02SDmitry Kozlyuk goto error; 1168bc5bee02SDmitry Kozlyuk } 1169bc5bee02SDmitry Kozlyuk return 0; 1170bc5bee02SDmitry Kozlyuk error: 1171bc5bee02SDmitry Kozlyuk mlx5_devx_drop_action_destroy(dev); 1172bc5bee02SDmitry Kozlyuk return ret; 11735eaf882eSMichael Baum } 11745eaf882eSMichael Baum 117586d259ceSMichael Baum /** 1176a89f6433SRongwei Liu * Select TXQ TIS number. 1177a89f6433SRongwei Liu * 1178a89f6433SRongwei Liu * @param dev 1179a89f6433SRongwei Liu * Pointer to Ethernet device. 1180a89f6433SRongwei Liu * @param queue_idx 1181a89f6433SRongwei Liu * Queue index in DPDK Tx queue array. 1182a89f6433SRongwei Liu * 1183a89f6433SRongwei Liu * @return 1184a89f6433SRongwei Liu * > 0 on success, a negative errno value otherwise. 1185a89f6433SRongwei Liu */ 1186a89f6433SRongwei Liu static uint32_t 1187a89f6433SRongwei Liu mlx5_get_txq_tis_num(struct rte_eth_dev *dev, uint16_t queue_idx) 1188a89f6433SRongwei Liu { 1189a89f6433SRongwei Liu struct mlx5_priv *priv = dev->data->dev_private; 1190a89f6433SRongwei Liu int tis_idx; 1191a89f6433SRongwei Liu 1192a89f6433SRongwei Liu if (priv->sh->bond.n_port && priv->sh->lag.affinity_mode == 1193a89f6433SRongwei Liu MLX5_LAG_MODE_TIS) { 1194a89f6433SRongwei Liu tis_idx = (priv->lag_affinity_idx + queue_idx) % 1195a89f6433SRongwei Liu priv->sh->bond.n_port; 1196a89f6433SRongwei Liu DRV_LOG(INFO, "port %d txq %d gets affinity %d and maps to PF %d.", 1197a89f6433SRongwei Liu dev->data->port_id, queue_idx, tis_idx + 1, 1198a89f6433SRongwei Liu priv->sh->lag.tx_remap_affinity[tis_idx]); 1199a89f6433SRongwei Liu } else { 1200a89f6433SRongwei Liu tis_idx = 0; 1201a89f6433SRongwei Liu } 1202a89f6433SRongwei Liu MLX5_ASSERT(priv->sh->tis[tis_idx]); 1203a89f6433SRongwei Liu return priv->sh->tis[tis_idx]->id; 1204a89f6433SRongwei Liu } 1205a89f6433SRongwei Liu 1206a89f6433SRongwei Liu /** 120786d259ceSMichael Baum * Create the Tx hairpin queue object. 120886d259ceSMichael Baum * 120986d259ceSMichael Baum * @param dev 121086d259ceSMichael Baum * Pointer to Ethernet device. 121186d259ceSMichael Baum * @param idx 121286d259ceSMichael Baum * Queue index in DPDK Tx queue array. 
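 *
 * The SQ behind the hairpin queue is normally placed in device memory; if the queue was configured with use_rte_memory, the WQ buffer is instead allocated from host memory and registered as a UMEM, falling back to device memory unless force_memory is set.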
121386d259ceSMichael Baum * 121486d259ceSMichael Baum * @return 1215f49f4483SMichael Baum * 0 on success, a negative errno value otherwise and rte_errno is set. 121686d259ceSMichael Baum */ 1217f49f4483SMichael Baum static int 121886d259ceSMichael Baum mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx) 121986d259ceSMichael Baum { 122086d259ceSMichael Baum struct mlx5_priv *priv = dev->data->dev_private; 12217274b417SDariusz Sosnowski struct mlx5_hca_attr *hca_attr = &priv->sh->cdev->config.hca_attr; 122286d259ceSMichael Baum struct mlx5_txq_data *txq_data = (*priv->txqs)[idx]; 122386d259ceSMichael Baum struct mlx5_txq_ctrl *txq_ctrl = 122486d259ceSMichael Baum container_of(txq_data, struct mlx5_txq_ctrl, txq); 12257274b417SDariusz Sosnowski struct mlx5_devx_create_sq_attr dev_mem_attr = { 0 }; 12267274b417SDariusz Sosnowski struct mlx5_devx_create_sq_attr host_mem_attr = { 0 }; 1227f49f4483SMichael Baum struct mlx5_txq_obj *tmpl = txq_ctrl->obj; 12287274b417SDariusz Sosnowski void *umem_buf = NULL; 12297274b417SDariusz Sosnowski void *umem_obj = NULL; 123086d259ceSMichael Baum uint32_t max_wq_data; 123186d259ceSMichael Baum 123286d259ceSMichael Baum MLX5_ASSERT(txq_data); 1233f49f4483SMichael Baum MLX5_ASSERT(tmpl); 123486d259ceSMichael Baum tmpl->txq_ctrl = txq_ctrl; 12357274b417SDariusz Sosnowski dev_mem_attr.hairpin = 1; 12367274b417SDariusz Sosnowski dev_mem_attr.tis_lst_sz = 1; 12377274b417SDariusz Sosnowski dev_mem_attr.tis_num = mlx5_get_txq_tis_num(dev, idx); 123853820561SMichael Baum max_wq_data = 123953820561SMichael Baum priv->sh->cdev->config.hca_attr.log_max_hairpin_wq_data_sz; 124086d259ceSMichael Baum /* Jumbo frames > 9KB should be supported, and more packets. */ 124186d259ceSMichael Baum if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) { 124286d259ceSMichael Baum if (priv->config.log_hp_size > max_wq_data) { 124386d259ceSMichael Baum DRV_LOG(ERR, "Total data size %u power of 2 is " 124486d259ceSMichael Baum "too large for hairpin.", 124586d259ceSMichael Baum priv->config.log_hp_size); 124686d259ceSMichael Baum rte_errno = ERANGE; 1247f49f4483SMichael Baum return -rte_errno; 124886d259ceSMichael Baum } 12497274b417SDariusz Sosnowski dev_mem_attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size; 125086d259ceSMichael Baum } else { 12517274b417SDariusz Sosnowski dev_mem_attr.wq_attr.log_hairpin_data_sz = 125286d259ceSMichael Baum (max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ? 125386d259ceSMichael Baum max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE; 125486d259ceSMichael Baum } 125586d259ceSMichael Baum /* Set the packets number to the maximum value for performance. 
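 * The data size and the queue stride are both expressed as log2 values, so the maximum packet count is obtained by subtracting the stride from the data size.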
*/ 12567274b417SDariusz Sosnowski dev_mem_attr.wq_attr.log_hairpin_num_packets = 12577274b417SDariusz Sosnowski dev_mem_attr.wq_attr.log_hairpin_data_sz - 125886d259ceSMichael Baum MLX5_HAIRPIN_QUEUE_STRIDE; 12597274b417SDariusz Sosnowski dev_mem_attr.hairpin_wq_buffer_type = MLX5_SQC_HAIRPIN_WQ_BUFFER_TYPE_INTERNAL_BUFFER; 12607274b417SDariusz Sosnowski if (txq_ctrl->hairpin_conf.use_rte_memory) { 12617274b417SDariusz Sosnowski uint32_t umem_size; 12627274b417SDariusz Sosnowski uint32_t umem_dbrec; 12637274b417SDariusz Sosnowski size_t alignment = MLX5_WQE_BUF_ALIGNMENT; 1264a89f6433SRongwei Liu 12657274b417SDariusz Sosnowski if (alignment == (size_t)-1) { 12667274b417SDariusz Sosnowski DRV_LOG(ERR, "Failed to get WQE buf alignment."); 12677274b417SDariusz Sosnowski rte_errno = ENOMEM; 12687274b417SDariusz Sosnowski return -rte_errno; 12697274b417SDariusz Sosnowski } 12707274b417SDariusz Sosnowski /* 12717274b417SDariusz Sosnowski * It is assumed that configuration is verified against capabilities 12727274b417SDariusz Sosnowski * during queue setup. 12737274b417SDariusz Sosnowski */ 12747274b417SDariusz Sosnowski MLX5_ASSERT(hca_attr->hairpin_sq_wq_in_host_mem); 12757274b417SDariusz Sosnowski MLX5_ASSERT(hca_attr->hairpin_sq_wqe_bb_size > 0); 12767274b417SDariusz Sosnowski rte_memcpy(&host_mem_attr, &dev_mem_attr, sizeof(host_mem_attr)); 12777274b417SDariusz Sosnowski umem_size = MLX5_WQE_SIZE * 12787274b417SDariusz Sosnowski RTE_BIT32(host_mem_attr.wq_attr.log_hairpin_num_packets); 12797274b417SDariusz Sosnowski umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE); 12807274b417SDariusz Sosnowski umem_size += MLX5_DBR_SIZE; 12817274b417SDariusz Sosnowski umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size, 12827274b417SDariusz Sosnowski alignment, priv->sh->numa_node); 12837274b417SDariusz Sosnowski if (umem_buf == NULL && txq_ctrl->hairpin_conf.force_memory) { 12847274b417SDariusz Sosnowski DRV_LOG(ERR, "Failed to allocate memory for hairpin TX queue"); 12857274b417SDariusz Sosnowski rte_errno = ENOMEM; 12867274b417SDariusz Sosnowski return -rte_errno; 12877274b417SDariusz Sosnowski } else if (umem_buf == NULL && !txq_ctrl->hairpin_conf.force_memory) { 12887274b417SDariusz Sosnowski DRV_LOG(WARNING, "Failed to allocate memory for hairpin TX queue." 12897274b417SDariusz Sosnowski " Falling back to TX queue located on the device."); 12907274b417SDariusz Sosnowski goto create_sq_on_device; 12917274b417SDariusz Sosnowski } 12927274b417SDariusz Sosnowski umem_obj = mlx5_os_umem_reg(priv->sh->cdev->ctx, 12937274b417SDariusz Sosnowski (void *)(uintptr_t)umem_buf, 12947274b417SDariusz Sosnowski umem_size, 12957274b417SDariusz Sosnowski IBV_ACCESS_LOCAL_WRITE); 12967274b417SDariusz Sosnowski if (umem_obj == NULL && txq_ctrl->hairpin_conf.force_memory) { 12977274b417SDariusz Sosnowski DRV_LOG(ERR, "Failed to register UMEM for hairpin TX queue"); 12987274b417SDariusz Sosnowski mlx5_free(umem_buf); 12997274b417SDariusz Sosnowski return -rte_errno; 13007274b417SDariusz Sosnowski } else if (umem_obj == NULL && !txq_ctrl->hairpin_conf.force_memory) { 13017274b417SDariusz Sosnowski DRV_LOG(WARNING, "Failed to register UMEM for hairpin TX queue." 
13027274b417SDariusz Sosnowski " Falling back to TX queue located on the device."); 13037274b417SDariusz Sosnowski rte_errno = 0; 13047274b417SDariusz Sosnowski mlx5_free(umem_buf); 13057274b417SDariusz Sosnowski goto create_sq_on_device; 13067274b417SDariusz Sosnowski } 13077274b417SDariusz Sosnowski host_mem_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC; 13087274b417SDariusz Sosnowski host_mem_attr.wq_attr.wq_umem_valid = 1; 13097274b417SDariusz Sosnowski host_mem_attr.wq_attr.wq_umem_id = mlx5_os_get_umem_id(umem_obj); 13107274b417SDariusz Sosnowski host_mem_attr.wq_attr.wq_umem_offset = 0; 13117274b417SDariusz Sosnowski host_mem_attr.wq_attr.dbr_umem_valid = 1; 13127274b417SDariusz Sosnowski host_mem_attr.wq_attr.dbr_umem_id = host_mem_attr.wq_attr.wq_umem_id; 13137274b417SDariusz Sosnowski host_mem_attr.wq_attr.dbr_addr = umem_dbrec; 13147274b417SDariusz Sosnowski host_mem_attr.wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE); 13157274b417SDariusz Sosnowski host_mem_attr.wq_attr.log_wq_sz = 13167274b417SDariusz Sosnowski host_mem_attr.wq_attr.log_hairpin_num_packets * 13177274b417SDariusz Sosnowski hca_attr->hairpin_sq_wqe_bb_size; 13187274b417SDariusz Sosnowski host_mem_attr.wq_attr.log_wq_pg_sz = MLX5_LOG_PAGE_SIZE; 13197274b417SDariusz Sosnowski host_mem_attr.hairpin_wq_buffer_type = MLX5_SQC_HAIRPIN_WQ_BUFFER_TYPE_HOST_MEMORY; 13207274b417SDariusz Sosnowski tmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->cdev->ctx, &host_mem_attr); 13217274b417SDariusz Sosnowski if (!tmpl->sq && txq_ctrl->hairpin_conf.force_memory) { 13227274b417SDariusz Sosnowski DRV_LOG(ERR, 13237274b417SDariusz Sosnowski "Port %u tx hairpin queue %u can't create SQ object.", 13247274b417SDariusz Sosnowski dev->data->port_id, idx); 13257274b417SDariusz Sosnowski claim_zero(mlx5_os_umem_dereg(umem_obj)); 13267274b417SDariusz Sosnowski mlx5_free(umem_buf); 13277274b417SDariusz Sosnowski return -rte_errno; 13287274b417SDariusz Sosnowski } else if (!tmpl->sq && !txq_ctrl->hairpin_conf.force_memory) { 13297274b417SDariusz Sosnowski DRV_LOG(WARNING, 13307274b417SDariusz Sosnowski "Port %u tx hairpin queue %u failed to allocate SQ object" 13317274b417SDariusz Sosnowski " using host memory. 
Falling back to TX queue located" 13327274b417SDariusz Sosnowski " on the device", 13337274b417SDariusz Sosnowski dev->data->port_id, idx); 13347274b417SDariusz Sosnowski rte_errno = 0; 13357274b417SDariusz Sosnowski claim_zero(mlx5_os_umem_dereg(umem_obj)); 13367274b417SDariusz Sosnowski mlx5_free(umem_buf); 13377274b417SDariusz Sosnowski goto create_sq_on_device; 13387274b417SDariusz Sosnowski } 13397274b417SDariusz Sosnowski tmpl->umem_buf_wq_buffer = umem_buf; 13407274b417SDariusz Sosnowski tmpl->umem_obj_wq_buffer = umem_obj; 13417274b417SDariusz Sosnowski return 0; 13427274b417SDariusz Sosnowski } 13437274b417SDariusz Sosnowski 13447274b417SDariusz Sosnowski create_sq_on_device: 13457274b417SDariusz Sosnowski tmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->cdev->ctx, &dev_mem_attr); 134686d259ceSMichael Baum if (!tmpl->sq) { 134786d259ceSMichael Baum DRV_LOG(ERR, 134886d259ceSMichael Baum "Port %u tx hairpin queue %u can't create SQ object.", 134986d259ceSMichael Baum dev->data->port_id, idx); 135086d259ceSMichael Baum rte_errno = errno; 1351f49f4483SMichael Baum return -rte_errno; 135286d259ceSMichael Baum } 1353f49f4483SMichael Baum return 0; 135486d259ceSMichael Baum } 135586d259ceSMichael Baum 1356f1ae0b35SOphir Munk #if defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) || !defined(HAVE_INFINIBAND_VERBS_H) 135786d259ceSMichael Baum /** 135886d259ceSMichael Baum * Destroy the Tx queue DevX object. 135986d259ceSMichael Baum * 136086d259ceSMichael Baum * @param txq_obj 136186d259ceSMichael Baum * Txq object to destroy. 136286d259ceSMichael Baum */ 136386d259ceSMichael Baum static void 136488f2e3f1SMichael Baum mlx5_txq_release_devx_resources(struct mlx5_txq_obj *txq_obj) 136586d259ceSMichael Baum { 136674e91860SMichael Baum mlx5_devx_sq_destroy(&txq_obj->sq_obj); 136774e91860SMichael Baum memset(&txq_obj->sq_obj, 0, sizeof(txq_obj->sq_obj)); 13685f04f70cSMichael Baum mlx5_devx_cq_destroy(&txq_obj->cq_obj); 13695f04f70cSMichael Baum memset(&txq_obj->cq_obj, 0, sizeof(txq_obj->cq_obj)); 137086d259ceSMichael Baum } 137186d259ceSMichael Baum 137286d259ceSMichael Baum /** 137388f2e3f1SMichael Baum * Create a SQ object and its resources using DevX. 137486d259ceSMichael Baum * 137586d259ceSMichael Baum * @param dev 137686d259ceSMichael Baum * Pointer to Ethernet device. 137786d259ceSMichael Baum * @param idx 137886d259ceSMichael Baum * Queue index in DPDK Tx queue array. 137974e91860SMichael Baum * @param[in] log_desc_n 138074e91860SMichael Baum * Log of number of descriptors in queue. 138186d259ceSMichael Baum * 138286d259ceSMichael Baum * @return 138374e91860SMichael Baum * 0 on success, a negative errno value otherwise and rte_errno is set. 
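 *
 * The SQ is attached to the CQ created beforehand (cqn), to the Tx UAR page and to the TIS selected by mlx5_get_txq_tis_num(); multi-packet send, inline mode and software parser support are taken from the device configuration.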
138486d259ceSMichael Baum */ 138574e91860SMichael Baum static int 138674e91860SMichael Baum mlx5_txq_create_devx_sq_resources(struct rte_eth_dev *dev, uint16_t idx, 138774e91860SMichael Baum uint16_t log_desc_n) 138886d259ceSMichael Baum { 138986d259ceSMichael Baum struct mlx5_priv *priv = dev->data->dev_private; 1390fe46b20cSMichael Baum struct mlx5_common_device *cdev = priv->sh->cdev; 13915dfa003dSMichael Baum struct mlx5_uar *uar = &priv->sh->tx_uar; 139286d259ceSMichael Baum struct mlx5_txq_data *txq_data = (*priv->txqs)[idx]; 139388f2e3f1SMichael Baum struct mlx5_txq_ctrl *txq_ctrl = 139488f2e3f1SMichael Baum container_of(txq_data, struct mlx5_txq_ctrl, txq); 139588f2e3f1SMichael Baum struct mlx5_txq_obj *txq_obj = txq_ctrl->obj; 139674e91860SMichael Baum struct mlx5_devx_create_sq_attr sq_attr = { 139774e91860SMichael Baum .flush_in_error_en = 1, 139874e91860SMichael Baum .allow_multi_pkt_send_wqe = !!priv->config.mps, 139953820561SMichael Baum .min_wqe_inline_mode = cdev->config.hca_attr.vport_inline_mode, 140087af0d1eSMichael Baum .allow_swp = !!priv->sh->dev_cap.swp, 140174e91860SMichael Baum .cqn = txq_obj->cq_obj.cq->id, 140274e91860SMichael Baum .tis_lst_sz = 1, 140374e91860SMichael Baum .wq_attr = (struct mlx5_devx_wq_attr){ 1404fe46b20cSMichael Baum .pd = cdev->pdn, 14055dfa003dSMichael Baum .uar_page = mlx5_os_get_devx_uar_page_id(uar->obj), 140674e91860SMichael Baum }, 1407fe46b20cSMichael Baum .ts_format = 1408fe46b20cSMichael Baum mlx5_ts_format_conv(cdev->config.hca_attr.sq_ts_format), 1409a89f6433SRongwei Liu .tis_num = mlx5_get_txq_tis_num(dev, idx), 141074e91860SMichael Baum }; 1411a89f6433SRongwei Liu 141286d259ceSMichael Baum /* Create Send Queue object with DevX. */ 1413fe46b20cSMichael Baum return mlx5_devx_sq_create(cdev->ctx, &txq_obj->sq_obj, 1414ca1418ceSMichael Baum log_desc_n, &sq_attr, priv->sh->numa_node); 141586d259ceSMichael Baum } 141686d259ceSMichael Baum #endif 141786d259ceSMichael Baum 141886d259ceSMichael Baum /** 141986d259ceSMichael Baum * Create the Tx queue DevX object. 142086d259ceSMichael Baum * 142186d259ceSMichael Baum * @param dev 142286d259ceSMichael Baum * Pointer to Ethernet device. 142386d259ceSMichael Baum * @param idx 142486d259ceSMichael Baum * Queue index in DPDK Tx queue array. 142586d259ceSMichael Baum * 142686d259ceSMichael Baum * @return 1427f49f4483SMichael Baum * 0 on success, a negative errno value otherwise and rte_errno is set. 
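 *
 * Hairpin queues are delegated to mlx5_txq_obj_hairpin_new(); for regular queues a CQ and an SQ are created through DevX, their sizes derived from the descriptor count and inline settings, and the SQ is finally moved to the ready state.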
142886d259ceSMichael Baum */
1429f49f4483SMichael Baum int
143086d259ceSMichael Baum mlx5_txq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
143186d259ceSMichael Baum {
143286d259ceSMichael Baum struct mlx5_priv *priv = dev->data->dev_private;
143386d259ceSMichael Baum struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
143486d259ceSMichael Baum struct mlx5_txq_ctrl *txq_ctrl =
143586d259ceSMichael Baum container_of(txq_data, struct mlx5_txq_ctrl, txq);
143686d259ceSMichael Baum
1437c06f77aeSMichael Baum if (txq_ctrl->is_hairpin)
143886d259ceSMichael Baum return mlx5_txq_obj_hairpin_new(dev, idx);
1439f1ae0b35SOphir Munk #if !defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) && defined(HAVE_INFINIBAND_VERBS_H)
144086d259ceSMichael Baum DRV_LOG(ERR, "Port %u Tx queue %u cannot create with DevX, no UAR.",
144186d259ceSMichael Baum dev->data->port_id, idx);
144286d259ceSMichael Baum rte_errno = ENOMEM;
1443f49f4483SMichael Baum return -rte_errno;
144486d259ceSMichael Baum #else
14455dfa003dSMichael Baum struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
144686d259ceSMichael Baum struct mlx5_dev_ctx_shared *sh = priv->sh;
1447f49f4483SMichael Baum struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
14485f04f70cSMichael Baum struct mlx5_devx_cq_attr cq_attr = {
14495dfa003dSMichael Baum .uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar.obj),
14505f04f70cSMichael Baum };
14515f04f70cSMichael Baum uint32_t cqe_n, log_desc_n;
145200984de5SViacheslav Ovsiienko uint32_t wqe_n, wqe_size;
145386d259ceSMichael Baum int ret = 0;
145486d259ceSMichael Baum
145586d259ceSMichael Baum MLX5_ASSERT(txq_data);
1456f49f4483SMichael Baum MLX5_ASSERT(txq_obj);
14575dfa003dSMichael Baum MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
14585dfa003dSMichael Baum MLX5_ASSERT(ppriv);
145986d259ceSMichael Baum txq_obj->txq_ctrl = txq_ctrl;
146086d259ceSMichael Baum txq_obj->dev = dev;
14615f04f70cSMichael Baum cqe_n = (1UL << txq_data->elts_n) / MLX5_TX_COMP_THRESH +
14625f04f70cSMichael Baum 1 + MLX5_TX_COMP_THRESH_INLINE_DIV;
14635f04f70cSMichael Baum log_desc_n = log2above(cqe_n);
14645f04f70cSMichael Baum cqe_n = 1UL << log_desc_n;
14655f04f70cSMichael Baum if (cqe_n > UINT16_MAX) {
14665f04f70cSMichael Baum DRV_LOG(ERR, "Port %u Tx queue %u requests too many CQEs %u.",
14675f04f70cSMichael Baum dev->data->port_id, txq_data->idx, cqe_n);
14685f04f70cSMichael Baum rte_errno = EINVAL;
14695f04f70cSMichael Baum return -rte_errno;
14705f04f70cSMichael Baum }
14715f04f70cSMichael Baum /* Create completion queue object with DevX. */
1472ca1418ceSMichael Baum ret = mlx5_devx_cq_create(sh->cdev->ctx, &txq_obj->cq_obj, log_desc_n,
14735f04f70cSMichael Baum &cq_attr, priv->sh->numa_node);
14745f04f70cSMichael Baum if (ret) {
14755f04f70cSMichael Baum DRV_LOG(ERR, "Port %u Tx queue %u CQ creation failure.",
14765f04f70cSMichael Baum dev->data->port_id, idx);
147786d259ceSMichael Baum goto error;
147886d259ceSMichael Baum }
14795f04f70cSMichael Baum txq_data->cqe_n = log_desc_n;
14805f04f70cSMichael Baum txq_data->cqe_s = cqe_n;
148186d259ceSMichael Baum txq_data->cqe_m = txq_data->cqe_s - 1;
14825f04f70cSMichael Baum txq_data->cqes = txq_obj->cq_obj.cqes;
148386d259ceSMichael Baum txq_data->cq_ci = 0;
148486d259ceSMichael Baum txq_data->cq_pi = 0;
14855f04f70cSMichael Baum txq_data->cq_db = txq_obj->cq_obj.db_rec;
148686d259ceSMichael Baum *txq_data->cq_db = 0;
148700984de5SViacheslav Ovsiienko /*
148800984de5SViacheslav Ovsiienko * Adjust the amount of WQEs depending on inline settings.
148900984de5SViacheslav Ovsiienko * The number of descriptors should be enough to handle 149000984de5SViacheslav Ovsiienko * the specified number of packets. If queue is being created 149100984de5SViacheslav Ovsiienko * with Verbs the rdma-core does queue size adjustment 149200984de5SViacheslav Ovsiienko * internally in the mlx5_calc_sq_size(), we do the same 149300984de5SViacheslav Ovsiienko * for the queue being created with DevX at this point. 149400984de5SViacheslav Ovsiienko */ 149500984de5SViacheslav Ovsiienko wqe_size = txq_data->tso_en ? 149600984de5SViacheslav Ovsiienko RTE_ALIGN(txq_ctrl->max_tso_header, MLX5_WSEG_SIZE) : 0; 149700984de5SViacheslav Ovsiienko wqe_size += sizeof(struct mlx5_wqe_cseg) + 149800984de5SViacheslav Ovsiienko sizeof(struct mlx5_wqe_eseg) + 149900984de5SViacheslav Ovsiienko sizeof(struct mlx5_wqe_dseg); 150000984de5SViacheslav Ovsiienko if (txq_data->inlen_send) 150100984de5SViacheslav Ovsiienko wqe_size = RTE_MAX(wqe_size, sizeof(struct mlx5_wqe_cseg) + 150200984de5SViacheslav Ovsiienko sizeof(struct mlx5_wqe_eseg) + 150300984de5SViacheslav Ovsiienko RTE_ALIGN(txq_data->inlen_send + 150400984de5SViacheslav Ovsiienko sizeof(uint32_t), 150500984de5SViacheslav Ovsiienko MLX5_WSEG_SIZE)); 150600984de5SViacheslav Ovsiienko wqe_size = RTE_ALIGN(wqe_size, MLX5_WQE_SIZE) / MLX5_WQE_SIZE; 150786d259ceSMichael Baum /* Create Send Queue object with DevX. */ 150800984de5SViacheslav Ovsiienko wqe_n = RTE_MIN((1UL << txq_data->elts_n) * wqe_size, 150991d1cfafSMichael Baum (uint32_t)priv->sh->dev_cap.max_qp_wr); 151074e91860SMichael Baum log_desc_n = log2above(wqe_n); 151174e91860SMichael Baum ret = mlx5_txq_create_devx_sq_resources(dev, idx, log_desc_n); 151274e91860SMichael Baum if (ret) { 151374e91860SMichael Baum DRV_LOG(ERR, "Port %u Tx queue %u SQ creation failure.", 151474e91860SMichael Baum dev->data->port_id, idx); 151586d259ceSMichael Baum rte_errno = errno; 151686d259ceSMichael Baum goto error; 151786d259ceSMichael Baum } 151886d259ceSMichael Baum /* Create the Work Queue. */ 151974e91860SMichael Baum txq_data->wqe_n = log_desc_n; 152086d259ceSMichael Baum txq_data->wqe_s = 1 << txq_data->wqe_n; 152186d259ceSMichael Baum txq_data->wqe_m = txq_data->wqe_s - 1; 152274e91860SMichael Baum txq_data->wqes = (struct mlx5_wqe *)(uintptr_t)txq_obj->sq_obj.wqes; 152386d259ceSMichael Baum txq_data->wqes_end = txq_data->wqes + txq_data->wqe_s; 152486d259ceSMichael Baum txq_data->wqe_ci = 0; 152586d259ceSMichael Baum txq_data->wqe_pi = 0; 152686d259ceSMichael Baum txq_data->wqe_comp = 0; 152786d259ceSMichael Baum txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV; 152831625e62SViacheslav Ovsiienko txq_data->qp_db = &txq_obj->sq_obj.db_rec[MLX5_SND_DBR]; 152986d259ceSMichael Baum *txq_data->qp_db = 0; 153074e91860SMichael Baum txq_data->qp_num_8s = txq_obj->sq_obj.sq->id << 8; 1531a6b9d5a5SMichael Baum txq_data->db_heu = sh->cdev->config.dbnc == MLX5_SQ_DB_HEURISTIC; 15325dfa003dSMichael Baum txq_data->db_nc = sh->tx_uar.dbnc; 15332f5122dfSViacheslav Ovsiienko txq_data->wait_on_time = !!(!sh->config.tx_pp && 15342f5122dfSViacheslav Ovsiienko sh->cdev->config.hca_attr.wait_on_time); 153586d259ceSMichael Baum /* Change Send Queue state to Ready-to-Send. 
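 * A newly created SQ starts in the RST state; the RST2RDY transition below is required before the queue can process posted WQEs.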
*/
1536686d05b6SXueming Li ret = mlx5_txq_devx_modify(txq_obj, MLX5_TXQ_MOD_RST2RDY, 0);
153786d259ceSMichael Baum if (ret) {
153886d259ceSMichael Baum rte_errno = errno;
153986d259ceSMichael Baum DRV_LOG(ERR,
1540a9c79306SMichael Baum "Port %u Tx queue %u SQ state to SQC_STATE_RDY failed.",
154186d259ceSMichael Baum dev->data->port_id, idx);
154286d259ceSMichael Baum goto error;
154386d259ceSMichael Baum }
154486d259ceSMichael Baum #ifdef HAVE_IBV_FLOW_DV_SUPPORT
154586d259ceSMichael Baum /*
154686d259ceSMichael Baum * If using DevX, we need to query and store the TIS transport domain value.
154786d259ceSMichael Baum * This is done once per port.
154886d259ceSMichael Baum * Will use this value on Rx, when creating matching TIR.
154986d259ceSMichael Baum */
155086d259ceSMichael Baum if (!priv->sh->tdn)
155186d259ceSMichael Baum priv->sh->tdn = priv->sh->td->id;
155286d259ceSMichael Baum #endif
155386d259ceSMichael Baum txq_ctrl->uar_mmap_offset =
15545dfa003dSMichael Baum mlx5_os_get_devx_uar_mmap_offset(sh->tx_uar.obj);
15555dfa003dSMichael Baum ppriv->uar_table[txq_data->idx] = sh->tx_uar.bf_db;
1556876b5d52SMatan Azrad dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
1557f49f4483SMichael Baum return 0;
155886d259ceSMichael Baum error:
155986d259ceSMichael Baum ret = rte_errno; /* Save rte_errno before cleanup. */
156088f2e3f1SMichael Baum mlx5_txq_release_devx_resources(txq_obj);
156186d259ceSMichael Baum rte_errno = ret; /* Restore rte_errno. */
1562f49f4483SMichael Baum return -rte_errno;
156386d259ceSMichael Baum #endif
156486d259ceSMichael Baum }
156586d259ceSMichael Baum
156686d259ceSMichael Baum /**
156786d259ceSMichael Baum * Release a Tx DevX queue object.
156886d259ceSMichael Baum *
156986d259ceSMichael Baum * @param txq_obj
157086d259ceSMichael Baum * DevX Tx queue object.
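 *
 * For hairpin queues the SQ, the TIS and any host-memory WQ buffer (UMEM) are destroyed; for regular queues the DevX SQ and CQ resources are released.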
157186d259ceSMichael Baum */ 157286d259ceSMichael Baum void 157386d259ceSMichael Baum mlx5_txq_devx_obj_release(struct mlx5_txq_obj *txq_obj) 157486d259ceSMichael Baum { 157586d259ceSMichael Baum MLX5_ASSERT(txq_obj); 1576c06f77aeSMichael Baum if (txq_obj->txq_ctrl->is_hairpin) { 15777274b417SDariusz Sosnowski if (txq_obj->sq) { 15787274b417SDariusz Sosnowski claim_zero(mlx5_devx_cmd_destroy(txq_obj->sq)); 15797274b417SDariusz Sosnowski txq_obj->sq = NULL; 15807274b417SDariusz Sosnowski } 158186d259ceSMichael Baum if (txq_obj->tis) 158286d259ceSMichael Baum claim_zero(mlx5_devx_cmd_destroy(txq_obj->tis)); 15837274b417SDariusz Sosnowski if (txq_obj->umem_obj_wq_buffer) { 15847274b417SDariusz Sosnowski claim_zero(mlx5_os_umem_dereg(txq_obj->umem_obj_wq_buffer)); 15857274b417SDariusz Sosnowski txq_obj->umem_obj_wq_buffer = NULL; 15867274b417SDariusz Sosnowski } 15877274b417SDariusz Sosnowski if (txq_obj->umem_buf_wq_buffer) { 15887274b417SDariusz Sosnowski mlx5_free(txq_obj->umem_buf_wq_buffer); 15897274b417SDariusz Sosnowski txq_obj->umem_buf_wq_buffer = NULL; 15907274b417SDariusz Sosnowski } 1591f1ae0b35SOphir Munk #if defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) || !defined(HAVE_INFINIBAND_VERBS_H) 159286d259ceSMichael Baum } else { 159388f2e3f1SMichael Baum mlx5_txq_release_devx_resources(txq_obj); 159486d259ceSMichael Baum #endif 159586d259ceSMichael Baum } 159686d259ceSMichael Baum } 159786d259ceSMichael Baum 15988bb2410eSOphir Munk struct mlx5_obj_ops devx_obj_ops = { 15998bb2410eSOphir Munk .rxq_obj_modify_vlan_strip = mlx5_rxq_obj_modify_rq_vlan_strip, 16006deb19e1SMichael Baum .rxq_obj_new = mlx5_rxq_devx_obj_new, 160132287079SMichael Baum .rxq_event_get = mlx5_rx_devx_get_event, 1602c279f187SMichael Baum .rxq_obj_modify = mlx5_devx_modify_rq, 16036deb19e1SMichael Baum .rxq_obj_release = mlx5_rxq_devx_obj_release, 160425025da3SSpike Du .rxq_event_get_lwm = mlx5_rx_devx_get_event_lwm, 160525ae7f1aSMichael Baum .ind_table_new = mlx5_devx_ind_table_new, 1606fa7ad49eSAndrey Vesnovaty .ind_table_modify = mlx5_devx_ind_table_modify, 160725ae7f1aSMichael Baum .ind_table_destroy = mlx5_devx_ind_table_destroy, 160885552726SMichael Baum .hrxq_new = mlx5_devx_hrxq_new, 160985552726SMichael Baum .hrxq_destroy = mlx5_devx_tir_destroy, 1610b8cc58c1SAndrey Vesnovaty .hrxq_modify = mlx5_devx_hrxq_modify, 16110c762e81SMichael Baum .drop_action_create = mlx5_devx_drop_action_create, 16120c762e81SMichael Baum .drop_action_destroy = mlx5_devx_drop_action_destroy, 161386d259ceSMichael Baum .txq_obj_new = mlx5_txq_devx_obj_new, 1614686d05b6SXueming Li .txq_obj_modify = mlx5_txq_devx_modify, 161586d259ceSMichael Baum .txq_obj_release = mlx5_txq_devx_obj_release, 161623233fd6SBing Zhao .lb_dummy_queue_create = NULL, 161723233fd6SBing Zhao .lb_dummy_queue_release = NULL, 16188bb2410eSOphir Munk }; 1619