18bb2410eSOphir Munk /* SPDX-License-Identifier: BSD-3-Clause 28bb2410eSOphir Munk * Copyright 2020 Mellanox Technologies, Ltd 38bb2410eSOphir Munk */ 48bb2410eSOphir Munk 58bb2410eSOphir Munk #include <stddef.h> 68bb2410eSOphir Munk #include <errno.h> 7c279f187SMichael Baum #include <stdbool.h> 88bb2410eSOphir Munk #include <string.h> 98bb2410eSOphir Munk #include <stdint.h> 108bb2410eSOphir Munk #include <sys/queue.h> 118bb2410eSOphir Munk 128bb2410eSOphir Munk #include <rte_malloc.h> 138bb2410eSOphir Munk #include <rte_common.h> 148bb2410eSOphir Munk #include <rte_eal_paging.h> 158bb2410eSOphir Munk 168bb2410eSOphir Munk #include <mlx5_glue.h> 178bb2410eSOphir Munk #include <mlx5_devx_cmds.h> 185f04f70cSMichael Baum #include <mlx5_common_devx.h> 198bb2410eSOphir Munk #include <mlx5_malloc.h> 208bb2410eSOphir Munk 218bb2410eSOphir Munk #include "mlx5.h" 228bb2410eSOphir Munk #include "mlx5_common_os.h" 23377b69fbSMichael Baum #include "mlx5_tx.h" 24151cbe3aSMichael Baum #include "mlx5_rx.h" 258bb2410eSOphir Munk #include "mlx5_utils.h" 268bb2410eSOphir Munk #include "mlx5_devx.h" 2787e2db37SMichael Baum #include "mlx5_flow.h" 2888019723SOphir Munk #include "mlx5_flow_os.h" 29f6dee900SMichael Baum 30f6dee900SMichael Baum /** 311944fbc3SSuanming Mou * Validate given external queue's port is valid or not. 321944fbc3SSuanming Mou * 331944fbc3SSuanming Mou * @param[in] port_id 341944fbc3SSuanming Mou * The port identifier of the Ethernet device. 351944fbc3SSuanming Mou * 361944fbc3SSuanming Mou * @return 371944fbc3SSuanming Mou * 0 on success, non-0 otherwise 381944fbc3SSuanming Mou */ 391944fbc3SSuanming Mou int 401944fbc3SSuanming Mou mlx5_devx_extq_port_validate(uint16_t port_id) 411944fbc3SSuanming Mou { 421944fbc3SSuanming Mou struct rte_eth_dev *dev; 431944fbc3SSuanming Mou struct mlx5_priv *priv; 441944fbc3SSuanming Mou 451944fbc3SSuanming Mou if (rte_eth_dev_is_valid_port(port_id) < 0) { 461944fbc3SSuanming Mou DRV_LOG(ERR, "There is no Ethernet device for port %u.", 471944fbc3SSuanming Mou port_id); 481944fbc3SSuanming Mou rte_errno = ENODEV; 491944fbc3SSuanming Mou return -rte_errno; 501944fbc3SSuanming Mou } 511944fbc3SSuanming Mou dev = &rte_eth_devices[port_id]; 521944fbc3SSuanming Mou priv = dev->data->dev_private; 531944fbc3SSuanming Mou if (!mlx5_imported_pd_and_ctx(priv->sh->cdev)) { 541944fbc3SSuanming Mou DRV_LOG(ERR, "Port %u " 551944fbc3SSuanming Mou "external queue isn't supported on local PD and CTX.", 561944fbc3SSuanming Mou port_id); 571944fbc3SSuanming Mou rte_errno = ENOTSUP; 581944fbc3SSuanming Mou return -rte_errno; 591944fbc3SSuanming Mou } 601944fbc3SSuanming Mou if (!mlx5_devx_obj_ops_en(priv->sh)) { 611944fbc3SSuanming Mou DRV_LOG(ERR, 621944fbc3SSuanming Mou "Port %u external queue isn't supported by Verbs API.", 631944fbc3SSuanming Mou port_id); 641944fbc3SSuanming Mou rte_errno = ENOTSUP; 651944fbc3SSuanming Mou return -rte_errno; 661944fbc3SSuanming Mou } 671944fbc3SSuanming Mou return 0; 681944fbc3SSuanming Mou } 691944fbc3SSuanming Mou 701944fbc3SSuanming Mou /** 718bb2410eSOphir Munk * Modify RQ vlan stripping offload 728bb2410eSOphir Munk * 735ceb3a02SXueming Li * @param rxq 745ceb3a02SXueming Li * Rx queue. 755ceb3a02SXueming Li * @param on 765ceb3a02SXueming Li * Enable/disable VLAN stripping. 
778bb2410eSOphir Munk * 78f6dee900SMichael Baum * @return 79f6dee900SMichael Baum * 0 on success, non-0 otherwise 808bb2410eSOphir Munk */ 818bb2410eSOphir Munk static int 825ceb3a02SXueming Li mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_priv *rxq, int on) 838bb2410eSOphir Munk { 848bb2410eSOphir Munk struct mlx5_devx_modify_rq_attr rq_attr; 858bb2410eSOphir Munk 868bb2410eSOphir Munk memset(&rq_attr, 0, sizeof(rq_attr)); 878bb2410eSOphir Munk rq_attr.rq_state = MLX5_RQC_STATE_RDY; 888bb2410eSOphir Munk rq_attr.state = MLX5_RQC_STATE_RDY; 898bb2410eSOphir Munk rq_attr.vsd = (on ? 0 : 1); 908bb2410eSOphir Munk rq_attr.modify_bitmask = MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD; 915ceb3a02SXueming Li return mlx5_devx_cmd_modify_rq(rxq->devx_rq.rq, &rq_attr); 928bb2410eSOphir Munk } 938bb2410eSOphir Munk 946deb19e1SMichael Baum /** 95fa2c85ccSMichael Baum * Modify RQ using DevX API. 96fa2c85ccSMichael Baum * 975ceb3a02SXueming Li * @param rxq 985ceb3a02SXueming Li * DevX rx queue. 994c6d80f1SMichael Baum * @param type 1004c6d80f1SMichael Baum * Type of change queue state. 101fa2c85ccSMichael Baum * 102fa2c85ccSMichael Baum * @return 103fa2c85ccSMichael Baum * 0 on success, a negative errno value otherwise and rte_errno is set. 104fa2c85ccSMichael Baum */ 1057158e46cSSpike Du int 1065ceb3a02SXueming Li mlx5_devx_modify_rq(struct mlx5_rxq_priv *rxq, uint8_t type) 107fa2c85ccSMichael Baum { 108fa2c85ccSMichael Baum struct mlx5_devx_modify_rq_attr rq_attr; 109fa2c85ccSMichael Baum 110fa2c85ccSMichael Baum memset(&rq_attr, 0, sizeof(rq_attr)); 1114c6d80f1SMichael Baum switch (type) { 1124c6d80f1SMichael Baum case MLX5_RXQ_MOD_ERR2RST: 1134c6d80f1SMichael Baum rq_attr.rq_state = MLX5_RQC_STATE_ERR; 1144c6d80f1SMichael Baum rq_attr.state = MLX5_RQC_STATE_RST; 1154c6d80f1SMichael Baum break; 1164c6d80f1SMichael Baum case MLX5_RXQ_MOD_RST2RDY: 117fa2c85ccSMichael Baum rq_attr.rq_state = MLX5_RQC_STATE_RST; 118fa2c85ccSMichael Baum rq_attr.state = MLX5_RQC_STATE_RDY; 1197158e46cSSpike Du if (rxq->lwm) { 1207158e46cSSpike Du rq_attr.modify_bitmask |= 1217158e46cSSpike Du MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_WQ_LWM; 1227158e46cSSpike Du rq_attr.lwm = rxq->lwm; 1237158e46cSSpike Du } 1244c6d80f1SMichael Baum break; 1254c6d80f1SMichael Baum case MLX5_RXQ_MOD_RDY2ERR: 1264c6d80f1SMichael Baum rq_attr.rq_state = MLX5_RQC_STATE_RDY; 1274c6d80f1SMichael Baum rq_attr.state = MLX5_RQC_STATE_ERR; 1284c6d80f1SMichael Baum break; 1294c6d80f1SMichael Baum case MLX5_RXQ_MOD_RDY2RST: 130fa2c85ccSMichael Baum rq_attr.rq_state = MLX5_RQC_STATE_RDY; 131fa2c85ccSMichael Baum rq_attr.state = MLX5_RQC_STATE_RST; 1324c6d80f1SMichael Baum break; 1337158e46cSSpike Du case MLX5_RXQ_MOD_RDY2RDY: 1347158e46cSSpike Du rq_attr.rq_state = MLX5_RQC_STATE_RDY; 1357158e46cSSpike Du rq_attr.state = MLX5_RQC_STATE_RDY; 1367158e46cSSpike Du rq_attr.modify_bitmask |= MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_WQ_LWM; 1377158e46cSSpike Du rq_attr.lwm = rxq->lwm; 1387158e46cSSpike Du break; 1394c6d80f1SMichael Baum default: 1404c6d80f1SMichael Baum break; 141fa2c85ccSMichael Baum } 142c06f77aeSMichael Baum if (rxq->ctrl->is_hairpin) 14309c25553SXueming Li return mlx5_devx_cmd_modify_rq(rxq->ctrl->obj->rq, &rq_attr); 1445ceb3a02SXueming Li return mlx5_devx_cmd_modify_rq(rxq->devx_rq.rq, &rq_attr); 145fa2c85ccSMichael Baum } 146fa2c85ccSMichael Baum 147fa2c85ccSMichael Baum /** 1485d9f3c3fSMichael Baum * Modify SQ using DevX API. 1495d9f3c3fSMichael Baum * 1505d9f3c3fSMichael Baum * @param txq_obj 1515d9f3c3fSMichael Baum * DevX Tx queue object. 
 * @param type
 *   Type of change queue state.
 * @param dev_port
 *   Unused; kept only so that this function and its Verbs counterpart
 *   share the same prototype.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_txq_devx_modify(struct mlx5_txq_obj *obj, enum mlx5_txq_modify_type type,
		     uint8_t dev_port)
{
	struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
	int ret;

	if (type != MLX5_TXQ_MOD_RST2RDY) {
		/* Change queue state to reset. */
		if (type == MLX5_TXQ_MOD_ERR2RDY)
			msq_attr.sq_state = MLX5_SQC_STATE_ERR;
		else
			msq_attr.sq_state = MLX5_SQC_STATE_RDY;
		msq_attr.state = MLX5_SQC_STATE_RST;
		ret = mlx5_devx_cmd_modify_sq(obj->sq_obj.sq, &msq_attr);
		if (ret) {
			DRV_LOG(ERR, "Cannot change the Tx SQ state to RESET"
				" %s", strerror(errno));
			rte_errno = errno;
			return ret;
		}
	}
	if (type != MLX5_TXQ_MOD_RDY2RST) {
		/* Change queue state to ready. */
		msq_attr.sq_state = MLX5_SQC_STATE_RST;
		msq_attr.state = MLX5_SQC_STATE_RDY;
		ret = mlx5_devx_cmd_modify_sq(obj->sq_obj.sq, &msq_attr);
		if (ret) {
			DRV_LOG(ERR, "Cannot change the Tx SQ state to READY"
				" %s", strerror(errno));
			rte_errno = errno;
			return ret;
		}
	}
	/*
	 * The dev_port argument is meaningful only in the Verbs implementation.
	 * This function and its Verbs counterpart are assigned to the same
	 * callback pointer, so they must keep identical parameters.
	 */
	(void)dev_port;
	return 0;
}

/**
 * Release an Rx DevX queue object.
 *
 * @param rxq
 *   DevX Rx queue.
2086deb19e1SMichael Baum */ 2096deb19e1SMichael Baum static void 2105ceb3a02SXueming Li mlx5_rxq_devx_obj_release(struct mlx5_rxq_priv *rxq) 2116deb19e1SMichael Baum { 21209c25553SXueming Li struct mlx5_rxq_obj *rxq_obj = rxq->ctrl->obj; 2135ceb3a02SXueming Li 21409c25553SXueming Li if (rxq_obj == NULL) 21509c25553SXueming Li return; 216c06f77aeSMichael Baum if (rxq_obj->rxq_ctrl->is_hairpin) { 21709c25553SXueming Li if (rxq_obj->rq == NULL) 21809c25553SXueming Li return; 2195ceb3a02SXueming Li mlx5_devx_modify_rq(rxq, MLX5_RXQ_MOD_RDY2RST); 220fa2c85ccSMichael Baum claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq)); 2216deb19e1SMichael Baum } else { 22209c25553SXueming Li if (rxq->devx_rq.rq == NULL) 22309c25553SXueming Li return; 2245ceb3a02SXueming Li mlx5_devx_rq_destroy(&rxq->devx_rq); 22509c25553SXueming Li if (rxq->devx_rq.rmp != NULL && rxq->devx_rq.rmp->ref_cnt > 0) 22609c25553SXueming Li return; 2275ceb3a02SXueming Li mlx5_devx_cq_destroy(&rxq_obj->cq_obj); 2285ceb3a02SXueming Li memset(&rxq_obj->cq_obj, 0, sizeof(rxq_obj->cq_obj)); 2295ceb3a02SXueming Li if (rxq_obj->devx_channel) { 23098174626STal Shnaiderman mlx5_os_devx_destroy_event_channel 2316deb19e1SMichael Baum (rxq_obj->devx_channel); 2325ceb3a02SXueming Li rxq_obj->devx_channel = NULL; 2335ceb3a02SXueming Li } 2346deb19e1SMichael Baum } 23509c25553SXueming Li rxq->ctrl->started = false; 2366deb19e1SMichael Baum } 2376deb19e1SMichael Baum 2386deb19e1SMichael Baum /** 23932287079SMichael Baum * Get event for an Rx DevX queue object. 24032287079SMichael Baum * 24132287079SMichael Baum * @param rxq_obj 24232287079SMichael Baum * DevX Rx queue object. 24332287079SMichael Baum * 24432287079SMichael Baum * @return 24532287079SMichael Baum * 0 on success, a negative errno value otherwise and rte_errno is set. 24632287079SMichael Baum */ 24732287079SMichael Baum static int 24832287079SMichael Baum mlx5_rx_devx_get_event(struct mlx5_rxq_obj *rxq_obj) 24932287079SMichael Baum { 25032287079SMichael Baum #ifdef HAVE_IBV_DEVX_EVENT 25132287079SMichael Baum union { 25232287079SMichael Baum struct mlx5dv_devx_async_event_hdr event_resp; 25332287079SMichael Baum uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128]; 25432287079SMichael Baum } out; 25532287079SMichael Baum int ret = mlx5_glue->devx_get_event(rxq_obj->devx_channel, 25632287079SMichael Baum &out.event_resp, 25732287079SMichael Baum sizeof(out.buf)); 25832287079SMichael Baum 25932287079SMichael Baum if (ret < 0) { 26032287079SMichael Baum rte_errno = errno; 26132287079SMichael Baum return -rte_errno; 26232287079SMichael Baum } 2635cd33796SMichael Baum if (out.event_resp.cookie != (uint64_t)(uintptr_t)rxq_obj->cq_obj.cq) { 26432287079SMichael Baum rte_errno = EINVAL; 26532287079SMichael Baum return -rte_errno; 26632287079SMichael Baum } 26732287079SMichael Baum return 0; 26832287079SMichael Baum #else 26932287079SMichael Baum (void)rxq_obj; 27032287079SMichael Baum rte_errno = ENOTSUP; 27132287079SMichael Baum return -rte_errno; 27232287079SMichael Baum #endif /* HAVE_IBV_DEVX_EVENT */ 27332287079SMichael Baum } 27432287079SMichael Baum 27532287079SMichael Baum /** 27625025da3SSpike Du * Get LWM event for shared context, return the correct port/rxq for this event. 27725025da3SSpike Du * 27825025da3SSpike Du * @param priv 27925025da3SSpike Du * Mlx5_priv object. 28025025da3SSpike Du * @param rxq_idx [out] 28125025da3SSpike Du * Which rxq gets this event. 28225025da3SSpike Du * @param port_id [out] 28325025da3SSpike Du * Which port gets this event. 
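 *   Both output values are decoded from the 64-bit event cookie; a sketch
 *   of the decoding performed below:
 *     port_id = (cookie >> LWM_COOKIE_PORTID_OFFSET) & LWM_COOKIE_PORTID_MASK;
 *     rxq_idx = (cookie >> LWM_COOKIE_RXQID_OFFSET) & LWM_COOKIE_RXQID_MASK;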
28425025da3SSpike Du * 28525025da3SSpike Du * @return 28625025da3SSpike Du * 0 on success, a negative errno value otherwise and rte_errno is set. 28725025da3SSpike Du */ 28825025da3SSpike Du static int 28925025da3SSpike Du mlx5_rx_devx_get_event_lwm(struct mlx5_priv *priv, int *rxq_idx, int *port_id) 29025025da3SSpike Du { 29125025da3SSpike Du #ifdef HAVE_IBV_DEVX_EVENT 29225025da3SSpike Du union { 29325025da3SSpike Du struct mlx5dv_devx_async_event_hdr event_resp; 29425025da3SSpike Du uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128]; 29525025da3SSpike Du } out; 29625025da3SSpike Du int ret; 29725025da3SSpike Du 29825025da3SSpike Du memset(&out, 0, sizeof(out)); 29925025da3SSpike Du ret = mlx5_glue->devx_get_event(priv->sh->devx_channel_lwm, 30025025da3SSpike Du &out.event_resp, 30125025da3SSpike Du sizeof(out.buf)); 30225025da3SSpike Du if (ret < 0) { 30325025da3SSpike Du rte_errno = errno; 30425025da3SSpike Du DRV_LOG(WARNING, "%s err\n", __func__); 30525025da3SSpike Du return -rte_errno; 30625025da3SSpike Du } 30725025da3SSpike Du *port_id = (((uint32_t)out.event_resp.cookie) >> 30825025da3SSpike Du LWM_COOKIE_PORTID_OFFSET) & LWM_COOKIE_PORTID_MASK; 30925025da3SSpike Du *rxq_idx = (((uint32_t)out.event_resp.cookie) >> 31025025da3SSpike Du LWM_COOKIE_RXQID_OFFSET) & LWM_COOKIE_RXQID_MASK; 31125025da3SSpike Du return 0; 31225025da3SSpike Du #else 31325025da3SSpike Du (void)priv; 31425025da3SSpike Du (void)rxq_idx; 31525025da3SSpike Du (void)port_id; 31625025da3SSpike Du rte_errno = ENOTSUP; 31725025da3SSpike Du return -rte_errno; 31825025da3SSpike Du #endif /* HAVE_IBV_DEVX_EVENT */ 31925025da3SSpike Du } 32025025da3SSpike Du 32125025da3SSpike Du /** 3226deb19e1SMichael Baum * Create a RQ object using DevX. 3236deb19e1SMichael Baum * 3245ceb3a02SXueming Li * @param rxq 3255ceb3a02SXueming Li * Pointer to Rx queue. 3266deb19e1SMichael Baum * 3276deb19e1SMichael Baum * @return 3286e0a3637SMichael Baum * 0 on success, a negative errno value otherwise and rte_errno is set. 3296deb19e1SMichael Baum */ 3306e0a3637SMichael Baum static int 3315ceb3a02SXueming Li mlx5_rxq_create_devx_rq_resources(struct mlx5_rxq_priv *rxq) 3326deb19e1SMichael Baum { 3335ceb3a02SXueming Li struct mlx5_priv *priv = rxq->priv; 334fe46b20cSMichael Baum struct mlx5_common_device *cdev = priv->sh->cdev; 3355ceb3a02SXueming Li struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl; 3365ceb3a02SXueming Li struct mlx5_rxq_data *rxq_data = &rxq->ctrl->rxq; 3376deb19e1SMichael Baum struct mlx5_devx_create_rq_attr rq_attr = { 0 }; 3386e0a3637SMichael Baum uint16_t log_desc_n = rxq_data->elts_n - rxq_data->sges_n; 3396e0a3637SMichael Baum uint32_t wqe_size, log_wqe_size; 3406deb19e1SMichael Baum 3416deb19e1SMichael Baum /* Fill RQ attributes. */ 3426deb19e1SMichael Baum rq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE; 3436deb19e1SMichael Baum rq_attr.flush_in_error_en = 1; 3446e0a3637SMichael Baum rq_attr.vsd = (rxq_data->vlan_strip) ? 0 : 1; 3456e0a3637SMichael Baum rq_attr.cqn = rxq_ctrl->obj->cq_obj.cq->id; 3466e0a3637SMichael Baum rq_attr.scatter_fcs = (rxq_data->crc_present) ? 1 : 0; 347fe46b20cSMichael Baum rq_attr.ts_format = 348fe46b20cSMichael Baum mlx5_ts_format_conv(cdev->config.hca_attr.rq_ts_format); 3496deb19e1SMichael Baum /* Fill WQ attributes for this RQ. 
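	 * For Multi-Packet RQ the stride layout below is programmed as logs
	 * relative to the PRM minimums. As a worked sketch, a
	 * single_wqe_log_num_of_strides field value of 2 selects
	 * 512 * 2^2 = 2048 strides per WQE, and a
	 * single_stride_log_num_of_bytes field value of 5 selects
	 * 2^5 * 64 = 2048 bytes per stride.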
*/ 3506deb19e1SMichael Baum if (mlx5_rxq_mprq_enabled(rxq_data)) { 3516deb19e1SMichael Baum rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ; 3526deb19e1SMichael Baum /* 3536deb19e1SMichael Baum * Number of strides in each WQE: 3546deb19e1SMichael Baum * 512*2^single_wqe_log_num_of_strides. 3556deb19e1SMichael Baum */ 3566deb19e1SMichael Baum rq_attr.wq_attr.single_wqe_log_num_of_strides = 3570947ed38SMichael Baum rxq_data->log_strd_num - 3586deb19e1SMichael Baum MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES; 3596deb19e1SMichael Baum /* Stride size = (2^single_stride_log_num_of_bytes)*64B. */ 3606deb19e1SMichael Baum rq_attr.wq_attr.single_stride_log_num_of_bytes = 3610947ed38SMichael Baum rxq_data->log_strd_sz - 3626deb19e1SMichael Baum MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES; 3636deb19e1SMichael Baum wqe_size = sizeof(struct mlx5_wqe_mprq); 3646deb19e1SMichael Baum } else { 3656deb19e1SMichael Baum rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC; 3666deb19e1SMichael Baum wqe_size = sizeof(struct mlx5_wqe_data_seg); 3676deb19e1SMichael Baum } 3686deb19e1SMichael Baum log_wqe_size = log2above(wqe_size) + rxq_data->sges_n; 3696deb19e1SMichael Baum wqe_size = 1 << log_wqe_size; /* round up power of two.*/ 3706e0a3637SMichael Baum rq_attr.wq_attr.log_wq_stride = log_wqe_size; 3716e0a3637SMichael Baum rq_attr.wq_attr.log_wq_sz = log_desc_n; 3726e0a3637SMichael Baum rq_attr.wq_attr.end_padding_mode = priv->config.hw_padding ? 3736e0a3637SMichael Baum MLX5_WQ_END_PAD_MODE_ALIGN : 3746e0a3637SMichael Baum MLX5_WQ_END_PAD_MODE_NONE; 375fe46b20cSMichael Baum rq_attr.wq_attr.pd = cdev->pdn; 376e6988afdSMatan Azrad rq_attr.counter_set_id = priv->counter_set_id; 377febcac7bSBing Zhao rq_attr.delay_drop_en = rxq_data->delay_drop; 37825ed2ebfSViacheslav Ovsiienko rq_attr.user_index = rte_cpu_to_be_16(priv->dev_data->port_id); 37909c25553SXueming Li if (rxq_data->shared) /* Create RMP based RQ. */ 38009c25553SXueming Li rxq->devx_rq.rmp = &rxq_ctrl->obj->devx_rmp; 381f6dee900SMichael Baum /* Create RQ using DevX API. */ 3825ceb3a02SXueming Li return mlx5_devx_rq_create(cdev->ctx, &rxq->devx_rq, wqe_size, 383fe46b20cSMichael Baum log_desc_n, &rq_attr, rxq_ctrl->socket); 3846deb19e1SMichael Baum } 3856deb19e1SMichael Baum 3866deb19e1SMichael Baum /** 3876deb19e1SMichael Baum * Create a DevX CQ object for an Rx queue. 3886deb19e1SMichael Baum * 3895ceb3a02SXueming Li * @param rxq 3905ceb3a02SXueming Li * Pointer to Rx queue. 3916deb19e1SMichael Baum * 3926deb19e1SMichael Baum * @return 3935cd33796SMichael Baum * 0 on success, a negative errno value otherwise and rte_errno is set. 
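 *
 * Note: when CQE compression is enabled for the non-vectorized datapath the
 * CQ is sized with twice the number of CQEs; as a sketch, cqe_n = 512 doubled
 * to 1024 gives log_cqe_n = 10.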
3946deb19e1SMichael Baum */ 3955cd33796SMichael Baum static int 3965ceb3a02SXueming Li mlx5_rxq_create_devx_cq_resources(struct mlx5_rxq_priv *rxq) 3976deb19e1SMichael Baum { 3985cd33796SMichael Baum struct mlx5_devx_cq *cq_obj = 0; 3996deb19e1SMichael Baum struct mlx5_devx_cq_attr cq_attr = { 0 }; 4005ceb3a02SXueming Li struct mlx5_priv *priv = rxq->priv; 4015cd33796SMichael Baum struct mlx5_dev_ctx_shared *sh = priv->sh; 4025ceb3a02SXueming Li uint16_t port_id = priv->dev_data->port_id; 4035ceb3a02SXueming Li struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl; 4045ceb3a02SXueming Li struct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq; 405f6dee900SMichael Baum unsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data); 4066deb19e1SMichael Baum uint32_t log_cqe_n; 4075cd33796SMichael Baum uint16_t event_nums[1] = { 0 }; 4086deb19e1SMichael Baum int ret = 0; 4096deb19e1SMichael Baum 41009c25553SXueming Li if (rxq_ctrl->started) 41109c25553SXueming Li return 0; 4126deb19e1SMichael Baum if (priv->config.cqe_comp && !rxq_data->hw_timestamp && 4136deb19e1SMichael Baum !rxq_data->lro) { 41438f9369dSDekel Peled cq_attr.cqe_comp_en = 1u; 41599532fb1SAlexander Kozyrev cq_attr.cqe_comp_layout = priv->config.enh_cqe_comp; 41699532fb1SAlexander Kozyrev rxq_data->cqe_comp_layout = cq_attr.cqe_comp_layout; 41754c2d46bSAlexander Kozyrev rxq_data->mcqe_format = priv->config.cqe_comp_fmt; 41854c2d46bSAlexander Kozyrev rxq_data->byte_mask = UINT32_MAX; 41954c2d46bSAlexander Kozyrev switch (priv->config.cqe_comp_fmt) { 42054c2d46bSAlexander Kozyrev case MLX5_CQE_RESP_FORMAT_HASH: 42154c2d46bSAlexander Kozyrev /* fallthrough */ 42254c2d46bSAlexander Kozyrev case MLX5_CQE_RESP_FORMAT_CSUM: 4230f20acbfSAlexander Kozyrev /* 42454c2d46bSAlexander Kozyrev * Select CSUM miniCQE format only for non-vectorized 42554c2d46bSAlexander Kozyrev * MPRQ Rx burst, use HASH miniCQE format for others. 4260f20acbfSAlexander Kozyrev */ 4270f20acbfSAlexander Kozyrev if (mlx5_rxq_check_vec_support(rxq_data) < 0 && 4280f20acbfSAlexander Kozyrev mlx5_rxq_mprq_enabled(rxq_data)) 4296deb19e1SMichael Baum cq_attr.mini_cqe_res_format = 4300f20acbfSAlexander Kozyrev MLX5_CQE_RESP_FORMAT_CSUM_STRIDX; 4310f20acbfSAlexander Kozyrev else 4320f20acbfSAlexander Kozyrev cq_attr.mini_cqe_res_format = 43338f9369dSDekel Peled MLX5_CQE_RESP_FORMAT_HASH; 43454c2d46bSAlexander Kozyrev rxq_data->mcqe_format = cq_attr.mini_cqe_res_format; 43554c2d46bSAlexander Kozyrev break; 43654c2d46bSAlexander Kozyrev case MLX5_CQE_RESP_FORMAT_FTAG_STRIDX: 43754c2d46bSAlexander Kozyrev rxq_data->byte_mask = MLX5_LEN_WITH_MARK_MASK; 43854c2d46bSAlexander Kozyrev /* fallthrough */ 43954c2d46bSAlexander Kozyrev case MLX5_CQE_RESP_FORMAT_CSUM_STRIDX: 44054c2d46bSAlexander Kozyrev cq_attr.mini_cqe_res_format = priv->config.cqe_comp_fmt; 44154c2d46bSAlexander Kozyrev break; 44254c2d46bSAlexander Kozyrev case MLX5_CQE_RESP_FORMAT_L34H_STRIDX: 44354c2d46bSAlexander Kozyrev cq_attr.mini_cqe_res_format = 0; 44454c2d46bSAlexander Kozyrev cq_attr.mini_cqe_res_format_ext = 1; 44554c2d46bSAlexander Kozyrev break; 44654c2d46bSAlexander Kozyrev } 44754c2d46bSAlexander Kozyrev DRV_LOG(DEBUG, 44854c2d46bSAlexander Kozyrev "Port %u Rx CQE compression is enabled, format %d.", 4495ceb3a02SXueming Li port_id, priv->config.cqe_comp_fmt); 4506deb19e1SMichael Baum /* 4516deb19e1SMichael Baum * For vectorized Rx, it must not be doubled in order to 4526deb19e1SMichael Baum * make cq_ci and rq_ci aligned. 
4536deb19e1SMichael Baum */ 4546deb19e1SMichael Baum if (mlx5_rxq_check_vec_support(rxq_data) < 0) 4556deb19e1SMichael Baum cqe_n *= 2; 4566deb19e1SMichael Baum } else if (priv->config.cqe_comp && rxq_data->hw_timestamp) { 4576deb19e1SMichael Baum DRV_LOG(DEBUG, 4585ceb3a02SXueming Li "Port %u Rx CQE compression is disabled for HW timestamp.", 4595ceb3a02SXueming Li port_id); 4606deb19e1SMichael Baum } else if (priv->config.cqe_comp && rxq_data->lro) { 4616deb19e1SMichael Baum DRV_LOG(DEBUG, 4626deb19e1SMichael Baum "Port %u Rx CQE compression is disabled for LRO.", 4635ceb3a02SXueming Li port_id); 4646deb19e1SMichael Baum } 4655dfa003dSMichael Baum cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->rx_uar.obj); 4666deb19e1SMichael Baum log_cqe_n = log2above(cqe_n); 467f6dee900SMichael Baum /* Create CQ using DevX API. */ 468ca1418ceSMichael Baum ret = mlx5_devx_cq_create(sh->cdev->ctx, &rxq_ctrl->obj->cq_obj, 469ca1418ceSMichael Baum log_cqe_n, &cq_attr, sh->numa_node); 4705cd33796SMichael Baum if (ret) 4715cd33796SMichael Baum return ret; 4725cd33796SMichael Baum cq_obj = &rxq_ctrl->obj->cq_obj; 4735cd33796SMichael Baum rxq_data->cqes = (volatile struct mlx5_cqe (*)[]) 4745cd33796SMichael Baum (uintptr_t)cq_obj->cqes; 4755cd33796SMichael Baum rxq_data->cq_db = cq_obj->db_rec; 4765dfa003dSMichael Baum rxq_data->uar_data = sh->rx_uar.cq_db; 4776deb19e1SMichael Baum rxq_data->cqe_n = log_cqe_n; 4785cd33796SMichael Baum rxq_data->cqn = cq_obj->cq->id; 47909c25553SXueming Li rxq_data->cq_ci = 0; 480f6dee900SMichael Baum if (rxq_ctrl->obj->devx_channel) { 48198174626STal Shnaiderman ret = mlx5_os_devx_subscribe_devx_event 482f6dee900SMichael Baum (rxq_ctrl->obj->devx_channel, 4835cd33796SMichael Baum cq_obj->cq->obj, 4846deb19e1SMichael Baum sizeof(event_nums), 4856deb19e1SMichael Baum event_nums, 4865cd33796SMichael Baum (uint64_t)(uintptr_t)cq_obj->cq); 4876deb19e1SMichael Baum if (ret) { 4886deb19e1SMichael Baum DRV_LOG(ERR, "Fail to subscribe CQ to event channel."); 4895cd33796SMichael Baum ret = errno; 4905cd33796SMichael Baum mlx5_devx_cq_destroy(cq_obj); 4915cd33796SMichael Baum memset(cq_obj, 0, sizeof(*cq_obj)); 4925cd33796SMichael Baum rte_errno = ret; 4935cd33796SMichael Baum return -ret; 4946deb19e1SMichael Baum } 4956deb19e1SMichael Baum } 4965cd33796SMichael Baum return 0; 4976deb19e1SMichael Baum } 4986deb19e1SMichael Baum 4996deb19e1SMichael Baum /** 500cd00dce6SShani Peretz * Create a global queue counter for all the port hairpin queues. 501cd00dce6SShani Peretz * 502cd00dce6SShani Peretz * @param priv 503cd00dce6SShani Peretz * Device private data. 504cd00dce6SShani Peretz * 505cd00dce6SShani Peretz * @return 506cd00dce6SShani Peretz * The counter_set_id of the queue counter object, 0 otherwise. 507cd00dce6SShani Peretz */ 508cd00dce6SShani Peretz static uint32_t 509cd00dce6SShani Peretz mlx5_set_hairpin_queue_counter_obj(struct mlx5_priv *priv) 510cd00dce6SShani Peretz { 511cd00dce6SShani Peretz if (priv->q_counters_hairpin != NULL) 512cd00dce6SShani Peretz return priv->q_counters_hairpin->id; 513cd00dce6SShani Peretz 514cd00dce6SShani Peretz /* Queue counter allocation failed in the past - don't try again. 
*/ 515cd00dce6SShani Peretz if (priv->q_counters_allocation_failure != 0) 516cd00dce6SShani Peretz return 0; 517cd00dce6SShani Peretz 518cd00dce6SShani Peretz if (priv->pci_dev == NULL) { 519cd00dce6SShani Peretz DRV_LOG(DEBUG, "Hairpin out of buffer counter is " 520cd00dce6SShani Peretz "only supported on PCI device."); 521cd00dce6SShani Peretz priv->q_counters_allocation_failure = 1; 522cd00dce6SShani Peretz return 0; 523cd00dce6SShani Peretz } 524cd00dce6SShani Peretz 525cd00dce6SShani Peretz switch (priv->pci_dev->id.device_id) { 526cd00dce6SShani Peretz /* Counting out of buffer drops on hairpin queues is supported only on CX7 and up. */ 527cd00dce6SShani Peretz case PCI_DEVICE_ID_MELLANOX_CONNECTX7: 528cd00dce6SShani Peretz case PCI_DEVICE_ID_MELLANOX_CONNECTXVF: 529cd00dce6SShani Peretz case PCI_DEVICE_ID_MELLANOX_BLUEFIELD3: 530cd00dce6SShani Peretz case PCI_DEVICE_ID_MELLANOX_BLUEFIELDVF: 531cd00dce6SShani Peretz 532cd00dce6SShani Peretz priv->q_counters_hairpin = mlx5_devx_cmd_queue_counter_alloc(priv->sh->cdev->ctx); 533cd00dce6SShani Peretz if (priv->q_counters_hairpin == NULL) { 534cd00dce6SShani Peretz /* Failed to allocate */ 535cd00dce6SShani Peretz DRV_LOG(DEBUG, "Some of the statistics of port %d " 536cd00dce6SShani Peretz "will not be available.", priv->dev_data->port_id); 537cd00dce6SShani Peretz priv->q_counters_allocation_failure = 1; 538cd00dce6SShani Peretz return 0; 539cd00dce6SShani Peretz } 540cd00dce6SShani Peretz return priv->q_counters_hairpin->id; 541cd00dce6SShani Peretz default: 542cd00dce6SShani Peretz DRV_LOG(DEBUG, "Hairpin out of buffer counter " 543cd00dce6SShani Peretz "is not available on this NIC."); 544cd00dce6SShani Peretz priv->q_counters_allocation_failure = 1; 545cd00dce6SShani Peretz return 0; 546cd00dce6SShani Peretz } 547cd00dce6SShani Peretz } 548cd00dce6SShani Peretz 549cd00dce6SShani Peretz /** 5506deb19e1SMichael Baum * Create the Rx hairpin queue object. 5516deb19e1SMichael Baum * 5525ceb3a02SXueming Li * @param rxq 5535ceb3a02SXueming Li * Pointer to Rx queue. 5546deb19e1SMichael Baum * 5556deb19e1SMichael Baum * @return 5561260a87bSMichael Baum * 0 on success, a negative errno value otherwise and rte_errno is set. 5576deb19e1SMichael Baum */ 5581260a87bSMichael Baum static int 5595ceb3a02SXueming Li mlx5_rxq_obj_hairpin_new(struct mlx5_rxq_priv *rxq) 5606deb19e1SMichael Baum { 5615ceb3a02SXueming Li uint16_t idx = rxq->idx; 5625ceb3a02SXueming Li struct mlx5_priv *priv = rxq->priv; 563f2d43ff5SDariusz Sosnowski struct mlx5_hca_attr *hca_attr __rte_unused = &priv->sh->cdev->config.hca_attr; 5645ceb3a02SXueming Li struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl; 565f2d43ff5SDariusz Sosnowski struct mlx5_devx_create_rq_attr unlocked_attr = { 0 }; 566f2d43ff5SDariusz Sosnowski struct mlx5_devx_create_rq_attr locked_attr = { 0 }; 5671260a87bSMichael Baum struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj; 5686deb19e1SMichael Baum uint32_t max_wq_data; 5696deb19e1SMichael Baum 5705ceb3a02SXueming Li MLX5_ASSERT(rxq != NULL && rxq->ctrl != NULL && tmpl != NULL); 5716deb19e1SMichael Baum tmpl->rxq_ctrl = rxq_ctrl; 572f2d43ff5SDariusz Sosnowski unlocked_attr.hairpin = 1; 57353820561SMichael Baum max_wq_data = 57453820561SMichael Baum priv->sh->cdev->config.hca_attr.log_max_hairpin_wq_data_sz; 5756deb19e1SMichael Baum /* Jumbo frames > 9KB should be supported, and more packets. 
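	 * The sizes below are log2 values: as a sketch, log_hairpin_data_sz = 15
	 * describes a 32 KiB data buffer, and log_hairpin_num_packets is then
	 * derived from it by subtracting MLX5_HAIRPIN_QUEUE_STRIDE.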
*/ 5766deb19e1SMichael Baum if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) { 5776deb19e1SMichael Baum if (priv->config.log_hp_size > max_wq_data) { 5786deb19e1SMichael Baum DRV_LOG(ERR, "Total data size %u power of 2 is " 5796deb19e1SMichael Baum "too large for hairpin.", 5806deb19e1SMichael Baum priv->config.log_hp_size); 5816deb19e1SMichael Baum rte_errno = ERANGE; 5821260a87bSMichael Baum return -rte_errno; 5836deb19e1SMichael Baum } 584f2d43ff5SDariusz Sosnowski unlocked_attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size; 5856deb19e1SMichael Baum } else { 586f2d43ff5SDariusz Sosnowski unlocked_attr.wq_attr.log_hairpin_data_sz = 5876deb19e1SMichael Baum (max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ? 5886deb19e1SMichael Baum max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE; 5896deb19e1SMichael Baum } 5906deb19e1SMichael Baum /* Set the packets number to the maximum value for performance. */ 591f2d43ff5SDariusz Sosnowski unlocked_attr.wq_attr.log_hairpin_num_packets = 592f2d43ff5SDariusz Sosnowski unlocked_attr.wq_attr.log_hairpin_data_sz - 5936deb19e1SMichael Baum MLX5_HAIRPIN_QUEUE_STRIDE; 594cd00dce6SShani Peretz 595cd00dce6SShani Peretz unlocked_attr.counter_set_id = mlx5_set_hairpin_queue_counter_obj(priv); 596cd00dce6SShani Peretz 597febcac7bSBing Zhao rxq_ctrl->rxq.delay_drop = priv->config.hp_delay_drop; 598f2d43ff5SDariusz Sosnowski unlocked_attr.delay_drop_en = priv->config.hp_delay_drop; 599f2d43ff5SDariusz Sosnowski unlocked_attr.hairpin_data_buffer_type = 600f2d43ff5SDariusz Sosnowski MLX5_RQC_HAIRPIN_DATA_BUFFER_TYPE_UNLOCKED_INTERNAL_BUFFER; 601f2d43ff5SDariusz Sosnowski if (rxq->hairpin_conf.use_locked_device_memory) { 602f2d43ff5SDariusz Sosnowski /* 603f2d43ff5SDariusz Sosnowski * It is assumed that configuration is verified against capabilities 604f2d43ff5SDariusz Sosnowski * during queue setup. 605f2d43ff5SDariusz Sosnowski */ 606f2d43ff5SDariusz Sosnowski MLX5_ASSERT(hca_attr->hairpin_data_buffer_locked); 607f2d43ff5SDariusz Sosnowski rte_memcpy(&locked_attr, &unlocked_attr, sizeof(locked_attr)); 608f2d43ff5SDariusz Sosnowski locked_attr.hairpin_data_buffer_type = 609f2d43ff5SDariusz Sosnowski MLX5_RQC_HAIRPIN_DATA_BUFFER_TYPE_LOCKED_INTERNAL_BUFFER; 610f2d43ff5SDariusz Sosnowski tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->cdev->ctx, &locked_attr, 611f2d43ff5SDariusz Sosnowski rxq_ctrl->socket); 612f2d43ff5SDariusz Sosnowski if (!tmpl->rq && rxq->hairpin_conf.force_memory) { 613f2d43ff5SDariusz Sosnowski DRV_LOG(ERR, "Port %u Rx hairpin queue %u can't create RQ object" 614f2d43ff5SDariusz Sosnowski " with locked memory buffer", 615f2d43ff5SDariusz Sosnowski priv->dev_data->port_id, idx); 616f2d43ff5SDariusz Sosnowski return -rte_errno; 617f2d43ff5SDariusz Sosnowski } else if (!tmpl->rq && !rxq->hairpin_conf.force_memory) { 618f2d43ff5SDariusz Sosnowski DRV_LOG(WARNING, "Port %u Rx hairpin queue %u can't create RQ object" 619f2d43ff5SDariusz Sosnowski " with locked memory buffer. 
Falling back to unlocked" 620f2d43ff5SDariusz Sosnowski " device memory.", 621f2d43ff5SDariusz Sosnowski priv->dev_data->port_id, idx); 622f2d43ff5SDariusz Sosnowski rte_errno = 0; 623f2d43ff5SDariusz Sosnowski goto create_rq_unlocked; 624f2d43ff5SDariusz Sosnowski } 625f2d43ff5SDariusz Sosnowski goto create_rq_set_state; 626f2d43ff5SDariusz Sosnowski } 627f2d43ff5SDariusz Sosnowski 628f2d43ff5SDariusz Sosnowski create_rq_unlocked: 629f2d43ff5SDariusz Sosnowski tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->cdev->ctx, &unlocked_attr, 6306deb19e1SMichael Baum rxq_ctrl->socket); 6316deb19e1SMichael Baum if (!tmpl->rq) { 6326deb19e1SMichael Baum DRV_LOG(ERR, 6336deb19e1SMichael Baum "Port %u Rx hairpin queue %u can't create rq object.", 6345ceb3a02SXueming Li priv->dev_data->port_id, idx); 6356deb19e1SMichael Baum rte_errno = errno; 6361260a87bSMichael Baum return -rte_errno; 6376deb19e1SMichael Baum } 638f2d43ff5SDariusz Sosnowski create_rq_set_state: 6395ceb3a02SXueming Li priv->dev_data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN; 6401260a87bSMichael Baum return 0; 6416deb19e1SMichael Baum } 6426deb19e1SMichael Baum 6436deb19e1SMichael Baum /** 6446deb19e1SMichael Baum * Create the Rx queue DevX object. 6456deb19e1SMichael Baum * 6465ceb3a02SXueming Li * @param rxq 6475ceb3a02SXueming Li * Pointer to Rx queue. 6486deb19e1SMichael Baum * 6496deb19e1SMichael Baum * @return 6501260a87bSMichael Baum * 0 on success, a negative errno value otherwise and rte_errno is set. 6516deb19e1SMichael Baum */ 6521260a87bSMichael Baum static int 6535ceb3a02SXueming Li mlx5_rxq_devx_obj_new(struct mlx5_rxq_priv *rxq) 6546deb19e1SMichael Baum { 6555ceb3a02SXueming Li struct mlx5_priv *priv = rxq->priv; 6565ceb3a02SXueming Li struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl; 6575ceb3a02SXueming Li struct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq; 6581260a87bSMichael Baum struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj; 6596deb19e1SMichael Baum int ret = 0; 6606deb19e1SMichael Baum 6616deb19e1SMichael Baum MLX5_ASSERT(rxq_data); 6621260a87bSMichael Baum MLX5_ASSERT(tmpl); 663c06f77aeSMichael Baum if (rxq_ctrl->is_hairpin) 6645ceb3a02SXueming Li return mlx5_rxq_obj_hairpin_new(rxq); 6656deb19e1SMichael Baum tmpl->rxq_ctrl = rxq_ctrl; 66609c25553SXueming Li if (rxq_ctrl->irq && !rxq_ctrl->started) { 6676deb19e1SMichael Baum int devx_ev_flag = 6686deb19e1SMichael Baum MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA; 6696deb19e1SMichael Baum 67098174626STal Shnaiderman tmpl->devx_channel = mlx5_os_devx_create_event_channel 671ca1418ceSMichael Baum (priv->sh->cdev->ctx, 6726deb19e1SMichael Baum devx_ev_flag); 6736deb19e1SMichael Baum if (!tmpl->devx_channel) { 6746deb19e1SMichael Baum rte_errno = errno; 6756deb19e1SMichael Baum DRV_LOG(ERR, "Failed to create event channel %d.", 6766deb19e1SMichael Baum rte_errno); 6776deb19e1SMichael Baum goto error; 6786deb19e1SMichael Baum } 6796deb19e1SMichael Baum tmpl->fd = mlx5_os_get_devx_channel_fd(tmpl->devx_channel); 6806deb19e1SMichael Baum } 6816deb19e1SMichael Baum /* Create CQ using DevX API. */ 6825ceb3a02SXueming Li ret = mlx5_rxq_create_devx_cq_resources(rxq); 6835cd33796SMichael Baum if (ret) { 6846deb19e1SMichael Baum DRV_LOG(ERR, "Failed to create CQ."); 6856deb19e1SMichael Baum goto error; 6866deb19e1SMichael Baum } 687*2d876343SJiawei Wang if (!rxq_data->shared || !rxq_ctrl->started) 688febcac7bSBing Zhao rxq_data->delay_drop = priv->config.std_delay_drop; 6896deb19e1SMichael Baum /* Create RQ using DevX API. 
*/ 6905ceb3a02SXueming Li ret = mlx5_rxq_create_devx_rq_resources(rxq); 6916e0a3637SMichael Baum if (ret) { 6926deb19e1SMichael Baum DRV_LOG(ERR, "Port %u Rx queue %u RQ creation failure.", 6935ceb3a02SXueming Li priv->dev_data->port_id, rxq->idx); 6946deb19e1SMichael Baum rte_errno = ENOMEM; 6956deb19e1SMichael Baum goto error; 6966deb19e1SMichael Baum } 6976deb19e1SMichael Baum /* Change queue state to ready. */ 6985ceb3a02SXueming Li ret = mlx5_devx_modify_rq(rxq, MLX5_RXQ_MOD_RST2RDY); 6996deb19e1SMichael Baum if (ret) 7006deb19e1SMichael Baum goto error; 70109c25553SXueming Li if (!rxq_data->shared) { 7025ceb3a02SXueming Li rxq_data->wqes = (void *)(uintptr_t)rxq->devx_rq.wq.umem_buf; 7035ceb3a02SXueming Li rxq_data->rq_db = (uint32_t *)(uintptr_t)rxq->devx_rq.wq.db_rec; 70409c25553SXueming Li } else if (!rxq_ctrl->started) { 70509c25553SXueming Li rxq_data->wqes = (void *)(uintptr_t)tmpl->devx_rmp.wq.umem_buf; 70609c25553SXueming Li rxq_data->rq_db = 70709c25553SXueming Li (uint32_t *)(uintptr_t)tmpl->devx_rmp.wq.db_rec; 70809c25553SXueming Li } 70909c25553SXueming Li if (!rxq_ctrl->started) { 7106e0a3637SMichael Baum mlx5_rxq_initialize(rxq_data); 7115ceb3a02SXueming Li rxq_ctrl->wqn = rxq->devx_rq.rq->id; 71209c25553SXueming Li } 71309c25553SXueming Li priv->dev_data->rx_queue_state[rxq->idx] = RTE_ETH_QUEUE_STATE_STARTED; 7141260a87bSMichael Baum return 0; 7156deb19e1SMichael Baum error: 7166deb19e1SMichael Baum ret = rte_errno; /* Save rte_errno before cleanup. */ 7175ceb3a02SXueming Li mlx5_rxq_devx_obj_release(rxq); 7181260a87bSMichael Baum rte_errno = ret; /* Restore rte_errno. */ 7191260a87bSMichael Baum return -rte_errno; 7206deb19e1SMichael Baum } 7216deb19e1SMichael Baum 72287e2db37SMichael Baum /** 723fa7ad49eSAndrey Vesnovaty * Prepare RQT attribute structure for DevX RQT API. 724fa7ad49eSAndrey Vesnovaty * 725fa7ad49eSAndrey Vesnovaty * @param dev 726fa7ad49eSAndrey Vesnovaty * Pointer to Ethernet device. 727fa7ad49eSAndrey Vesnovaty * @param log_n 728fa7ad49eSAndrey Vesnovaty * Log of number of queues in the array. 729bc5bee02SDmitry Kozlyuk * @param queues 730bc5bee02SDmitry Kozlyuk * List of RX queue indices or NULL, in which case 731bc5bee02SDmitry Kozlyuk * the attribute will be filled by drop queue ID. 732bc5bee02SDmitry Kozlyuk * @param queues_n 733bc5bee02SDmitry Kozlyuk * Size of @p queues array or 0 if it is NULL. 734fa7ad49eSAndrey Vesnovaty * @param ind_tbl 735fa7ad49eSAndrey Vesnovaty * DevX indirection table object. 736fa7ad49eSAndrey Vesnovaty * 737fa7ad49eSAndrey Vesnovaty * @return 738fa7ad49eSAndrey Vesnovaty * The RQT attr object initialized, NULL otherwise and rte_errno is set. 
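 *
 * The table holds exactly 2^log_n entries; when fewer queues are supplied the
 * remaining entries wrap around over the given ones, e.g. (a sketch) 3 queues
 * in an 8-entry RQT yield rq_list = { q0, q1, q2, q0, q1, q2, q0, q1 }.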
739fa7ad49eSAndrey Vesnovaty */ 740fa7ad49eSAndrey Vesnovaty static struct mlx5_devx_rqt_attr * 741fa7ad49eSAndrey Vesnovaty mlx5_devx_ind_table_create_rqt_attr(struct rte_eth_dev *dev, 742fa7ad49eSAndrey Vesnovaty const unsigned int log_n, 743fa7ad49eSAndrey Vesnovaty const uint16_t *queues, 744fa7ad49eSAndrey Vesnovaty const uint32_t queues_n) 745fa7ad49eSAndrey Vesnovaty { 746fa7ad49eSAndrey Vesnovaty struct mlx5_priv *priv = dev->data->dev_private; 747fa7ad49eSAndrey Vesnovaty struct mlx5_devx_rqt_attr *rqt_attr = NULL; 748fa7ad49eSAndrey Vesnovaty const unsigned int rqt_n = 1 << log_n; 749fa7ad49eSAndrey Vesnovaty unsigned int i, j; 750fa7ad49eSAndrey Vesnovaty 751fa7ad49eSAndrey Vesnovaty rqt_attr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rqt_attr) + 752fa7ad49eSAndrey Vesnovaty rqt_n * sizeof(uint32_t), 0, SOCKET_ID_ANY); 753fa7ad49eSAndrey Vesnovaty if (!rqt_attr) { 754fa7ad49eSAndrey Vesnovaty DRV_LOG(ERR, "Port %u cannot allocate RQT resources.", 755fa7ad49eSAndrey Vesnovaty dev->data->port_id); 756fa7ad49eSAndrey Vesnovaty rte_errno = ENOMEM; 757fa7ad49eSAndrey Vesnovaty return NULL; 758fa7ad49eSAndrey Vesnovaty } 75987af0d1eSMichael Baum rqt_attr->rqt_max_size = priv->sh->dev_cap.ind_table_max_size; 760fa7ad49eSAndrey Vesnovaty rqt_attr->rqt_actual_size = rqt_n; 761bc5bee02SDmitry Kozlyuk if (queues == NULL) { 762bc5bee02SDmitry Kozlyuk for (i = 0; i < rqt_n; i++) 7635ceb3a02SXueming Li rqt_attr->rq_list[i] = 7645ceb3a02SXueming Li priv->drop_queue.rxq->devx_rq.rq->id; 765bc5bee02SDmitry Kozlyuk return rqt_attr; 766bc5bee02SDmitry Kozlyuk } 767fa7ad49eSAndrey Vesnovaty for (i = 0; i != queues_n; ++i) { 768311b17e6SMichael Baum if (mlx5_is_external_rxq(dev, queues[i])) { 7698e8b44f2SSuanming Mou struct mlx5_external_q *ext_rxq = 770311b17e6SMichael Baum mlx5_ext_rxq_get(dev, queues[i]); 771311b17e6SMichael Baum 772311b17e6SMichael Baum rqt_attr->rq_list[i] = ext_rxq->hw_id; 773311b17e6SMichael Baum } else { 774311b17e6SMichael Baum struct mlx5_rxq_priv *rxq = 775311b17e6SMichael Baum mlx5_rxq_get(dev, queues[i]); 776fa7ad49eSAndrey Vesnovaty 7775ceb3a02SXueming Li MLX5_ASSERT(rxq != NULL); 778c06f77aeSMichael Baum if (rxq->ctrl->is_hairpin) 77909c25553SXueming Li rqt_attr->rq_list[i] = rxq->ctrl->obj->rq->id; 78009c25553SXueming Li else 7815ceb3a02SXueming Li rqt_attr->rq_list[i] = rxq->devx_rq.rq->id; 782fa7ad49eSAndrey Vesnovaty } 783311b17e6SMichael Baum } 784fa7ad49eSAndrey Vesnovaty MLX5_ASSERT(i > 0); 785fa7ad49eSAndrey Vesnovaty for (j = 0; i != rqt_n; ++j, ++i) 786fa7ad49eSAndrey Vesnovaty rqt_attr->rq_list[i] = rqt_attr->rq_list[j]; 787fa7ad49eSAndrey Vesnovaty return rqt_attr; 788fa7ad49eSAndrey Vesnovaty } 789fa7ad49eSAndrey Vesnovaty 790fa7ad49eSAndrey Vesnovaty /** 79125ae7f1aSMichael Baum * Create RQT using DevX API as a filed of indirection table. 79287e2db37SMichael Baum * 79387e2db37SMichael Baum * @param dev 79487e2db37SMichael Baum * Pointer to Ethernet device. 79525ae7f1aSMichael Baum * @param log_n 79625ae7f1aSMichael Baum * Log of number of queues in the array. 79725ae7f1aSMichael Baum * @param ind_tbl 79825ae7f1aSMichael Baum * DevX indirection table object. 79987e2db37SMichael Baum * 80087e2db37SMichael Baum * @return 80125ae7f1aSMichael Baum * 0 on success, a negative errno value otherwise and rte_errno is set. 
80287e2db37SMichael Baum */ 80325ae7f1aSMichael Baum static int 80425ae7f1aSMichael Baum mlx5_devx_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n, 80525ae7f1aSMichael Baum struct mlx5_ind_table_obj *ind_tbl) 80687e2db37SMichael Baum { 80787e2db37SMichael Baum struct mlx5_priv *priv = dev->data->dev_private; 80887e2db37SMichael Baum struct mlx5_devx_rqt_attr *rqt_attr = NULL; 809bc5bee02SDmitry Kozlyuk const uint16_t *queues = dev->data->dev_started ? ind_tbl->queues : 810bc5bee02SDmitry Kozlyuk NULL; 81187e2db37SMichael Baum 81225ae7f1aSMichael Baum MLX5_ASSERT(ind_tbl); 813bc5bee02SDmitry Kozlyuk rqt_attr = mlx5_devx_ind_table_create_rqt_attr(dev, log_n, queues, 814fa7ad49eSAndrey Vesnovaty ind_tbl->queues_n); 815fa7ad49eSAndrey Vesnovaty if (!rqt_attr) 81625ae7f1aSMichael Baum return -rte_errno; 817ca1418ceSMichael Baum ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->cdev->ctx, rqt_attr); 81887e2db37SMichael Baum mlx5_free(rqt_attr); 81987e2db37SMichael Baum if (!ind_tbl->rqt) { 82087e2db37SMichael Baum DRV_LOG(ERR, "Port %u cannot create DevX RQT.", 82187e2db37SMichael Baum dev->data->port_id); 82287e2db37SMichael Baum rte_errno = errno; 82325ae7f1aSMichael Baum return -rte_errno; 82487e2db37SMichael Baum } 82525ae7f1aSMichael Baum return 0; 82687e2db37SMichael Baum } 82787e2db37SMichael Baum 82887e2db37SMichael Baum /** 829fa7ad49eSAndrey Vesnovaty * Modify RQT using DevX API as a filed of indirection table. 830fa7ad49eSAndrey Vesnovaty * 831fa7ad49eSAndrey Vesnovaty * @param dev 832fa7ad49eSAndrey Vesnovaty * Pointer to Ethernet device. 833fa7ad49eSAndrey Vesnovaty * @param log_n 834fa7ad49eSAndrey Vesnovaty * Log of number of queues in the array. 835fa7ad49eSAndrey Vesnovaty * @param ind_tbl 836fa7ad49eSAndrey Vesnovaty * DevX indirection table object. 837fa7ad49eSAndrey Vesnovaty * 838fa7ad49eSAndrey Vesnovaty * @return 839fa7ad49eSAndrey Vesnovaty * 0 on success, a negative errno value otherwise and rte_errno is set. 840fa7ad49eSAndrey Vesnovaty */ 841fa7ad49eSAndrey Vesnovaty static int 842fa7ad49eSAndrey Vesnovaty mlx5_devx_ind_table_modify(struct rte_eth_dev *dev, const unsigned int log_n, 843fa7ad49eSAndrey Vesnovaty const uint16_t *queues, const uint32_t queues_n, 844fa7ad49eSAndrey Vesnovaty struct mlx5_ind_table_obj *ind_tbl) 845fa7ad49eSAndrey Vesnovaty { 846fa7ad49eSAndrey Vesnovaty int ret = 0; 847fa7ad49eSAndrey Vesnovaty struct mlx5_devx_rqt_attr *rqt_attr = NULL; 848fa7ad49eSAndrey Vesnovaty 849fa7ad49eSAndrey Vesnovaty MLX5_ASSERT(ind_tbl); 850fa7ad49eSAndrey Vesnovaty rqt_attr = mlx5_devx_ind_table_create_rqt_attr(dev, log_n, 851fa7ad49eSAndrey Vesnovaty queues, 852fa7ad49eSAndrey Vesnovaty queues_n); 853fa7ad49eSAndrey Vesnovaty if (!rqt_attr) 854fa7ad49eSAndrey Vesnovaty return -rte_errno; 855fa7ad49eSAndrey Vesnovaty ret = mlx5_devx_cmd_modify_rqt(ind_tbl->rqt, rqt_attr); 856fa7ad49eSAndrey Vesnovaty mlx5_free(rqt_attr); 857fa7ad49eSAndrey Vesnovaty if (ret) 858fa7ad49eSAndrey Vesnovaty DRV_LOG(ERR, "Port %u cannot modify DevX RQT.", 859fa7ad49eSAndrey Vesnovaty dev->data->port_id); 860fa7ad49eSAndrey Vesnovaty return ret; 861fa7ad49eSAndrey Vesnovaty } 862fa7ad49eSAndrey Vesnovaty 863fa7ad49eSAndrey Vesnovaty /** 86487e2db37SMichael Baum * Destroy the DevX RQT object. 86587e2db37SMichael Baum * 86687e2db37SMichael Baum * @param ind_table 86787e2db37SMichael Baum * Indirection table to release. 
 */
static void
mlx5_devx_ind_table_destroy(struct mlx5_ind_table_obj *ind_tbl)
{
	claim_zero(mlx5_devx_cmd_destroy(ind_tbl->rqt));
}

/**
 * Set TIR attribute struct with relevant input values.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] rss_key
 *   RSS key for the Rx hash queue.
 * @param[in] hash_fields
 *   Verbs protocol hash field to make the RSS on.
 * @param[in] ind_tbl
 *   Indirection table for TIR. If table queues array is NULL,
 *   a TIR for drop queue is assumed.
 * @param[in] tunnel
 *   Tunnel type.
 * @param[in] symmetric_hash_function
 *   Symmetric hash function toggle.
 * @param[out] tir_attr
 *   Parameters structure for TIR creation/modification, filled by this
 *   function.
 */
static void
mlx5_devx_tir_attr_set(struct rte_eth_dev *dev, const uint8_t *rss_key,
		       uint64_t hash_fields,
		       const struct mlx5_ind_table_obj *ind_tbl,
		       int tunnel, bool symmetric_hash_function,
		       struct mlx5_devx_tir_attr *tir_attr)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	bool is_hairpin;
	bool lro = false;
	uint32_t i;

	/* NULL queues designate drop queue. */
	if (ind_tbl->queues == NULL) {
		is_hairpin = priv->drop_queue.rxq->ctrl->is_hairpin;
	} else if (mlx5_is_external_rxq(dev, ind_tbl->queues[0])) {
		/* External RxQ supports neither Hairpin nor LRO. */
		is_hairpin = false;
	} else {
		is_hairpin = mlx5_rxq_is_hairpin(dev, ind_tbl->queues[0]);
		lro = true;
		/* Enable TIR LRO only if all the queues were configured for it.
*/ 9175a959cbfSMichael Baum for (i = 0; i < ind_tbl->queues_n; ++i) { 9185cf0707fSXueming Li struct mlx5_rxq_data *rxq_i = 9195cf0707fSXueming Li mlx5_rxq_data_get(dev, ind_tbl->queues[i]); 9205cf0707fSXueming Li 9215cf0707fSXueming Li if (rxq_i != NULL && !rxq_i->lro) { 92285552726SMichael Baum lro = false; 92385552726SMichael Baum break; 92485552726SMichael Baum } 92585552726SMichael Baum } 926bc5bee02SDmitry Kozlyuk } 927b8cc58c1SAndrey Vesnovaty memset(tir_attr, 0, sizeof(*tir_attr)); 928b8cc58c1SAndrey Vesnovaty tir_attr->disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT; 929b8cc58c1SAndrey Vesnovaty tir_attr->rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ; 930b8cc58c1SAndrey Vesnovaty tir_attr->tunneled_offload_en = !!tunnel; 9310e04e1e2SXueming Li tir_attr->rx_hash_symmetric = symmetric_hash_function; 93285552726SMichael Baum /* If needed, translate hash_fields bitmap to PRM format. */ 93385552726SMichael Baum if (hash_fields) { 934b8cc58c1SAndrey Vesnovaty struct mlx5_rx_hash_field_select *rx_hash_field_select = 93585552726SMichael Baum #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT 936b8cc58c1SAndrey Vesnovaty hash_fields & IBV_RX_HASH_INNER ? 937b8cc58c1SAndrey Vesnovaty &tir_attr->rx_hash_field_selector_inner : 93885552726SMichael Baum #endif 939b8cc58c1SAndrey Vesnovaty &tir_attr->rx_hash_field_selector_outer; 94085552726SMichael Baum /* 1 bit: 0: IPv4, 1: IPv6. */ 94185552726SMichael Baum rx_hash_field_select->l3_prot_type = 94285552726SMichael Baum !!(hash_fields & MLX5_IPV6_IBV_RX_HASH); 94385552726SMichael Baum /* 1 bit: 0: TCP, 1: UDP. */ 94485552726SMichael Baum rx_hash_field_select->l4_prot_type = 94585552726SMichael Baum !!(hash_fields & MLX5_UDP_IBV_RX_HASH); 94685552726SMichael Baum /* Bitmask which sets which fields to use in RX Hash. */ 94785552726SMichael Baum rx_hash_field_select->selected_fields = 94885552726SMichael Baum ((!!(hash_fields & MLX5_L3_SRC_IBV_RX_HASH)) << 94985552726SMichael Baum MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) | 95085552726SMichael Baum (!!(hash_fields & MLX5_L3_DST_IBV_RX_HASH)) << 95185552726SMichael Baum MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP | 95285552726SMichael Baum (!!(hash_fields & MLX5_L4_SRC_IBV_RX_HASH)) << 95385552726SMichael Baum MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT | 95485552726SMichael Baum (!!(hash_fields & MLX5_L4_DST_IBV_RX_HASH)) << 95518ca4a4eSRaja Zidane MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT | 95618ca4a4eSRaja Zidane (!!(hash_fields & IBV_RX_HASH_IPSEC_SPI)) << 95718ca4a4eSRaja Zidane MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_IPSEC_SPI; 95885552726SMichael Baum } 959c06f77aeSMichael Baum if (is_hairpin) 960b8cc58c1SAndrey Vesnovaty tir_attr->transport_domain = priv->sh->td->id; 96185552726SMichael Baum else 962b8cc58c1SAndrey Vesnovaty tir_attr->transport_domain = priv->sh->tdn; 963b8cc58c1SAndrey Vesnovaty memcpy(tir_attr->rx_hash_toeplitz_key, rss_key, MLX5_RSS_HASH_KEY_LEN); 964b8cc58c1SAndrey Vesnovaty tir_attr->indirect_table = ind_tbl->rqt->id; 96585552726SMichael Baum if (dev->data->dev_conf.lpbk_mode) 96645a6df80SMichael Baum tir_attr->self_lb_block = MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST; 96785552726SMichael Baum if (lro) { 968593f913aSMichael Baum MLX5_ASSERT(priv->sh->config.lro_allowed); 96987af0d1eSMichael Baum tir_attr->lro_timeout_period_usecs = priv->config.lro_timeout; 970a2364004SGregory Etelson tir_attr->lro_max_msg_sz = 971a2364004SGregory Etelson priv->max_lro_msg_size / MLX5_LRO_SEG_CHUNK_SIZE; 972b8cc58c1SAndrey Vesnovaty tir_attr->lro_enable_mask = 973b8cc58c1SAndrey Vesnovaty 
MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO | 97485552726SMichael Baum MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO; 97585552726SMichael Baum } 976b8cc58c1SAndrey Vesnovaty } 977b8cc58c1SAndrey Vesnovaty 978b8cc58c1SAndrey Vesnovaty /** 979b8cc58c1SAndrey Vesnovaty * Create an Rx Hash queue. 980b8cc58c1SAndrey Vesnovaty * 981b8cc58c1SAndrey Vesnovaty * @param dev 982b8cc58c1SAndrey Vesnovaty * Pointer to Ethernet device. 983b8cc58c1SAndrey Vesnovaty * @param hrxq 984b8cc58c1SAndrey Vesnovaty * Pointer to Rx Hash queue. 985b8cc58c1SAndrey Vesnovaty * @param tunnel 986b8cc58c1SAndrey Vesnovaty * Tunnel type. 987b8cc58c1SAndrey Vesnovaty * 988b8cc58c1SAndrey Vesnovaty * @return 989b8cc58c1SAndrey Vesnovaty * 0 on success, a negative errno value otherwise and rte_errno is set. 990b8cc58c1SAndrey Vesnovaty */ 991b8cc58c1SAndrey Vesnovaty static int 992b8cc58c1SAndrey Vesnovaty mlx5_devx_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq, 993b8cc58c1SAndrey Vesnovaty int tunnel __rte_unused) 994b8cc58c1SAndrey Vesnovaty { 995b8cc58c1SAndrey Vesnovaty struct mlx5_priv *priv = dev->data->dev_private; 996b8cc58c1SAndrey Vesnovaty struct mlx5_devx_tir_attr tir_attr = {0}; 997b8cc58c1SAndrey Vesnovaty int err; 998b8cc58c1SAndrey Vesnovaty 999b8cc58c1SAndrey Vesnovaty mlx5_devx_tir_attr_set(dev, hrxq->rss_key, hrxq->hash_fields, 10000e04e1e2SXueming Li hrxq->ind_table, tunnel, hrxq->symmetric_hash_function, 10010e04e1e2SXueming Li &tir_attr); 1002ca1418ceSMichael Baum hrxq->tir = mlx5_devx_cmd_create_tir(priv->sh->cdev->ctx, &tir_attr); 10035a959cbfSMichael Baum if (!hrxq->tir) { 100485552726SMichael Baum DRV_LOG(ERR, "Port %u cannot create DevX TIR.", 100585552726SMichael Baum dev->data->port_id); 100685552726SMichael Baum rte_errno = errno; 100785552726SMichael Baum goto error; 100885552726SMichael Baum } 1009d9bad050SSuanming Mou #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H) 101022681deeSAlex Vesker #ifdef HAVE_MLX5_HWS_SUPPORT 10113a2f674bSSuanming Mou if (hrxq->hws_flags) { 10123a2f674bSSuanming Mou hrxq->action = mlx5dr_action_create_dest_tir 10133a2f674bSSuanming Mou (priv->dr_ctx, 10145d542232SErez Shitrit (struct mlx5dr_devx_obj *)hrxq->tir, hrxq->hws_flags, true); 10153a2f674bSSuanming Mou if (!hrxq->action) 10163a2f674bSSuanming Mou goto error; 10173a2f674bSSuanming Mou return 0; 10183a2f674bSSuanming Mou } 1019d9bad050SSuanming Mou #endif 102088019723SOphir Munk if (mlx5_flow_os_create_flow_action_dest_devx_tir(hrxq->tir, 102188019723SOphir Munk &hrxq->action)) { 102285552726SMichael Baum rte_errno = errno; 102385552726SMichael Baum goto error; 102485552726SMichael Baum } 102585552726SMichael Baum #endif 10265a959cbfSMichael Baum return 0; 102785552726SMichael Baum error: 102885552726SMichael Baum err = rte_errno; /* Save rte_errno before cleanup. */ 10295a959cbfSMichael Baum if (hrxq->tir) 10305a959cbfSMichael Baum claim_zero(mlx5_devx_cmd_destroy(hrxq->tir)); 103185552726SMichael Baum rte_errno = err; /* Restore rte_errno. */ 10325a959cbfSMichael Baum return -rte_errno; 103385552726SMichael Baum } 103485552726SMichael Baum 103585552726SMichael Baum /** 103685552726SMichael Baum * Destroy a DevX TIR object. 103785552726SMichael Baum * 103885552726SMichael Baum * @param hrxq 103985552726SMichael Baum * Hash Rx queue to release its tir. 
104085552726SMichael Baum */ 104185552726SMichael Baum static void 104285552726SMichael Baum mlx5_devx_tir_destroy(struct mlx5_hrxq *hrxq) 104385552726SMichael Baum { 104485552726SMichael Baum claim_zero(mlx5_devx_cmd_destroy(hrxq->tir)); 104585552726SMichael Baum } 104685552726SMichael Baum 10475eaf882eSMichael Baum /** 1048b8cc58c1SAndrey Vesnovaty * Modify an Rx Hash queue configuration. 1049b8cc58c1SAndrey Vesnovaty * 1050b8cc58c1SAndrey Vesnovaty * @param dev 1051b8cc58c1SAndrey Vesnovaty * Pointer to Ethernet device. 1052b8cc58c1SAndrey Vesnovaty * @param hrxq 1053b8cc58c1SAndrey Vesnovaty * Hash Rx queue to modify. 1054b8cc58c1SAndrey Vesnovaty * @param rss_key 1055b8cc58c1SAndrey Vesnovaty * RSS key for the Rx hash queue. 1056b8cc58c1SAndrey Vesnovaty * @param hash_fields 1057b8cc58c1SAndrey Vesnovaty * Verbs protocol hash field to make the RSS on. 1058b8cc58c1SAndrey Vesnovaty * @param[in] ind_tbl 1059b8cc58c1SAndrey Vesnovaty * Indirection table for TIR. 1060b8cc58c1SAndrey Vesnovaty * 1061b8cc58c1SAndrey Vesnovaty * @return 1062b8cc58c1SAndrey Vesnovaty * 0 on success, a negative errno value otherwise and rte_errno is set. 1063b8cc58c1SAndrey Vesnovaty */ 1064b8cc58c1SAndrey Vesnovaty static int 1065b8cc58c1SAndrey Vesnovaty mlx5_devx_hrxq_modify(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq, 1066b8cc58c1SAndrey Vesnovaty const uint8_t *rss_key, 1067b8cc58c1SAndrey Vesnovaty uint64_t hash_fields, 10680e04e1e2SXueming Li bool symmetric_hash_function, 1069b8cc58c1SAndrey Vesnovaty const struct mlx5_ind_table_obj *ind_tbl) 1070b8cc58c1SAndrey Vesnovaty { 1071b8cc58c1SAndrey Vesnovaty struct mlx5_devx_modify_tir_attr modify_tir = {0}; 1072b8cc58c1SAndrey Vesnovaty 1073b8cc58c1SAndrey Vesnovaty /* 1074b8cc58c1SAndrey Vesnovaty * untested for modification fields: 1075b8cc58c1SAndrey Vesnovaty * - rx_hash_fn set hard-coded in hrxq_new(), 1076b8cc58c1SAndrey Vesnovaty * - lro_xxx not set after rxq setup 1077b8cc58c1SAndrey Vesnovaty */ 1078b8cc58c1SAndrey Vesnovaty if (ind_tbl != hrxq->ind_table) 1079b8cc58c1SAndrey Vesnovaty modify_tir.modify_bitmask |= 1080b8cc58c1SAndrey Vesnovaty MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_INDIRECT_TABLE; 1081b8cc58c1SAndrey Vesnovaty if (hash_fields != hrxq->hash_fields || 10820e04e1e2SXueming Li symmetric_hash_function != hrxq->symmetric_hash_function || 1083b8cc58c1SAndrey Vesnovaty memcmp(hrxq->rss_key, rss_key, MLX5_RSS_HASH_KEY_LEN)) 1084b8cc58c1SAndrey Vesnovaty modify_tir.modify_bitmask |= 1085b8cc58c1SAndrey Vesnovaty MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_HASH; 1086b8cc58c1SAndrey Vesnovaty mlx5_devx_tir_attr_set(dev, rss_key, hash_fields, ind_tbl, 1087b8cc58c1SAndrey Vesnovaty 0, /* N/A - tunnel modification unsupported */ 10880e04e1e2SXueming Li symmetric_hash_function, 1089b8cc58c1SAndrey Vesnovaty &modify_tir.tir); 1090b8cc58c1SAndrey Vesnovaty modify_tir.tirn = hrxq->tir->id; 1091b8cc58c1SAndrey Vesnovaty if (mlx5_devx_cmd_modify_tir(hrxq->tir, &modify_tir)) { 1092b8cc58c1SAndrey Vesnovaty DRV_LOG(ERR, "port %u cannot modify DevX TIR", 1093b8cc58c1SAndrey Vesnovaty dev->data->port_id); 1094b8cc58c1SAndrey Vesnovaty rte_errno = errno; 1095b8cc58c1SAndrey Vesnovaty return -rte_errno; 1096b8cc58c1SAndrey Vesnovaty } 1097b8cc58c1SAndrey Vesnovaty return 0; 1098b8cc58c1SAndrey Vesnovaty } 1099b8cc58c1SAndrey Vesnovaty 1100b8cc58c1SAndrey Vesnovaty /** 1101bc5bee02SDmitry Kozlyuk * Create a DevX drop Rx queue. 11025eaf882eSMichael Baum * 11035eaf882eSMichael Baum * @param dev 11045eaf882eSMichael Baum * Pointer to Ethernet device. 

/**
 * Create a DevX drop Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_devx_obj_drop_create(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int socket_id = dev->device->numa_node;
	struct mlx5_rxq_priv *rxq;
	struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
	struct mlx5_rxq_obj *rxq_obj = NULL;
	int ret;

	/*
	 * Initialize dummy control structures.
	 * They are required to hold pointers for cleanup
	 * and are only accessible via drop queue DevX objects.
	 */
	rxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq), 0, socket_id);
	if (rxq == NULL) {
		DRV_LOG(ERR, "Port %u could not allocate drop queue private",
			dev->data->port_id);
		rte_errno = ENOMEM;
		goto error;
	}
	rxq_ctrl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq_ctrl),
			       0, socket_id);
	if (rxq_ctrl == NULL) {
		DRV_LOG(ERR, "Port %u could not allocate drop queue control",
			dev->data->port_id);
		rte_errno = ENOMEM;
		goto error;
	}
	rxq_obj = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq_obj), 0, socket_id);
	if (rxq_obj == NULL) {
		DRV_LOG(ERR, "Port %u could not allocate drop queue object",
			dev->data->port_id);
		rte_errno = ENOMEM;
		goto error;
	}
	/* set the CPU socket ID where the rxq_ctrl was allocated */
	rxq_ctrl->socket = socket_id;
	rxq_obj->rxq_ctrl = rxq_ctrl;
	rxq_ctrl->is_hairpin = false;
	rxq_ctrl->sh = priv->sh;
	rxq_ctrl->obj = rxq_obj;
	rxq->ctrl = rxq_ctrl;
	rxq->priv = priv;
	LIST_INSERT_HEAD(&rxq_ctrl->owners, rxq, owner_entry);
	/* Create CQ using DevX API. */
	ret = mlx5_rxq_create_devx_cq_resources(rxq);
	if (ret != 0) {
		DRV_LOG(ERR, "Port %u drop queue CQ creation failed.",
			dev->data->port_id);
		goto error;
	}
	rxq_ctrl->rxq.delay_drop = 0;
	/* Create RQ using DevX API. */
	ret = mlx5_rxq_create_devx_rq_resources(rxq);
	if (ret != 0) {
		DRV_LOG(ERR, "Port %u drop queue RQ creation failed.",
			dev->data->port_id);
		rte_errno = ENOMEM;
		goto error;
	}
	/* Change queue state to ready. */
	ret = mlx5_devx_modify_rq(rxq, MLX5_RXQ_MOD_RST2RDY);
	if (ret != 0)
		goto error;
	/* Initialize drop queue. */
	priv->drop_queue.rxq = rxq;
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	if (rxq != NULL && rxq->devx_rq.rq != NULL)
		mlx5_devx_rq_destroy(&rxq->devx_rq);
	if (rxq_obj != NULL) {
		if (rxq_obj->cq_obj.cq != NULL)
			mlx5_devx_cq_destroy(&rxq_obj->cq_obj);
		if (rxq_obj->devx_channel)
			mlx5_os_devx_destroy_event_channel
						(rxq_obj->devx_channel);
		mlx5_free(rxq_obj);
	}
	if (rxq_ctrl != NULL)
		mlx5_free(rxq_ctrl);
	if (rxq != NULL)
		mlx5_free(rxq);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Release drop Rx queue resources.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_rxq_devx_obj_drop_release(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_priv *rxq = priv->drop_queue.rxq;
	struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;

	mlx5_rxq_devx_obj_release(rxq);
	mlx5_free(rxq_ctrl->obj);
	mlx5_free(rxq_ctrl);
	mlx5_free(rxq);
	priv->drop_queue.rxq = NULL;
}

/**
 * Release a drop hash Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_devx_drop_action_destroy(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;

#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
	if (hrxq->action != NULL)
		mlx5_flow_os_destroy_flow_action(hrxq->action);
#endif
	if (hrxq->tir != NULL)
		mlx5_devx_tir_destroy(hrxq);
	if (hrxq->ind_table->ind_table != NULL)
		mlx5_devx_ind_table_destroy(hrxq->ind_table);
	if (priv->drop_queue.rxq->devx_rq.rq != NULL)
		mlx5_rxq_devx_obj_drop_release(dev);
}

/**
 * Create a DevX drop action for Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_drop_action_create(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
	int ret;

	ret = mlx5_rxq_devx_obj_drop_create(dev);
	if (ret != 0) {
		DRV_LOG(ERR, "Cannot create drop RX queue");
		return ret;
	}
	if (priv->sh->config.dv_flow_en == 2)
		return 0;
	/* hrxq->ind_table queues are NULL, drop RX queue ID will be used */
	ret = mlx5_devx_ind_table_new(dev, 0, hrxq->ind_table);
	if (ret != 0) {
		DRV_LOG(ERR, "Cannot create drop hash RX queue indirection table");
		goto error;
	}
	ret = mlx5_devx_hrxq_new(dev, hrxq, /* tunnel */ false);
	if (ret != 0) {
		DRV_LOG(ERR, "Cannot create drop hash RX queue");
		goto error;
	}
	return 0;
error:
	mlx5_devx_drop_action_destroy(dev);
	return ret;
}

/**
 * Select TXQ TIS number.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param queue_idx
 *   Queue index in DPDK Tx queue array.
 *
 * @return
 *   TIS number (DevX TIS object ID) to be used by the Tx queue.
 */
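
/*
 * Illustration of the mapping below (numbers are only an example, not a
 * real configuration): with a two-port bond, MLX5_LAG_MODE_TIS affinity
 * and lag_affinity_idx == 1,
 *   queue_idx 0 -> tis_idx = (1 + 0) % 2 + 1 = 2
 *   queue_idx 1 -> tis_idx = (1 + 1) % 2 + 1 = 1
 *   queue_idx 2 -> tis_idx = (1 + 2) % 2 + 1 = 2
 * i.e. queues are spread round-robin over the bonded physical ports.
 * When bonding is not active, or neither a per-queue aggregated-port
 * affinity nor the TIS affinity mode applies, tis_idx stays 0 and the
 * default TIS is used.
 */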
static uint32_t
mlx5_get_txq_tis_num(struct rte_eth_dev *dev, uint16_t queue_idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[queue_idx];
	int tis_idx = 0;

	if (priv->sh->bond.n_port) {
		if (txq_data->tx_aggr_affinity) {
			tis_idx = txq_data->tx_aggr_affinity;
		} else if (priv->sh->lag.affinity_mode == MLX5_LAG_MODE_TIS) {
			tis_idx = (priv->lag_affinity_idx + queue_idx) %
				  priv->sh->bond.n_port + 1;
			DRV_LOG(INFO, "port %d txq %d gets affinity %d and maps to PF %d.",
				dev->data->port_id, queue_idx, tis_idx,
				priv->sh->lag.tx_remap_affinity[tis_idx - 1]);
		}
	}
	MLX5_ASSERT(priv->sh->tis[tis_idx]);
	return priv->sh->tis[tis_idx]->id;
}

/**
 * Create the Tx hairpin queue object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Tx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hca_attr *hca_attr = &priv->sh->cdev->config.hca_attr;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq_data, struct mlx5_txq_ctrl, txq);
	struct mlx5_devx_create_sq_attr dev_mem_attr = { 0 };
	struct mlx5_devx_create_sq_attr host_mem_attr = { 0 };
	struct mlx5_txq_obj *tmpl = txq_ctrl->obj;
	void *umem_buf = NULL;
	void *umem_obj = NULL;
	uint32_t max_wq_data;

	MLX5_ASSERT(txq_data);
	MLX5_ASSERT(tmpl);
	tmpl->txq_ctrl = txq_ctrl;
	dev_mem_attr.hairpin = 1;
	dev_mem_attr.tis_lst_sz = 1;
	dev_mem_attr.tis_num = mlx5_get_txq_tis_num(dev, idx);
	max_wq_data =
		priv->sh->cdev->config.hca_attr.log_max_hairpin_wq_data_sz;
	/* Jumbo frames > 9KB should be supported, and more packets. */
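	/*
	 * Sizing note (explanatory, derived from the code below): the data
	 * buffer size defaults to the smaller of the device limit
	 * (log_max_hairpin_wq_data_sz) and MLX5_HAIRPIN_JUMBO_LOG_SIZE,
	 * while an explicitly configured log_hp_size overrides the default
	 * but is rejected when it exceeds the device limit.  The packet
	 * count is then 2^(log_hairpin_data_sz - MLX5_HAIRPIN_QUEUE_STRIDE),
	 * so a larger data area directly allows more packets in flight.
	 */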
	if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
		if (priv->config.log_hp_size > max_wq_data) {
			DRV_LOG(ERR, "Total data size %u power of 2 is "
				"too large for hairpin.",
				priv->config.log_hp_size);
			rte_errno = ERANGE;
			return -rte_errno;
		}
		dev_mem_attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
	} else {
		dev_mem_attr.wq_attr.log_hairpin_data_sz =
				(max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
				 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
	}
	/* Set the packets number to the maximum value for performance. */
	dev_mem_attr.wq_attr.log_hairpin_num_packets =
			dev_mem_attr.wq_attr.log_hairpin_data_sz -
			MLX5_HAIRPIN_QUEUE_STRIDE;
	dev_mem_attr.hairpin_wq_buffer_type = MLX5_SQC_HAIRPIN_WQ_BUFFER_TYPE_INTERNAL_BUFFER;
	if (txq_ctrl->hairpin_conf.use_rte_memory) {
		uint32_t umem_size;
		uint32_t umem_dbrec;
		size_t alignment = MLX5_WQE_BUF_ALIGNMENT;

		if (alignment == (size_t)-1) {
			DRV_LOG(ERR, "Failed to get WQE buf alignment.");
			rte_errno = ENOMEM;
			return -rte_errno;
		}
		/*
		 * It is assumed that configuration is verified against capabilities
		 * during queue setup.
		 */
		MLX5_ASSERT(hca_attr->hairpin_sq_wq_in_host_mem);
		MLX5_ASSERT(hca_attr->hairpin_sq_wqe_bb_size > 0);
		rte_memcpy(&host_mem_attr, &dev_mem_attr, sizeof(host_mem_attr));
		umem_size = MLX5_WQE_SIZE *
			RTE_BIT32(host_mem_attr.wq_attr.log_hairpin_num_packets);
		umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
		umem_size += MLX5_DBR_SIZE;
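		/*
		 * Resulting UMEM layout (derived from the sizes computed
		 * above): the work queue buffer occupies the first
		 * MLX5_WQE_SIZE * 2^log_hairpin_num_packets bytes, followed
		 * by the doorbell record at offset umem_dbrec (the WQ size
		 * rounded up to MLX5_DBR_SIZE).  Both live in one host
		 * allocation so the single UMEM registration below covers
		 * the WQ and its doorbell.
		 */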
		umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
				       alignment, priv->sh->numa_node);
		if (umem_buf == NULL && txq_ctrl->hairpin_conf.force_memory) {
			DRV_LOG(ERR, "Failed to allocate memory for hairpin TX queue");
			rte_errno = ENOMEM;
			return -rte_errno;
		} else if (umem_buf == NULL && !txq_ctrl->hairpin_conf.force_memory) {
			DRV_LOG(WARNING, "Failed to allocate memory for hairpin TX queue."
				" Falling back to TX queue located on the device.");
			goto create_sq_on_device;
		}
		umem_obj = mlx5_os_umem_reg(priv->sh->cdev->ctx,
					    (void *)(uintptr_t)umem_buf,
					    umem_size,
					    IBV_ACCESS_LOCAL_WRITE);
		if (umem_obj == NULL && txq_ctrl->hairpin_conf.force_memory) {
			DRV_LOG(ERR, "Failed to register UMEM for hairpin TX queue");
			mlx5_free(umem_buf);
			return -rte_errno;
		} else if (umem_obj == NULL && !txq_ctrl->hairpin_conf.force_memory) {
			DRV_LOG(WARNING, "Failed to register UMEM for hairpin TX queue."
				" Falling back to TX queue located on the device.");
			rte_errno = 0;
			mlx5_free(umem_buf);
			goto create_sq_on_device;
		}
		host_mem_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
		host_mem_attr.wq_attr.wq_umem_valid = 1;
		host_mem_attr.wq_attr.wq_umem_id = mlx5_os_get_umem_id(umem_obj);
		host_mem_attr.wq_attr.wq_umem_offset = 0;
		host_mem_attr.wq_attr.dbr_umem_valid = 1;
		host_mem_attr.wq_attr.dbr_umem_id = host_mem_attr.wq_attr.wq_umem_id;
		host_mem_attr.wq_attr.dbr_addr = umem_dbrec;
		host_mem_attr.wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
		host_mem_attr.wq_attr.log_wq_sz =
				host_mem_attr.wq_attr.log_hairpin_num_packets *
				hca_attr->hairpin_sq_wqe_bb_size;
		host_mem_attr.wq_attr.log_wq_pg_sz = MLX5_LOG_PAGE_SIZE;
		host_mem_attr.hairpin_wq_buffer_type = MLX5_SQC_HAIRPIN_WQ_BUFFER_TYPE_HOST_MEMORY;
		tmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->cdev->ctx, &host_mem_attr);
		if (!tmpl->sq && txq_ctrl->hairpin_conf.force_memory) {
			DRV_LOG(ERR,
				"Port %u tx hairpin queue %u can't create SQ object.",
				dev->data->port_id, idx);
			claim_zero(mlx5_os_umem_dereg(umem_obj));
			mlx5_free(umem_buf);
			return -rte_errno;
		} else if (!tmpl->sq && !txq_ctrl->hairpin_conf.force_memory) {
			DRV_LOG(WARNING,
				"Port %u tx hairpin queue %u failed to allocate SQ object"
				" using host memory. Falling back to TX queue located"
				" on the device",
				dev->data->port_id, idx);
			rte_errno = 0;
			claim_zero(mlx5_os_umem_dereg(umem_obj));
			mlx5_free(umem_buf);
			goto create_sq_on_device;
		}
		tmpl->umem_buf_wq_buffer = umem_buf;
		tmpl->umem_obj_wq_buffer = umem_obj;
		return 0;
	}

create_sq_on_device:
	tmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->cdev->ctx, &dev_mem_attr);
	if (!tmpl->sq) {
		DRV_LOG(ERR,
			"Port %u tx hairpin queue %u can't create SQ object.",
			dev->data->port_id, idx);
		rte_errno = errno;
		return -rte_errno;
	}
	return 0;
}
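
/*
 * Note (summary of the conditional compilation below): the regular
 * non-hairpin DevX Tx queue path needs the UAR mmap offset exported by
 * rdma-core (HAVE_MLX5DV_DEVX_UAR_OFFSET) or a build without Verbs
 * headers; otherwise the helpers are compiled out and
 * mlx5_txq_devx_obj_new() rejects non-hairpin queues, as its error path
 * further down indicates.
 */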

#if defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) || !defined(HAVE_INFINIBAND_VERBS_H)
/**
 * Destroy the Tx queue DevX object.
 *
 * @param txq_obj
 *   Txq object to destroy.
 */
static void
mlx5_txq_release_devx_resources(struct mlx5_txq_obj *txq_obj)
{
	mlx5_devx_sq_destroy(&txq_obj->sq_obj);
	memset(&txq_obj->sq_obj, 0, sizeof(txq_obj->sq_obj));
	mlx5_devx_cq_destroy(&txq_obj->cq_obj);
	memset(&txq_obj->cq_obj, 0, sizeof(txq_obj->cq_obj));
}

/**
 * Create an SQ object and its resources using DevX.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Tx queue array.
 * @param[in] log_desc_n
 *   Log of number of descriptors in queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_txq_create_devx_sq_resources(struct rte_eth_dev *dev, uint16_t idx,
				  uint16_t log_desc_n)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_common_device *cdev = priv->sh->cdev;
	struct mlx5_uar *uar = &priv->sh->tx_uar;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq_data, struct mlx5_txq_ctrl, txq);
	struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
	struct mlx5_devx_create_sq_attr sq_attr = {
		.flush_in_error_en = 1,
		.allow_multi_pkt_send_wqe = !!priv->config.mps,
		.min_wqe_inline_mode = cdev->config.hca_attr.vport_inline_mode,
		.allow_swp = !!priv->sh->dev_cap.swp,
		.cqn = txq_obj->cq_obj.cq->id,
		.tis_lst_sz = 1,
		.wq_attr = (struct mlx5_devx_wq_attr){
			.pd = cdev->pdn,
			.uar_page = mlx5_os_get_devx_uar_page_id(uar->obj),
		},
		.ts_format =
			mlx5_ts_format_conv(cdev->config.hca_attr.sq_ts_format),
		.tis_num = mlx5_get_txq_tis_num(dev, idx),
	};

	/* Create Send Queue object with DevX. */
	return mlx5_devx_sq_create(cdev->ctx, &txq_obj->sq_obj,
				   log_desc_n, &sq_attr, priv->sh->numa_node);
}
#endif

/**
 * Create the Tx queue DevX object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Tx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_txq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq_data, struct mlx5_txq_ctrl, txq);

	if (txq_ctrl->is_hairpin)
		return mlx5_txq_obj_hairpin_new(dev, idx);
#if !defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) && defined(HAVE_INFINIBAND_VERBS_H)
	DRV_LOG(ERR, "Port %u Tx queue %u cannot create with DevX, no UAR.",
		dev->data->port_id, idx);
	rte_errno = ENOMEM;
	return -rte_errno;
#else
	struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
	struct mlx5_devx_cq_attr cq_attr = {
		.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar.obj),
	};
	uint32_t cqe_n, log_desc_n;
	uint32_t wqe_n, wqe_size;
	int ret = 0;

	MLX5_ASSERT(txq_data);
	MLX5_ASSERT(txq_obj);
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	MLX5_ASSERT(ppriv);
	txq_obj->txq_ctrl = txq_ctrl;
	txq_obj->dev = dev;
	if (__rte_trace_point_fp_is_enabled() &&
	    txq_data->offloads & RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP)
		cqe_n = UINT16_MAX / 2 - 1;
	else
		cqe_n = (1UL << txq_data->elts_n) / MLX5_TX_COMP_THRESH +
			1 + MLX5_TX_COMP_THRESH_INLINE_DIV;
	log_desc_n = log2above(cqe_n);
	cqe_n = 1UL << log_desc_n;
	if (cqe_n > UINT16_MAX) {
		DRV_LOG(ERR, "Port %u Tx queue %u requests too many CQEs %u.",
			dev->data->port_id, txq_data->idx, cqe_n);
		rte_errno = EINVAL;
		return -rte_errno;
	}
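	/*
	 * Rough CQE budget example (macro values are assumptions used for
	 * illustration only): with 1024 Tx descriptors (elts_n == 10) and
	 * assuming MLX5_TX_COMP_THRESH == 32 and
	 * MLX5_TX_COMP_THRESH_INLINE_DIV == 8, the request is
	 * 1024 / 32 + 1 + 8 = 41 CQEs, rounded up by log2above() to 64.
	 * The CQ can stay much smaller than the SQ because a completion is
	 * requested only roughly once per MLX5_TX_COMP_THRESH packets.
	 */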
	/* Create completion queue object with DevX. */
	ret = mlx5_devx_cq_create(sh->cdev->ctx, &txq_obj->cq_obj, log_desc_n,
				  &cq_attr, priv->sh->numa_node);
	if (ret) {
		DRV_LOG(ERR, "Port %u Tx queue %u CQ creation failure.",
			dev->data->port_id, idx);
		goto error;
	}
	txq_data->cqe_n = log_desc_n;
	txq_data->cqe_s = cqe_n;
	txq_data->cqe_m = txq_data->cqe_s - 1;
	txq_data->cqes = txq_obj->cq_obj.cqes;
	txq_data->cq_ci = 0;
	txq_data->cq_pi = 0;
	txq_data->cq_db = txq_obj->cq_obj.db_rec;
	*txq_data->cq_db = 0;
	/*
	 * Adjust the amount of WQEs depending on inline settings.
	 * The number of descriptors should be enough to handle
	 * the specified number of packets. If the queue is being created
	 * with Verbs, rdma-core does the queue size adjustment internally
	 * in mlx5_calc_sq_size(); do the same here for a queue created
	 * with DevX.
	 */
	wqe_size = txq_data->tso_en ?
		   RTE_ALIGN(txq_ctrl->max_tso_header, MLX5_WSEG_SIZE) : 0;
	wqe_size += sizeof(struct mlx5_wqe_cseg) +
		    sizeof(struct mlx5_wqe_eseg) +
		    sizeof(struct mlx5_wqe_dseg);
	if (txq_data->inlen_send)
		wqe_size = RTE_MAX(wqe_size, sizeof(struct mlx5_wqe_cseg) +
					     sizeof(struct mlx5_wqe_eseg) +
					     RTE_ALIGN(txq_data->inlen_send +
						       sizeof(uint32_t),
						       MLX5_WSEG_SIZE));
	wqe_size = RTE_ALIGN(wqe_size, MLX5_WQE_SIZE) / MLX5_WQE_SIZE;
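	/*
	 * Example of the adjustment above (segment sizes are assumptions
	 * used only for illustration; the real values come from the PRM
	 * definitions): with 16-byte control/ethernet/data segments, no TSO
	 * and no data inlining, wqe_size is 48 bytes and rounds up to a
	 * single 64-byte WQEBB per packet.  With inlen_send == 128 the
	 * inline variant wins: 16 + 16 + RTE_ALIGN(128 + 4, 16) = 176 bytes,
	 * i.e. 3 WQEBBs per packet, so the SQ below is sized accordingly
	 * before being capped by the device's max_qp_wr limit.
	 */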
	/* Create Send Queue object with DevX. */
	wqe_n = RTE_MIN((1UL << txq_data->elts_n) * wqe_size,
			(uint32_t)priv->sh->dev_cap.max_qp_wr);
	log_desc_n = log2above(wqe_n);
	ret = mlx5_txq_create_devx_sq_resources(dev, idx, log_desc_n);
	if (ret) {
		DRV_LOG(ERR, "Port %u Tx queue %u SQ creation failure.",
			dev->data->port_id, idx);
		rte_errno = errno;
		goto error;
	}
	/* Create the Work Queue. */
	txq_data->wqe_n = log_desc_n;
	txq_data->wqe_s = 1 << txq_data->wqe_n;
	txq_data->wqe_m = txq_data->wqe_s - 1;
	txq_data->wqes = (struct mlx5_wqe *)(uintptr_t)txq_obj->sq_obj.wqes;
	txq_data->wqes_end = txq_data->wqes + txq_data->wqe_s;
	txq_data->wqe_ci = 0;
	txq_data->wqe_pi = 0;
	txq_data->wqe_comp = 0;
	txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV;
	txq_data->qp_db = &txq_obj->sq_obj.db_rec[MLX5_SND_DBR];
	*txq_data->qp_db = 0;
	txq_data->qp_num_8s = txq_obj->sq_obj.sq->id << 8;
	txq_data->db_heu = sh->cdev->config.dbnc == MLX5_SQ_DB_HEURISTIC;
	txq_data->db_nc = sh->tx_uar.dbnc;
	txq_data->wait_on_time = !!(!sh->config.tx_pp &&
				    sh->cdev->config.hca_attr.wait_on_time);
	/* Change Send Queue state to Ready-to-Send. */
	ret = mlx5_txq_devx_modify(txq_obj, MLX5_TXQ_MOD_RST2RDY, 0);
	if (ret) {
		rte_errno = errno;
		DRV_LOG(ERR,
			"Port %u Tx queue %u SQ state to SQC_STATE_RDY failed.",
			dev->data->port_id, idx);
		goto error;
	}
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	/*
	 * If using DevX, we need to query and store the TIS transport domain
	 * value. This is done once per port. We will use this value on Rx,
	 * when creating a matching TIR.
	 */
	if (!priv->sh->tdn)
		priv->sh->tdn = priv->sh->td->id;
#endif
	txq_ctrl->uar_mmap_offset =
		mlx5_os_get_devx_uar_mmap_offset(sh->tx_uar.obj);
	ppriv->uar_table[txq_data->idx] = sh->tx_uar.bf_db;
	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	mlx5_txq_release_devx_resources(txq_obj);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
#endif
}

/**
 * Release a Tx DevX queue object.
 *
 * @param txq_obj
 *   DevX Tx queue object.
 */
void
mlx5_txq_devx_obj_release(struct mlx5_txq_obj *txq_obj)
{
	MLX5_ASSERT(txq_obj);
	if (txq_obj->txq_ctrl->is_hairpin) {
		if (txq_obj->sq) {
			claim_zero(mlx5_devx_cmd_destroy(txq_obj->sq));
			txq_obj->sq = NULL;
		}
		if (txq_obj->tis)
			claim_zero(mlx5_devx_cmd_destroy(txq_obj->tis));
		if (txq_obj->umem_obj_wq_buffer) {
			claim_zero(mlx5_os_umem_dereg(txq_obj->umem_obj_wq_buffer));
			txq_obj->umem_obj_wq_buffer = NULL;
		}
		if (txq_obj->umem_buf_wq_buffer) {
			mlx5_free(txq_obj->umem_buf_wq_buffer);
			txq_obj->umem_buf_wq_buffer = NULL;
		}
#if defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) || !defined(HAVE_INFINIBAND_VERBS_H)
	} else {
		mlx5_txq_release_devx_resources(txq_obj);
#endif
	}
}

struct mlx5_obj_ops devx_obj_ops = {
	.rxq_obj_modify_vlan_strip = mlx5_rxq_obj_modify_rq_vlan_strip,
	.rxq_obj_new = mlx5_rxq_devx_obj_new,
	.rxq_event_get = mlx5_rx_devx_get_event,
	.rxq_obj_modify = mlx5_devx_modify_rq,
	.rxq_obj_release = mlx5_rxq_devx_obj_release,
	.rxq_event_get_lwm = mlx5_rx_devx_get_event_lwm,
	.ind_table_new = mlx5_devx_ind_table_new,
	.ind_table_modify = mlx5_devx_ind_table_modify,
	.ind_table_destroy = mlx5_devx_ind_table_destroy,
	.hrxq_new = mlx5_devx_hrxq_new,
	.hrxq_destroy = mlx5_devx_tir_destroy,
	.hrxq_modify = mlx5_devx_hrxq_modify,
	.drop_action_create = mlx5_devx_drop_action_create,
	.drop_action_destroy = mlx5_devx_drop_action_destroy,
	.txq_obj_new = mlx5_txq_devx_obj_new,
	.txq_obj_modify = mlx5_txq_devx_modify,
	.txq_obj_release = mlx5_txq_devx_obj_release,
	.lb_dummy_queue_create = NULL,
	.lb_dummy_queue_release = NULL,
};
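
/*
 * Usage sketch (an assumption for illustration; the actual selection logic
 * lives in the OS-specific probe code and is not shown in this file): when
 * DevX queue objects are chosen for a port, the probe path does something
 * along the lines of
 *
 *	priv->obj_ops = devx_obj_ops;
 *
 * after which the generic Rx/Tx setup code reaches the functions above only
 * through these callbacks, e.g. priv->obj_ops.txq_obj_new(dev, idx).
 */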