1832a4cf1SMatan Azrad /* SPDX-License-Identifier: BSD-3-Clause 2832a4cf1SMatan Azrad * Copyright 2021 Mellanox Technologies, Ltd 3832a4cf1SMatan Azrad */ 4832a4cf1SMatan Azrad 5832a4cf1SMatan Azrad #include <rte_malloc.h> 6832a4cf1SMatan Azrad #include <rte_log.h> 7832a4cf1SMatan Azrad #include <rte_errno.h> 882242186SXueming Li #include <rte_bus_pci.h> 939a2c871SMatan Azrad #include <rte_spinlock.h> 10832a4cf1SMatan Azrad #include <rte_comp.h> 11832a4cf1SMatan Azrad #include <rte_compressdev.h> 12832a4cf1SMatan Azrad #include <rte_compressdev_pmd.h> 13832a4cf1SMatan Azrad 14832a4cf1SMatan Azrad #include <mlx5_glue.h> 15832a4cf1SMatan Azrad #include <mlx5_common.h> 16832a4cf1SMatan Azrad #include <mlx5_devx_cmds.h> 17832a4cf1SMatan Azrad #include <mlx5_common_os.h> 188619fcd5SMatan Azrad #include <mlx5_common_devx.h> 198619fcd5SMatan Azrad #include <mlx5_common_mr.h> 20832a4cf1SMatan Azrad #include <mlx5_prm.h> 21832a4cf1SMatan Azrad 22832a4cf1SMatan Azrad #include "mlx5_compress_utils.h" 23832a4cf1SMatan Azrad 24832a4cf1SMatan Azrad #define MLX5_COMPRESS_DRIVER_NAME mlx5_compress 25fefca160SMatan Azrad #define MLX5_COMPRESS_MAX_QPS 1024 2639a2c871SMatan Azrad #define MLX5_COMP_MAX_WIN_SIZE_CONF 6u 2739a2c871SMatan Azrad 2839a2c871SMatan Azrad struct mlx5_compress_xform { 2939a2c871SMatan Azrad LIST_ENTRY(mlx5_compress_xform) next; 3039a2c871SMatan Azrad enum rte_comp_xform_type type; 3139a2c871SMatan Azrad enum rte_comp_checksum_type csum_type; 3239a2c871SMatan Azrad uint32_t opcode; 3339a2c871SMatan Azrad uint32_t gga_ctrl1; /* BE. */ 3439a2c871SMatan Azrad }; 35832a4cf1SMatan Azrad 36832a4cf1SMatan Azrad struct mlx5_compress_priv { 37832a4cf1SMatan Azrad TAILQ_ENTRY(mlx5_compress_priv) next; 387af08c8fSMichael Baum struct rte_compressdev *compressdev; 39ca1418ceSMichael Baum struct mlx5_common_device *cdev; /* Backend mlx5 device. 
*/ 40832a4cf1SMatan Azrad void *uar; 41832a4cf1SMatan Azrad uint8_t min_block_size; 42832a4cf1SMatan Azrad /* Minimum huffman block size supported by the device. */ 43fefca160SMatan Azrad struct rte_compressdev_config dev_config; 4439a2c871SMatan Azrad LIST_HEAD(xform_list, mlx5_compress_xform) xform_list; 4539a2c871SMatan Azrad rte_spinlock_t xform_sl; 4637862dafSMatan Azrad volatile uint64_t *uar_addr; 47bab51810SRaja Zidane /* HCA caps*/ 48bab51810SRaja Zidane uint32_t mmo_decomp_sq:1; 49bab51810SRaja Zidane uint32_t mmo_decomp_qp:1; 50bab51810SRaja Zidane uint32_t mmo_comp_sq:1; 51bab51810SRaja Zidane uint32_t mmo_comp_qp:1; 52bab51810SRaja Zidane uint32_t mmo_dma_sq:1; 53bab51810SRaja Zidane uint32_t mmo_dma_qp:1; 5437862dafSMatan Azrad #ifndef RTE_ARCH_64 5537862dafSMatan Azrad rte_spinlock_t uar32_sl; 5637862dafSMatan Azrad #endif /* RTE_ARCH_64 */ 57832a4cf1SMatan Azrad }; 58832a4cf1SMatan Azrad 598619fcd5SMatan Azrad struct mlx5_compress_qp { 608619fcd5SMatan Azrad uint16_t qp_id; 618619fcd5SMatan Azrad uint16_t entries_n; 628619fcd5SMatan Azrad uint16_t pi; 638619fcd5SMatan Azrad uint16_t ci; 640165bccdSMatan Azrad struct mlx5_mr_ctrl mr_ctrl; 658619fcd5SMatan Azrad int socket_id; 668619fcd5SMatan Azrad struct mlx5_devx_cq cq; 67bab51810SRaja Zidane struct mlx5_devx_qp qp; 688619fcd5SMatan Azrad struct mlx5_pmd_mr opaque_mr; 698619fcd5SMatan Azrad struct rte_comp_op **ops; 708619fcd5SMatan Azrad struct mlx5_compress_priv *priv; 71ccfd891aSMatan Azrad struct rte_compressdev_stats stats; 728619fcd5SMatan Azrad }; 738619fcd5SMatan Azrad 74832a4cf1SMatan Azrad TAILQ_HEAD(mlx5_compress_privs, mlx5_compress_priv) mlx5_compress_priv_list = 75832a4cf1SMatan Azrad TAILQ_HEAD_INITIALIZER(mlx5_compress_priv_list); 76832a4cf1SMatan Azrad static pthread_mutex_t priv_list_lock = PTHREAD_MUTEX_INITIALIZER; 77832a4cf1SMatan Azrad 78832a4cf1SMatan Azrad int mlx5_compress_logtype; 79832a4cf1SMatan Azrad 80384bac8dSMatan Azrad static const struct 
rte_compressdev_capabilities mlx5_caps[] = { 81384bac8dSMatan Azrad { 82384bac8dSMatan Azrad .algo = RTE_COMP_ALGO_NULL, 83384bac8dSMatan Azrad .comp_feature_flags = RTE_COMP_FF_ADLER32_CHECKSUM | 84384bac8dSMatan Azrad RTE_COMP_FF_CRC32_CHECKSUM | 85384bac8dSMatan Azrad RTE_COMP_FF_CRC32_ADLER32_CHECKSUM | 86384bac8dSMatan Azrad RTE_COMP_FF_SHAREABLE_PRIV_XFORM, 87384bac8dSMatan Azrad }, 88384bac8dSMatan Azrad { 89384bac8dSMatan Azrad .algo = RTE_COMP_ALGO_DEFLATE, 90384bac8dSMatan Azrad .comp_feature_flags = RTE_COMP_FF_ADLER32_CHECKSUM | 91384bac8dSMatan Azrad RTE_COMP_FF_CRC32_CHECKSUM | 92384bac8dSMatan Azrad RTE_COMP_FF_CRC32_ADLER32_CHECKSUM | 93384bac8dSMatan Azrad RTE_COMP_FF_SHAREABLE_PRIV_XFORM | 94384bac8dSMatan Azrad RTE_COMP_FF_HUFFMAN_FIXED | 95384bac8dSMatan Azrad RTE_COMP_FF_HUFFMAN_DYNAMIC, 96384bac8dSMatan Azrad .window_size = {.min = 10, .max = 15, .increment = 1}, 97384bac8dSMatan Azrad }, 98384bac8dSMatan Azrad { 99384bac8dSMatan Azrad .algo = RTE_COMP_ALGO_LIST_END, 100384bac8dSMatan Azrad } 101384bac8dSMatan Azrad }; 102fefca160SMatan Azrad 103fefca160SMatan Azrad static void 104fefca160SMatan Azrad mlx5_compress_dev_info_get(struct rte_compressdev *dev, 105fefca160SMatan Azrad struct rte_compressdev_info *info) 106fefca160SMatan Azrad { 107fefca160SMatan Azrad RTE_SET_USED(dev); 108fefca160SMatan Azrad if (info != NULL) { 109fefca160SMatan Azrad info->max_nb_queue_pairs = MLX5_COMPRESS_MAX_QPS; 110fefca160SMatan Azrad info->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED; 111fefca160SMatan Azrad info->capabilities = mlx5_caps; 112fefca160SMatan Azrad } 113fefca160SMatan Azrad } 114fefca160SMatan Azrad 115fefca160SMatan Azrad static int 116fefca160SMatan Azrad mlx5_compress_dev_configure(struct rte_compressdev *dev, 117fefca160SMatan Azrad struct rte_compressdev_config *config) 118fefca160SMatan Azrad { 119fefca160SMatan Azrad struct mlx5_compress_priv *priv; 120fefca160SMatan Azrad 121fefca160SMatan Azrad if (dev == NULL || config == NULL) 
122fefca160SMatan Azrad return -EINVAL; 123fefca160SMatan Azrad priv = dev->data->dev_private; 124fefca160SMatan Azrad priv->dev_config = *config; 125fefca160SMatan Azrad return 0; 126fefca160SMatan Azrad } 127fefca160SMatan Azrad 128fefca160SMatan Azrad static int 129fefca160SMatan Azrad mlx5_compress_dev_close(struct rte_compressdev *dev) 130fefca160SMatan Azrad { 131fefca160SMatan Azrad RTE_SET_USED(dev); 132fefca160SMatan Azrad return 0; 133fefca160SMatan Azrad } 134fefca160SMatan Azrad 1358619fcd5SMatan Azrad static int 1368619fcd5SMatan Azrad mlx5_compress_qp_release(struct rte_compressdev *dev, uint16_t qp_id) 1378619fcd5SMatan Azrad { 1388619fcd5SMatan Azrad struct mlx5_compress_qp *qp = dev->data->queue_pairs[qp_id]; 1398619fcd5SMatan Azrad 140bab51810SRaja Zidane if (qp->qp.qp != NULL) 141bab51810SRaja Zidane mlx5_devx_qp_destroy(&qp->qp); 1428619fcd5SMatan Azrad if (qp->cq.cq != NULL) 1438619fcd5SMatan Azrad mlx5_devx_cq_destroy(&qp->cq); 1448619fcd5SMatan Azrad if (qp->opaque_mr.obj != NULL) { 1458619fcd5SMatan Azrad void *opaq = qp->opaque_mr.addr; 1468619fcd5SMatan Azrad 1478619fcd5SMatan Azrad mlx5_common_verbs_dereg_mr(&qp->opaque_mr); 1488619fcd5SMatan Azrad if (opaq != NULL) 1498619fcd5SMatan Azrad rte_free(opaq); 1508619fcd5SMatan Azrad } 1510165bccdSMatan Azrad mlx5_mr_btree_free(&qp->mr_ctrl.cache_bh); 1528619fcd5SMatan Azrad rte_free(qp); 1538619fcd5SMatan Azrad dev->data->queue_pairs[qp_id] = NULL; 1548619fcd5SMatan Azrad return 0; 1558619fcd5SMatan Azrad } 1568619fcd5SMatan Azrad 1578619fcd5SMatan Azrad static void 158bab51810SRaja Zidane mlx5_compress_init_qp(struct mlx5_compress_qp *qp) 1598619fcd5SMatan Azrad { 1608619fcd5SMatan Azrad volatile struct mlx5_gga_wqe *restrict wqe = 161bab51810SRaja Zidane (volatile struct mlx5_gga_wqe *)qp->qp.wqes; 1628619fcd5SMatan Azrad volatile struct mlx5_gga_compress_opaque *opaq = qp->opaque_mr.addr; 163bab51810SRaja Zidane const uint32_t sq_ds = rte_cpu_to_be_32((qp->qp.qp->id << 8) | 4u); 
1648619fcd5SMatan Azrad const uint32_t flags = RTE_BE32(MLX5_COMP_ALWAYS << 1658619fcd5SMatan Azrad MLX5_COMP_MODE_OFFSET); 1668619fcd5SMatan Azrad const uint32_t opaq_lkey = rte_cpu_to_be_32(qp->opaque_mr.lkey); 1678619fcd5SMatan Azrad int i; 1688619fcd5SMatan Azrad 1698619fcd5SMatan Azrad /* All the next fields state should stay constant. */ 1708619fcd5SMatan Azrad for (i = 0; i < qp->entries_n; ++i, ++wqe) { 1718619fcd5SMatan Azrad wqe->sq_ds = sq_ds; 1728619fcd5SMatan Azrad wqe->flags = flags; 1738619fcd5SMatan Azrad wqe->opaque_lkey = opaq_lkey; 1748619fcd5SMatan Azrad wqe->opaque_vaddr = rte_cpu_to_be_64 1758619fcd5SMatan Azrad ((uint64_t)(uintptr_t)&opaq[i]); 1768619fcd5SMatan Azrad } 1778619fcd5SMatan Azrad } 1788619fcd5SMatan Azrad 1798619fcd5SMatan Azrad static int 1808619fcd5SMatan Azrad mlx5_compress_qp_setup(struct rte_compressdev *dev, uint16_t qp_id, 1818619fcd5SMatan Azrad uint32_t max_inflight_ops, int socket_id) 1828619fcd5SMatan Azrad { 1838619fcd5SMatan Azrad struct mlx5_compress_priv *priv = dev->data->dev_private; 1848619fcd5SMatan Azrad struct mlx5_compress_qp *qp; 1858619fcd5SMatan Azrad struct mlx5_devx_cq_attr cq_attr = { 1868619fcd5SMatan Azrad .uar_page_id = mlx5_os_get_devx_uar_page_id(priv->uar), 1878619fcd5SMatan Azrad }; 188bab51810SRaja Zidane struct mlx5_devx_qp_attr qp_attr = { 189e35ccf24SMichael Baum .pd = priv->cdev->pdn, 190bab51810SRaja Zidane .uar_index = mlx5_os_get_devx_uar_page_id(priv->uar), 191bab51810SRaja Zidane .user_index = qp_id, 1928619fcd5SMatan Azrad }; 1938619fcd5SMatan Azrad uint32_t log_ops_n = rte_log2_u32(max_inflight_ops); 1948619fcd5SMatan Azrad uint32_t alloc_size = sizeof(*qp); 1958619fcd5SMatan Azrad void *opaq_buf; 1968619fcd5SMatan Azrad int ret; 1978619fcd5SMatan Azrad 1988619fcd5SMatan Azrad alloc_size = RTE_ALIGN(alloc_size, RTE_CACHE_LINE_SIZE); 1998619fcd5SMatan Azrad alloc_size += sizeof(struct rte_comp_op *) * (1u << log_ops_n); 2008619fcd5SMatan Azrad qp = rte_zmalloc_socket(__func__, 
alloc_size, RTE_CACHE_LINE_SIZE, 2018619fcd5SMatan Azrad socket_id); 2028619fcd5SMatan Azrad if (qp == NULL) { 2038619fcd5SMatan Azrad DRV_LOG(ERR, "Failed to allocate qp memory."); 2048619fcd5SMatan Azrad rte_errno = ENOMEM; 2058619fcd5SMatan Azrad return -rte_errno; 2068619fcd5SMatan Azrad } 2078619fcd5SMatan Azrad dev->data->queue_pairs[qp_id] = qp; 208*9f1d636fSMichael Baum if (mlx5_mr_ctrl_init(&qp->mr_ctrl, &priv->cdev->mr_scache.dev_gen, 20912b253eeSMichael Baum priv->dev_config.socket_id)) { 21012b253eeSMichael Baum DRV_LOG(ERR, "Cannot allocate MR Btree for qp %u.", 21112b253eeSMichael Baum (uint32_t)qp_id); 21212b253eeSMichael Baum rte_errno = ENOMEM; 21312b253eeSMichael Baum goto err; 21412b253eeSMichael Baum } 215c87bc83aSMichael Baum opaq_buf = rte_calloc(__func__, (size_t)1 << log_ops_n, 2168619fcd5SMatan Azrad sizeof(struct mlx5_gga_compress_opaque), 2178619fcd5SMatan Azrad sizeof(struct mlx5_gga_compress_opaque)); 2188619fcd5SMatan Azrad if (opaq_buf == NULL) { 2198619fcd5SMatan Azrad DRV_LOG(ERR, "Failed to allocate opaque memory."); 2208619fcd5SMatan Azrad rte_errno = ENOMEM; 2218619fcd5SMatan Azrad goto err; 2228619fcd5SMatan Azrad } 2238619fcd5SMatan Azrad qp->entries_n = 1 << log_ops_n; 2248619fcd5SMatan Azrad qp->socket_id = socket_id; 2258619fcd5SMatan Azrad qp->qp_id = qp_id; 2268619fcd5SMatan Azrad qp->priv = priv; 2278619fcd5SMatan Azrad qp->ops = (struct rte_comp_op **)RTE_ALIGN((uintptr_t)(qp + 1), 2288619fcd5SMatan Azrad RTE_CACHE_LINE_SIZE); 229e35ccf24SMichael Baum if (mlx5_common_verbs_reg_mr(priv->cdev->pd, opaq_buf, qp->entries_n * 2308619fcd5SMatan Azrad sizeof(struct mlx5_gga_compress_opaque), 2318619fcd5SMatan Azrad &qp->opaque_mr) != 0) { 2328619fcd5SMatan Azrad rte_free(opaq_buf); 2338619fcd5SMatan Azrad DRV_LOG(ERR, "Failed to register opaque MR."); 2348619fcd5SMatan Azrad rte_errno = ENOMEM; 2358619fcd5SMatan Azrad goto err; 2368619fcd5SMatan Azrad } 237ca1418ceSMichael Baum ret = mlx5_devx_cq_create(priv->cdev->ctx, 
&qp->cq, log_ops_n, &cq_attr, 2388619fcd5SMatan Azrad socket_id); 2398619fcd5SMatan Azrad if (ret != 0) { 2408619fcd5SMatan Azrad DRV_LOG(ERR, "Failed to create CQ."); 2418619fcd5SMatan Azrad goto err; 2428619fcd5SMatan Azrad } 243bab51810SRaja Zidane qp_attr.cqn = qp->cq.cq->id; 244fe46b20cSMichael Baum qp_attr.ts_format = 245fe46b20cSMichael Baum mlx5_ts_format_conv(priv->cdev->config.hca_attr.qp_ts_format); 246bab51810SRaja Zidane qp_attr.rq_size = 0; 247bab51810SRaja Zidane qp_attr.sq_size = RTE_BIT32(log_ops_n); 248bab51810SRaja Zidane qp_attr.mmo = priv->mmo_decomp_qp && priv->mmo_comp_qp 249bab51810SRaja Zidane && priv->mmo_dma_qp; 250ca1418ceSMichael Baum ret = mlx5_devx_qp_create(priv->cdev->ctx, &qp->qp, log_ops_n, &qp_attr, 2518619fcd5SMatan Azrad socket_id); 2528619fcd5SMatan Azrad if (ret != 0) { 253bab51810SRaja Zidane DRV_LOG(ERR, "Failed to create QP."); 2548619fcd5SMatan Azrad goto err; 2558619fcd5SMatan Azrad } 256bab51810SRaja Zidane mlx5_compress_init_qp(qp); 257bab51810SRaja Zidane ret = mlx5_devx_qp2rts(&qp->qp, 0); 258bab51810SRaja Zidane if (ret) 2598619fcd5SMatan Azrad goto err; 2601b9e9826SThomas Monjalon DRV_LOG(INFO, "QP %u: SQN=0x%X CQN=0x%X entries num = %u", 261bab51810SRaja Zidane (uint32_t)qp_id, qp->qp.qp->id, qp->cq.cq->id, qp->entries_n); 2628619fcd5SMatan Azrad return 0; 2638619fcd5SMatan Azrad err: 2648619fcd5SMatan Azrad mlx5_compress_qp_release(dev, qp_id); 2658619fcd5SMatan Azrad return -1; 2668619fcd5SMatan Azrad } 2678619fcd5SMatan Azrad 26839a2c871SMatan Azrad static int 26939a2c871SMatan Azrad mlx5_compress_xform_free(struct rte_compressdev *dev, void *xform) 27039a2c871SMatan Azrad { 27139a2c871SMatan Azrad struct mlx5_compress_priv *priv = dev->data->dev_private; 27239a2c871SMatan Azrad 27339a2c871SMatan Azrad rte_spinlock_lock(&priv->xform_sl); 27439a2c871SMatan Azrad LIST_REMOVE((struct mlx5_compress_xform *)xform, next); 27539a2c871SMatan Azrad rte_spinlock_unlock(&priv->xform_sl); 27639a2c871SMatan Azrad 
rte_free(xform); 27739a2c871SMatan Azrad return 0; 27839a2c871SMatan Azrad } 27939a2c871SMatan Azrad 28039a2c871SMatan Azrad static int 28139a2c871SMatan Azrad mlx5_compress_xform_create(struct rte_compressdev *dev, 28239a2c871SMatan Azrad const struct rte_comp_xform *xform, 28339a2c871SMatan Azrad void **private_xform) 28439a2c871SMatan Azrad { 28539a2c871SMatan Azrad struct mlx5_compress_priv *priv = dev->data->dev_private; 28639a2c871SMatan Azrad struct mlx5_compress_xform *xfrm; 28739a2c871SMatan Azrad uint32_t size; 28839a2c871SMatan Azrad 28939a2c871SMatan Azrad if (xform->type == RTE_COMP_COMPRESS && xform->compress.level == 29039a2c871SMatan Azrad RTE_COMP_LEVEL_NONE) { 29139a2c871SMatan Azrad DRV_LOG(ERR, "Non-compressed block is not supported."); 29239a2c871SMatan Azrad return -ENOTSUP; 29339a2c871SMatan Azrad } 29439a2c871SMatan Azrad if ((xform->type == RTE_COMP_COMPRESS && xform->compress.hash_algo != 29539a2c871SMatan Azrad RTE_COMP_HASH_ALGO_NONE) || (xform->type == RTE_COMP_DECOMPRESS && 29639a2c871SMatan Azrad xform->decompress.hash_algo != RTE_COMP_HASH_ALGO_NONE)) { 29739a2c871SMatan Azrad DRV_LOG(ERR, "SHA is not supported."); 29839a2c871SMatan Azrad return -ENOTSUP; 29939a2c871SMatan Azrad } 30039a2c871SMatan Azrad xfrm = rte_zmalloc_socket(__func__, sizeof(*xfrm), 0, 30139a2c871SMatan Azrad priv->dev_config.socket_id); 30239a2c871SMatan Azrad if (xfrm == NULL) 30339a2c871SMatan Azrad return -ENOMEM; 30439a2c871SMatan Azrad xfrm->opcode = MLX5_OPCODE_MMO; 30539a2c871SMatan Azrad xfrm->type = xform->type; 30639a2c871SMatan Azrad switch (xform->type) { 30739a2c871SMatan Azrad case RTE_COMP_COMPRESS: 30839a2c871SMatan Azrad switch (xform->compress.algo) { 30939a2c871SMatan Azrad case RTE_COMP_ALGO_NULL: 31039a2c871SMatan Azrad xfrm->opcode += MLX5_OPC_MOD_MMO_DMA << 31139a2c871SMatan Azrad WQE_CSEG_OPC_MOD_OFFSET; 31239a2c871SMatan Azrad break; 31339a2c871SMatan Azrad case RTE_COMP_ALGO_DEFLATE: 31439a2c871SMatan Azrad size = 1 << 
xform->compress.window_size; 31539a2c871SMatan Azrad size /= MLX5_GGA_COMP_WIN_SIZE_UNITS; 31639a2c871SMatan Azrad xfrm->gga_ctrl1 += RTE_MIN(rte_log2_u32(size), 31739a2c871SMatan Azrad MLX5_COMP_MAX_WIN_SIZE_CONF) << 31839a2c871SMatan Azrad WQE_GGA_COMP_WIN_SIZE_OFFSET; 319237aad88SRaja Zidane switch (xform->compress.level) { 320237aad88SRaja Zidane case RTE_COMP_LEVEL_PMD_DEFAULT: 32139a2c871SMatan Azrad size = MLX5_GGA_COMP_LOG_BLOCK_SIZE_MAX; 322237aad88SRaja Zidane break; 323237aad88SRaja Zidane case RTE_COMP_LEVEL_MAX: 324237aad88SRaja Zidane size = priv->min_block_size; 325237aad88SRaja Zidane break; 326237aad88SRaja Zidane default: 327237aad88SRaja Zidane size = RTE_MAX(MLX5_GGA_COMP_LOG_BLOCK_SIZE_MAX 328237aad88SRaja Zidane + 1 - xform->compress.level, 329237aad88SRaja Zidane priv->min_block_size); 330237aad88SRaja Zidane } 33139a2c871SMatan Azrad xfrm->gga_ctrl1 += RTE_MIN(size, 33239a2c871SMatan Azrad MLX5_GGA_COMP_LOG_BLOCK_SIZE_MAX) << 33339a2c871SMatan Azrad WQE_GGA_COMP_BLOCK_SIZE_OFFSET; 33439a2c871SMatan Azrad xfrm->opcode += MLX5_OPC_MOD_MMO_COMP << 33539a2c871SMatan Azrad WQE_CSEG_OPC_MOD_OFFSET; 33639a2c871SMatan Azrad size = xform->compress.deflate.huffman == 33739a2c871SMatan Azrad RTE_COMP_HUFFMAN_DYNAMIC ? 
33839a2c871SMatan Azrad MLX5_GGA_COMP_LOG_DYNAMIC_SIZE_MAX : 33939a2c871SMatan Azrad MLX5_GGA_COMP_LOG_DYNAMIC_SIZE_MIN; 34039a2c871SMatan Azrad xfrm->gga_ctrl1 += size << 34139a2c871SMatan Azrad WQE_GGA_COMP_DYNAMIC_SIZE_OFFSET; 34239a2c871SMatan Azrad break; 34339a2c871SMatan Azrad default: 34439a2c871SMatan Azrad goto err; 34539a2c871SMatan Azrad } 34639a2c871SMatan Azrad xfrm->csum_type = xform->compress.chksum; 34739a2c871SMatan Azrad break; 34839a2c871SMatan Azrad case RTE_COMP_DECOMPRESS: 34939a2c871SMatan Azrad switch (xform->decompress.algo) { 35039a2c871SMatan Azrad case RTE_COMP_ALGO_NULL: 35139a2c871SMatan Azrad xfrm->opcode += MLX5_OPC_MOD_MMO_DMA << 35239a2c871SMatan Azrad WQE_CSEG_OPC_MOD_OFFSET; 35339a2c871SMatan Azrad break; 35439a2c871SMatan Azrad case RTE_COMP_ALGO_DEFLATE: 35539a2c871SMatan Azrad xfrm->opcode += MLX5_OPC_MOD_MMO_DECOMP << 35639a2c871SMatan Azrad WQE_CSEG_OPC_MOD_OFFSET; 35739a2c871SMatan Azrad break; 35839a2c871SMatan Azrad default: 35939a2c871SMatan Azrad goto err; 36039a2c871SMatan Azrad } 36139a2c871SMatan Azrad xfrm->csum_type = xform->decompress.chksum; 36239a2c871SMatan Azrad break; 36339a2c871SMatan Azrad default: 36439a2c871SMatan Azrad DRV_LOG(ERR, "Algorithm %u is not supported.", xform->type); 36539a2c871SMatan Azrad goto err; 36639a2c871SMatan Azrad } 36739a2c871SMatan Azrad DRV_LOG(DEBUG, "New xform: gga ctrl1 = 0x%08X opcode = 0x%08X csum " 36839a2c871SMatan Azrad "type = %d.", xfrm->gga_ctrl1, xfrm->opcode, xfrm->csum_type); 36939a2c871SMatan Azrad xfrm->gga_ctrl1 = rte_cpu_to_be_32(xfrm->gga_ctrl1); 37039a2c871SMatan Azrad rte_spinlock_lock(&priv->xform_sl); 37139a2c871SMatan Azrad LIST_INSERT_HEAD(&priv->xform_list, xfrm, next); 37239a2c871SMatan Azrad rte_spinlock_unlock(&priv->xform_sl); 37339a2c871SMatan Azrad *private_xform = xfrm; 37439a2c871SMatan Azrad return 0; 37539a2c871SMatan Azrad err: 37639a2c871SMatan Azrad rte_free(xfrm); 37739a2c871SMatan Azrad return -ENOTSUP; 37839a2c871SMatan Azrad } 
37939a2c871SMatan Azrad 380f8c97babSMatan Azrad static void 381f8c97babSMatan Azrad mlx5_compress_dev_stop(struct rte_compressdev *dev) 382f8c97babSMatan Azrad { 383f8c97babSMatan Azrad RTE_SET_USED(dev); 384f8c97babSMatan Azrad } 385f8c97babSMatan Azrad 386f8c97babSMatan Azrad static int 387f8c97babSMatan Azrad mlx5_compress_dev_start(struct rte_compressdev *dev) 388f8c97babSMatan Azrad { 389f8c97babSMatan Azrad RTE_SET_USED(dev); 390f8c97babSMatan Azrad return 0; 391f8c97babSMatan Azrad } 392f8c97babSMatan Azrad 393ccfd891aSMatan Azrad static void 394ccfd891aSMatan Azrad mlx5_compress_stats_get(struct rte_compressdev *dev, 395ccfd891aSMatan Azrad struct rte_compressdev_stats *stats) 396ccfd891aSMatan Azrad { 397ccfd891aSMatan Azrad int qp_id; 398ccfd891aSMatan Azrad 399ccfd891aSMatan Azrad for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) { 400ccfd891aSMatan Azrad struct mlx5_compress_qp *qp = dev->data->queue_pairs[qp_id]; 401ccfd891aSMatan Azrad 402ccfd891aSMatan Azrad stats->enqueued_count += qp->stats.enqueued_count; 403ccfd891aSMatan Azrad stats->dequeued_count += qp->stats.dequeued_count; 404ccfd891aSMatan Azrad stats->enqueue_err_count += qp->stats.enqueue_err_count; 405ccfd891aSMatan Azrad stats->dequeue_err_count += qp->stats.dequeue_err_count; 406ccfd891aSMatan Azrad } 407ccfd891aSMatan Azrad } 408ccfd891aSMatan Azrad 409ccfd891aSMatan Azrad static void 410ccfd891aSMatan Azrad mlx5_compress_stats_reset(struct rte_compressdev *dev) 411ccfd891aSMatan Azrad { 412ccfd891aSMatan Azrad int qp_id; 413ccfd891aSMatan Azrad 414ccfd891aSMatan Azrad for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) { 415ccfd891aSMatan Azrad struct mlx5_compress_qp *qp = dev->data->queue_pairs[qp_id]; 416ccfd891aSMatan Azrad 417ccfd891aSMatan Azrad memset(&qp->stats, 0, sizeof(qp->stats)); 418ccfd891aSMatan Azrad } 419ccfd891aSMatan Azrad } 420ccfd891aSMatan Azrad 421832a4cf1SMatan Azrad static struct rte_compressdev_ops mlx5_compress_ops = { 422fefca160SMatan 
Azrad .dev_configure = mlx5_compress_dev_configure, 423f8c97babSMatan Azrad .dev_start = mlx5_compress_dev_start, 424f8c97babSMatan Azrad .dev_stop = mlx5_compress_dev_stop, 425fefca160SMatan Azrad .dev_close = mlx5_compress_dev_close, 426fefca160SMatan Azrad .dev_infos_get = mlx5_compress_dev_info_get, 427ccfd891aSMatan Azrad .stats_get = mlx5_compress_stats_get, 428ccfd891aSMatan Azrad .stats_reset = mlx5_compress_stats_reset, 4298619fcd5SMatan Azrad .queue_pair_setup = mlx5_compress_qp_setup, 4308619fcd5SMatan Azrad .queue_pair_release = mlx5_compress_qp_release, 43139a2c871SMatan Azrad .private_xform_create = mlx5_compress_xform_create, 43239a2c871SMatan Azrad .private_xform_free = mlx5_compress_xform_free, 433832a4cf1SMatan Azrad .stream_create = NULL, 434832a4cf1SMatan Azrad .stream_free = NULL, 435832a4cf1SMatan Azrad }; 436832a4cf1SMatan Azrad 437f8c97babSMatan Azrad static __rte_always_inline uint32_t 438f8c97babSMatan Azrad mlx5_compress_dseg_set(struct mlx5_compress_qp *qp, 439f8c97babSMatan Azrad volatile struct mlx5_wqe_dseg *restrict dseg, 440f8c97babSMatan Azrad struct rte_mbuf *restrict mbuf, 441f8c97babSMatan Azrad uint32_t offset, uint32_t len) 442f8c97babSMatan Azrad { 443f8c97babSMatan Azrad uintptr_t addr = rte_pktmbuf_mtod_offset(mbuf, uintptr_t, offset); 444f8c97babSMatan Azrad 445f8c97babSMatan Azrad dseg->bcount = rte_cpu_to_be_32(len); 446*9f1d636fSMichael Baum dseg->lkey = mlx5_mr_mb2mr(qp->priv->cdev, 0, &qp->mr_ctrl, mbuf); 447f8c97babSMatan Azrad dseg->pbuf = rte_cpu_to_be_64(addr); 448f8c97babSMatan Azrad return dseg->lkey; 449f8c97babSMatan Azrad } 450f8c97babSMatan Azrad 45137862dafSMatan Azrad /* 45237862dafSMatan Azrad * Provide safe 64bit store operation to mlx5 UAR region for both 32bit and 45337862dafSMatan Azrad * 64bit architectures. 
45437862dafSMatan Azrad */ 45537862dafSMatan Azrad static __rte_always_inline void 45637862dafSMatan Azrad mlx5_compress_uar_write(uint64_t val, struct mlx5_compress_priv *priv) 45737862dafSMatan Azrad { 45837862dafSMatan Azrad #ifdef RTE_ARCH_64 45937862dafSMatan Azrad *priv->uar_addr = val; 46037862dafSMatan Azrad #else /* !RTE_ARCH_64 */ 46137862dafSMatan Azrad rte_spinlock_lock(&priv->uar32_sl); 46237862dafSMatan Azrad *(volatile uint32_t *)priv->uar_addr = val; 46337862dafSMatan Azrad rte_io_wmb(); 46437862dafSMatan Azrad *((volatile uint32_t *)priv->uar_addr + 1) = val >> 32; 46537862dafSMatan Azrad rte_spinlock_unlock(&priv->uar32_sl); 46637862dafSMatan Azrad #endif 46737862dafSMatan Azrad } 46837862dafSMatan Azrad 469f8c97babSMatan Azrad static uint16_t 470f8c97babSMatan Azrad mlx5_compress_enqueue_burst(void *queue_pair, struct rte_comp_op **ops, 471f8c97babSMatan Azrad uint16_t nb_ops) 472f8c97babSMatan Azrad { 473f8c97babSMatan Azrad struct mlx5_compress_qp *qp = queue_pair; 474f8c97babSMatan Azrad volatile struct mlx5_gga_wqe *wqes = (volatile struct mlx5_gga_wqe *) 475bab51810SRaja Zidane qp->qp.wqes, *wqe; 476f8c97babSMatan Azrad struct mlx5_compress_xform *xform; 477f8c97babSMatan Azrad struct rte_comp_op *op; 478f8c97babSMatan Azrad uint16_t mask = qp->entries_n - 1; 479f8c97babSMatan Azrad uint16_t remain = qp->entries_n - (qp->pi - qp->ci); 480f8c97babSMatan Azrad uint16_t idx; 481f8c97babSMatan Azrad bool invalid; 482f8c97babSMatan Azrad 483f8c97babSMatan Azrad if (remain < nb_ops) 484f8c97babSMatan Azrad nb_ops = remain; 485f8c97babSMatan Azrad else 486f8c97babSMatan Azrad remain = nb_ops; 487f8c97babSMatan Azrad if (unlikely(remain == 0)) 488f8c97babSMatan Azrad return 0; 489f8c97babSMatan Azrad do { 490f8c97babSMatan Azrad idx = qp->pi & mask; 491f8c97babSMatan Azrad wqe = &wqes[idx]; 492f8c97babSMatan Azrad rte_prefetch0(&wqes[(qp->pi + 1) & mask]); 493f8c97babSMatan Azrad op = *ops++; 494f8c97babSMatan Azrad xform = op->private_xform; 
495f8c97babSMatan Azrad /* 496f8c97babSMatan Azrad * Check operation arguments and error cases: 497f8c97babSMatan Azrad * - Operation type must be state-less. 498f8c97babSMatan Azrad * - Compress operation flush flag must be FULL or FINAL. 499f8c97babSMatan Azrad * - Source and destination buffers must be mapped internally. 500f8c97babSMatan Azrad */ 501f8c97babSMatan Azrad invalid = op->op_type != RTE_COMP_OP_STATELESS || 502f8c97babSMatan Azrad (xform->type == RTE_COMP_COMPRESS && 503f8c97babSMatan Azrad op->flush_flag < RTE_COMP_FLUSH_FULL); 504f8c97babSMatan Azrad if (unlikely(invalid || 505f8c97babSMatan Azrad (mlx5_compress_dseg_set(qp, &wqe->gather, 506f8c97babSMatan Azrad op->m_src, 507f8c97babSMatan Azrad op->src.offset, 508f8c97babSMatan Azrad op->src.length) == 509f8c97babSMatan Azrad UINT32_MAX) || 510f8c97babSMatan Azrad (mlx5_compress_dseg_set(qp, &wqe->scatter, 511f8c97babSMatan Azrad op->m_dst, 512f8c97babSMatan Azrad op->dst.offset, 513f8c97babSMatan Azrad rte_pktmbuf_pkt_len(op->m_dst) - 514f8c97babSMatan Azrad op->dst.offset) == 515f8c97babSMatan Azrad UINT32_MAX))) { 516f8c97babSMatan Azrad op->status = invalid ? 
RTE_COMP_OP_STATUS_INVALID_ARGS : 517f8c97babSMatan Azrad RTE_COMP_OP_STATUS_ERROR; 518f8c97babSMatan Azrad nb_ops -= remain; 519f8c97babSMatan Azrad if (unlikely(nb_ops == 0)) 520f8c97babSMatan Azrad return 0; 521f8c97babSMatan Azrad break; 522f8c97babSMatan Azrad } 523f8c97babSMatan Azrad wqe->gga_ctrl1 = xform->gga_ctrl1; 524f8c97babSMatan Azrad wqe->opcode = rte_cpu_to_be_32(xform->opcode + (qp->pi << 8)); 525f8c97babSMatan Azrad qp->ops[idx] = op; 526f8c97babSMatan Azrad qp->pi++; 527f8c97babSMatan Azrad } while (--remain); 528ccfd891aSMatan Azrad qp->stats.enqueued_count += nb_ops; 529f8c97babSMatan Azrad rte_io_wmb(); 530bab51810SRaja Zidane qp->qp.db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(qp->pi); 531f8c97babSMatan Azrad rte_wmb(); 53237862dafSMatan Azrad mlx5_compress_uar_write(*(volatile uint64_t *)wqe, qp->priv); 533f8c97babSMatan Azrad rte_wmb(); 534f8c97babSMatan Azrad return nb_ops; 535f8c97babSMatan Azrad } 536f8c97babSMatan Azrad 537f8c97babSMatan Azrad static void 538f8c97babSMatan Azrad mlx5_compress_dump_err_objs(volatile uint32_t *cqe, volatile uint32_t *wqe, 539f8c97babSMatan Azrad volatile uint32_t *opaq) 540f8c97babSMatan Azrad { 541f8c97babSMatan Azrad size_t i; 542f8c97babSMatan Azrad 543f8c97babSMatan Azrad DRV_LOG(ERR, "Error cqe:"); 544f8c97babSMatan Azrad for (i = 0; i < sizeof(struct mlx5_err_cqe) >> 2; i += 4) 545f8c97babSMatan Azrad DRV_LOG(ERR, "%08X %08X %08X %08X", cqe[i], cqe[i + 1], 546f8c97babSMatan Azrad cqe[i + 2], cqe[i + 3]); 547f8c97babSMatan Azrad DRV_LOG(ERR, "\nError wqe:"); 548f8c97babSMatan Azrad for (i = 0; i < sizeof(struct mlx5_gga_wqe) >> 2; i += 4) 549f8c97babSMatan Azrad DRV_LOG(ERR, "%08X %08X %08X %08X", wqe[i], wqe[i + 1], 550f8c97babSMatan Azrad wqe[i + 2], wqe[i + 3]); 551f8c97babSMatan Azrad DRV_LOG(ERR, "\nError opaq:"); 552f8c97babSMatan Azrad for (i = 0; i < sizeof(struct mlx5_gga_compress_opaque) >> 2; i += 4) 553f8c97babSMatan Azrad DRV_LOG(ERR, "%08X %08X %08X %08X", opaq[i], opaq[i + 1], 
					    opaq[i + 2], opaq[i + 3]);
}

/*
 * Translate an error CQE into op failure state.
 *
 * Marks @op as failed, zeroes its output accounting and packs the HW
 * syndromes (opaque-buffer syndrome in the low 32 bits, CQE syndrome in
 * the high 32 bits) into op->debug_status, then dumps the CQE/WQE/opaque
 * objects for debugging and bumps the QP dequeue error counter.
 */
static void
mlx5_compress_cqe_err_handle(struct mlx5_compress_qp *qp,
			     struct rte_comp_op *op)
{
	/* qp->ci indexes the failed completion; entries_n is a power of 2. */
	const uint32_t idx = qp->ci & (qp->entries_n - 1);
	volatile struct mlx5_err_cqe *cqe = (volatile struct mlx5_err_cqe *)
							      &qp->cq.cqes[idx];
	volatile struct mlx5_gga_wqe *wqes = (volatile struct mlx5_gga_wqe *)
								    qp->qp.wqes;
	volatile struct mlx5_gga_compress_opaque *opaq = qp->opaque_mr.addr;

	op->status = RTE_COMP_OP_STATUS_ERROR;
	op->consumed = 0;
	op->produced = 0;
	op->output_chksum = 0;
	/* Low word: opaque syndrome; high word: CQE syndrome (both BE in HW). */
	op->debug_status = rte_be_to_cpu_32(opaq[idx].syndrom) |
			      ((uint64_t)rte_be_to_cpu_32(cqe->syndrome) << 32);
	mlx5_compress_dump_err_objs((volatile uint32_t *)cqe,
				    (volatile uint32_t *)&wqes[idx],
				    (volatile uint32_t *)&opaq[idx]);
	qp->stats.dequeue_err_count++;
}

/*
 * Dequeue burst of completed compress/decompress operations.
 *
 * Polls the completion queue for up to @nb_ops finished operations,
 * fills each op's status/consumed/produced/checksum fields from the CQE
 * and the GGA opaque buffer, and rings the CQ doorbell once at the end.
 *
 * @param queue_pair  QP previously set up by the PMD (struct mlx5_compress_qp).
 * @param ops         Output array for completed operations.
 * @param nb_ops      Array capacity.
 * @return Number of operations written to @ops (0 when nothing completed).
 */
static uint16_t
mlx5_compress_dequeue_burst(void *queue_pair, struct rte_comp_op **ops,
			    uint16_t nb_ops)
{
	struct mlx5_compress_qp *qp = queue_pair;
	volatile struct mlx5_compress_xform *restrict xform;
	volatile struct mlx5_cqe *restrict cqe;
	volatile struct mlx5_gga_compress_opaque *opaq = qp->opaque_mr.addr;
	struct rte_comp_op *restrict op;
	const unsigned int cq_size = qp->entries_n;
	const unsigned int mask = cq_size - 1;
	uint32_t idx;
	uint32_t next_idx = qp->ci & mask;
	/* pi - ci wraps correctly in 16 bits; cap by caller's array size. */
	const uint16_t max = RTE_MIN((uint16_t)(qp->pi - qp->ci), nb_ops);
	uint16_t i = 0;
	int ret;

	if (unlikely(max == 0))
		return 0;
	do {
		idx = next_idx;
		next_idx = (qp->ci + 1) & mask;
		rte_prefetch0(&qp->cq.cqes[next_idx]);
		rte_prefetch0(qp->ops[next_idx]);
		op = qp->ops[idx];
		cqe = &qp->cq.cqes[idx];
		ret = check_cqe(cqe, cq_size, qp->ci);
		/*
		 * Be sure owner read is done before any other cookie field or
		 * opaque field.
		 */
		rte_io_rmb();
		if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
			/* HW still owns the CQE: stop polling; else error. */
			if (likely(ret == MLX5_CQE_STATUS_HW_OWN))
				break;
			mlx5_compress_cqe_err_handle(qp, op);
		} else {
			xform = op->private_xform;
			op->status = RTE_COMP_OP_STATUS_SUCCESS;
			op->consumed = op->src.length;
			op->produced = rte_be_to_cpu_32(cqe->byte_cnt);
			MLX5_ASSERT(cqe->byte_cnt ==
				    opaq[idx].scattered_length);
			/* Checksums are delivered BE in the opaque buffer. */
			switch (xform->csum_type) {
			case RTE_COMP_CHECKSUM_CRC32:
				op->output_chksum = (uint64_t)rte_be_to_cpu_32
						    (opaq[idx].crc32);
				break;
			case RTE_COMP_CHECKSUM_ADLER32:
				op->output_chksum = (uint64_t)rte_be_to_cpu_32
					    (opaq[idx].adler32) << 32;
				break;
			case RTE_COMP_CHECKSUM_CRC32_ADLER32:
				/* CRC32 low 32 bits, Adler-32 high 32 bits. */
				op->output_chksum = (uint64_t)rte_be_to_cpu_32
						    (opaq[idx].crc32) |
						    ((uint64_t)rte_be_to_cpu_32
						    (opaq[idx].adler32) << 32);
				break;
			default:
				break;
			}
		}
		ops[i++] = op;
		qp->ci++;
	} while (i < max);
	if (likely(i != 0)) {
		/* Order CQE reads before the doorbell record update. */
		rte_io_wmb();
		qp->cq.db_rec[0] = rte_cpu_to_be_32(qp->ci);
		qp->stats.dequeued_count += i;
	}
	return i;
}
653832a4cf1SMatan Azrad static void 654e35ccf24SMichael Baum mlx5_compress_uar_release(struct mlx5_compress_priv *priv) 655832a4cf1SMatan Azrad { 656832a4cf1SMatan Azrad if (priv->uar != NULL) { 657832a4cf1SMatan Azrad mlx5_glue->devx_free_uar(priv->uar); 658832a4cf1SMatan Azrad priv->uar = NULL; 659832a4cf1SMatan Azrad } 660832a4cf1SMatan Azrad } 661832a4cf1SMatan Azrad 662832a4cf1SMatan Azrad static int 663e35ccf24SMichael Baum mlx5_compress_uar_prepare(struct mlx5_compress_priv *priv) 664832a4cf1SMatan Azrad { 665ca1418ceSMichael Baum priv->uar = mlx5_devx_alloc_uar(priv->cdev->ctx, -1); 666832a4cf1SMatan Azrad if (priv->uar == NULL || mlx5_os_get_devx_uar_reg_addr(priv->uar) == 667832a4cf1SMatan Azrad NULL) { 668832a4cf1SMatan Azrad rte_errno = errno; 669832a4cf1SMatan Azrad DRV_LOG(ERR, "Failed to allocate UAR."); 670832a4cf1SMatan Azrad return -1; 671832a4cf1SMatan Azrad } 67237862dafSMatan Azrad priv->uar_addr = mlx5_os_get_devx_uar_reg_addr(priv->uar); 673c0249919SMatan Azrad MLX5_ASSERT(priv->uar_addr); 67437862dafSMatan Azrad #ifndef RTE_ARCH_64 67537862dafSMatan Azrad rte_spinlock_init(&priv->uar32_sl); 67637862dafSMatan Azrad #endif /* RTE_ARCH_64 */ 677832a4cf1SMatan Azrad return 0; 678832a4cf1SMatan Azrad } 679832a4cf1SMatan Azrad 680832a4cf1SMatan Azrad static int 6817af08c8fSMichael Baum mlx5_compress_dev_probe(struct mlx5_common_device *cdev) 682832a4cf1SMatan Azrad { 6837af08c8fSMichael Baum struct rte_compressdev *compressdev; 684832a4cf1SMatan Azrad struct mlx5_compress_priv *priv; 685fe46b20cSMichael Baum struct mlx5_hca_attr *attr = &cdev->config.hca_attr; 686832a4cf1SMatan Azrad struct rte_compressdev_pmd_init_params init_params = { 687832a4cf1SMatan Azrad .name = "", 6887af08c8fSMichael Baum .socket_id = cdev->dev->numa_node, 689832a4cf1SMatan Azrad }; 690ca1418ceSMichael Baum const char *ibdev_name = mlx5_os_get_ctx_device_name(cdev->ctx); 691832a4cf1SMatan Azrad 692832a4cf1SMatan Azrad if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 
693832a4cf1SMatan Azrad DRV_LOG(ERR, "Non-primary process type is not supported."); 694832a4cf1SMatan Azrad rte_errno = ENOTSUP; 695832a4cf1SMatan Azrad return -rte_errno; 696832a4cf1SMatan Azrad } 697fe46b20cSMichael Baum if ((attr->mmo_compress_sq_en == 0 || attr->mmo_decompress_sq_en == 0 || 698fe46b20cSMichael Baum attr->mmo_dma_sq_en == 0) && (attr->mmo_compress_qp_en == 0 || 699fe46b20cSMichael Baum attr->mmo_decompress_qp_en == 0 || attr->mmo_dma_qp_en == 0)) { 700832a4cf1SMatan Azrad DRV_LOG(ERR, "Not enough capabilities to support compress " 701832a4cf1SMatan Azrad "operations, maybe old FW/OFED version?"); 702832a4cf1SMatan Azrad rte_errno = ENOTSUP; 703832a4cf1SMatan Azrad return -ENOTSUP; 704832a4cf1SMatan Azrad } 705ca1418ceSMichael Baum compressdev = rte_compressdev_pmd_create(ibdev_name, cdev->dev, 706832a4cf1SMatan Azrad sizeof(*priv), &init_params); 7077af08c8fSMichael Baum if (compressdev == NULL) { 708ca1418ceSMichael Baum DRV_LOG(ERR, "Failed to create device \"%s\".", ibdev_name); 709832a4cf1SMatan Azrad return -ENODEV; 710832a4cf1SMatan Azrad } 711832a4cf1SMatan Azrad DRV_LOG(INFO, 712ca1418ceSMichael Baum "Compress device %s was created successfully.", ibdev_name); 7137af08c8fSMichael Baum compressdev->dev_ops = &mlx5_compress_ops; 7147af08c8fSMichael Baum compressdev->dequeue_burst = mlx5_compress_dequeue_burst; 7157af08c8fSMichael Baum compressdev->enqueue_burst = mlx5_compress_enqueue_burst; 7167af08c8fSMichael Baum compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED; 7177af08c8fSMichael Baum priv = compressdev->data->dev_private; 718fe46b20cSMichael Baum priv->mmo_decomp_sq = attr->mmo_decompress_sq_en; 719fe46b20cSMichael Baum priv->mmo_decomp_qp = attr->mmo_decompress_qp_en; 720fe46b20cSMichael Baum priv->mmo_comp_sq = attr->mmo_compress_sq_en; 721fe46b20cSMichael Baum priv->mmo_comp_qp = attr->mmo_compress_qp_en; 722fe46b20cSMichael Baum priv->mmo_dma_sq = attr->mmo_dma_sq_en; 723fe46b20cSMichael Baum priv->mmo_dma_qp = 
attr->mmo_dma_qp_en; 724ca1418ceSMichael Baum priv->cdev = cdev; 7257af08c8fSMichael Baum priv->compressdev = compressdev; 726fe46b20cSMichael Baum priv->min_block_size = attr->compress_min_block_size; 727e35ccf24SMichael Baum if (mlx5_compress_uar_prepare(priv) != 0) { 7287af08c8fSMichael Baum rte_compressdev_pmd_destroy(priv->compressdev); 729832a4cf1SMatan Azrad return -1; 730832a4cf1SMatan Azrad } 731832a4cf1SMatan Azrad pthread_mutex_lock(&priv_list_lock); 732832a4cf1SMatan Azrad TAILQ_INSERT_TAIL(&mlx5_compress_priv_list, priv, next); 733832a4cf1SMatan Azrad pthread_mutex_unlock(&priv_list_lock); 734832a4cf1SMatan Azrad return 0; 735832a4cf1SMatan Azrad } 736832a4cf1SMatan Azrad 737832a4cf1SMatan Azrad static int 7387af08c8fSMichael Baum mlx5_compress_dev_remove(struct mlx5_common_device *cdev) 739832a4cf1SMatan Azrad { 740832a4cf1SMatan Azrad struct mlx5_compress_priv *priv = NULL; 741832a4cf1SMatan Azrad 742832a4cf1SMatan Azrad pthread_mutex_lock(&priv_list_lock); 743832a4cf1SMatan Azrad TAILQ_FOREACH(priv, &mlx5_compress_priv_list, next) 7447af08c8fSMichael Baum if (priv->compressdev->device == cdev->dev) 745832a4cf1SMatan Azrad break; 746832a4cf1SMatan Azrad if (priv) 747832a4cf1SMatan Azrad TAILQ_REMOVE(&mlx5_compress_priv_list, priv, next); 748832a4cf1SMatan Azrad pthread_mutex_unlock(&priv_list_lock); 749832a4cf1SMatan Azrad if (priv) { 750e35ccf24SMichael Baum mlx5_compress_uar_release(priv); 7517af08c8fSMichael Baum rte_compressdev_pmd_destroy(priv->compressdev); 752832a4cf1SMatan Azrad } 753832a4cf1SMatan Azrad return 0; 754832a4cf1SMatan Azrad } 755832a4cf1SMatan Azrad 756832a4cf1SMatan Azrad static const struct rte_pci_id mlx5_compress_pci_id_map[] = { 757832a4cf1SMatan Azrad { 758832a4cf1SMatan Azrad RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, 759832a4cf1SMatan Azrad PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF) 760832a4cf1SMatan Azrad }, 761832a4cf1SMatan Azrad { 762832a4cf1SMatan Azrad .vendor_id = 0 763832a4cf1SMatan Azrad } 764832a4cf1SMatan Azrad }; 

/* mlx5 class driver descriptor: hooks probe/remove into mlx5 common bus. */
static struct mlx5_class_driver mlx5_compress_driver = {
	.drv_class = MLX5_CLASS_COMPRESS,
	.name = RTE_STR(MLX5_COMPRESS_DRIVER_NAME),
	.id_table = mlx5_compress_pci_id_map,
	.probe = mlx5_compress_dev_probe,
	.remove = mlx5_compress_dev_remove,
};

/* Constructor: register the driver once the mlx5 glue layer is usable. */
RTE_INIT(rte_mlx5_compress_init)
{
	mlx5_common_init();
	if (mlx5_glue != NULL)
		mlx5_class_driver_register(&mlx5_compress_driver);
}

/* Log type, PMD export and kernel module dependencies registration. */
RTE_LOG_REGISTER_DEFAULT(mlx5_compress_logtype, NOTICE)
RTE_PMD_EXPORT_NAME(MLX5_COMPRESS_DRIVER_NAME, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(MLX5_COMPRESS_DRIVER_NAME, mlx5_compress_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(MLX5_COMPRESS_DRIVER_NAME, "* ib_uverbs & mlx5_core & mlx5_ib");