/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021 Mellanox Technologies, Ltd
 */

#include <rte_malloc.h>
#include <rte_log.h>
#include <rte_errno.h>
#include <rte_pci.h>
#include <rte_spinlock.h>
#include <rte_comp.h>
#include <rte_compressdev.h>
#include <rte_compressdev_pmd.h>

#include <mlx5_glue.h>
#include <mlx5_common.h>
#include <mlx5_common_pci.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_common_os.h>
#include <mlx5_common_devx.h>
#include <mlx5_common_mr.h>
#include <mlx5_prm.h>

#include "mlx5_compress_utils.h"

#define MLX5_COMPRESS_DRIVER_NAME mlx5_compress
#define MLX5_COMPRESS_LOG_NAME pmd.compress.mlx5
#define MLX5_COMPRESS_MAX_QPS 1024
#define MLX5_COMP_MAX_WIN_SIZE_CONF 6u

struct mlx5_compress_xform {
	LIST_ENTRY(mlx5_compress_xform) next;
	enum rte_comp_xform_type type;
	enum rte_comp_checksum_type csum_type;
	uint32_t opcode;
	uint32_t gga_ctrl1; /* BE. */
};

struct mlx5_compress_priv {
	TAILQ_ENTRY(mlx5_compress_priv) next;
	struct ibv_context *ctx; /* Device context. */
	struct rte_pci_device *pci_dev;
	struct rte_compressdev *cdev;
	void *uar;
	uint32_t pdn; /* Protection Domain number. */
	/* Minimum huffman block size supported by the device. */
	uint8_t min_block_size;
	uint8_t sq_ts_format; /* Whether SQ supports timestamp formats. */
	struct ibv_pd *pd;
	struct rte_compressdev_config dev_config;
	LIST_HEAD(xform_list, mlx5_compress_xform) xform_list;
	rte_spinlock_t xform_sl;
	struct mlx5_mr_share_cache mr_scache; /* Global shared MR cache. */
	volatile uint64_t *uar_addr;
#ifndef RTE_ARCH_64
	rte_spinlock_t uar32_sl;
#endif /* RTE_ARCH_64 */
};

struct mlx5_compress_qp {
	uint16_t qp_id;
	uint16_t entries_n;
	uint16_t pi;
	uint16_t ci;
	struct mlx5_mr_ctrl mr_ctrl;
	int socket_id;
	struct mlx5_devx_cq cq;
	struct mlx5_devx_sq sq;
	struct mlx5_pmd_mr opaque_mr;
	struct rte_comp_op **ops;
	struct mlx5_compress_priv *priv;
	struct rte_compressdev_stats stats;
};

TAILQ_HEAD(mlx5_compress_privs, mlx5_compress_priv) mlx5_compress_priv_list =
				TAILQ_HEAD_INITIALIZER(mlx5_compress_priv_list);
static pthread_mutex_t priv_list_lock = PTHREAD_MUTEX_INITIALIZER;

int mlx5_compress_logtype;

static const struct rte_compressdev_capabilities mlx5_caps[] = {
	{
		.algo = RTE_COMP_ALGO_NULL,
		.comp_feature_flags = RTE_COMP_FF_ADLER32_CHECKSUM |
				      RTE_COMP_FF_CRC32_CHECKSUM |
				      RTE_COMP_FF_CRC32_ADLER32_CHECKSUM |
				      RTE_COMP_FF_SHAREABLE_PRIV_XFORM,
	},
	{
		.algo = RTE_COMP_ALGO_DEFLATE,
		.comp_feature_flags = RTE_COMP_FF_ADLER32_CHECKSUM |
				      RTE_COMP_FF_CRC32_CHECKSUM |
				      RTE_COMP_FF_CRC32_ADLER32_CHECKSUM |
				      RTE_COMP_FF_SHAREABLE_PRIV_XFORM |
				      RTE_COMP_FF_HUFFMAN_FIXED |
				      RTE_COMP_FF_HUFFMAN_DYNAMIC,
		.window_size = {.min = 10, .max = 15, .increment = 1},
	},
	{
		.algo = RTE_COMP_ALGO_LIST_END,
	}
};

static void
mlx5_compress_dev_info_get(struct rte_compressdev *dev,
			   struct rte_compressdev_info *info)
{
	RTE_SET_USED(dev);
	if (info != NULL) {
		info->max_nb_queue_pairs = MLX5_COMPRESS_MAX_QPS;
		info->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
		info->capabilities = mlx5_caps;
	}
}

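/**
 * DPDK callback to configure a compress device.
 *
 * @param[in] dev
 *   Pointer to the compress device structure.
 * @param[in] config
 *   Device configuration parameters.
 *
 * @return
 *   0 on success, a negative errno value otherwise.
 */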
static int
mlx5_compress_dev_configure(struct rte_compressdev *dev,
			    struct rte_compressdev_config *config)
{
	struct mlx5_compress_priv *priv;

	if (dev == NULL || config == NULL)
		return -EINVAL;
	priv = dev->data->dev_private;
	priv->dev_config = *config;
	return 0;
}

static int
mlx5_compress_dev_close(struct rte_compressdev *dev)
{
	RTE_SET_USED(dev);
	return 0;
}

static int
mlx5_compress_qp_release(struct rte_compressdev *dev, uint16_t qp_id)
{
	struct mlx5_compress_qp *qp = dev->data->queue_pairs[qp_id];

	if (qp->sq.sq != NULL)
		mlx5_devx_sq_destroy(&qp->sq);
	if (qp->cq.cq != NULL)
		mlx5_devx_cq_destroy(&qp->cq);
	if (qp->opaque_mr.obj != NULL) {
		void *opaq = qp->opaque_mr.addr;

		mlx5_common_verbs_dereg_mr(&qp->opaque_mr);
		if (opaq != NULL)
			rte_free(opaq);
	}
	mlx5_mr_btree_free(&qp->mr_ctrl.cache_bh);
	rte_free(qp);
	dev->data->queue_pairs[qp_id] = NULL;
	return 0;
}

static void
mlx5_compress_init_sq(struct mlx5_compress_qp *qp)
{
	volatile struct mlx5_gga_wqe *restrict wqe =
				    (volatile struct mlx5_gga_wqe *)qp->sq.wqes;
	volatile struct mlx5_gga_compress_opaque *opaq = qp->opaque_mr.addr;
	const uint32_t sq_ds = rte_cpu_to_be_32((qp->sq.sq->id << 8) | 4u);
	const uint32_t flags = RTE_BE32(MLX5_COMP_ALWAYS <<
					MLX5_COMP_MODE_OFFSET);
	const uint32_t opaq_lkey = rte_cpu_to_be_32(qp->opaque_mr.lkey);
	int i;

	/* All the following fields should remain constant across WQEs. */
	for (i = 0; i < qp->entries_n; ++i, ++wqe) {
		wqe->sq_ds = sq_ds;
		wqe->flags = flags;
		wqe->opaque_lkey = opaq_lkey;
		wqe->opaque_vaddr = rte_cpu_to_be_64
						((uint64_t)(uintptr_t)&opaq[i]);
	}
}

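/**
 * DPDK callback to set up a queue pair.
 *
 * Allocates the queue pair structure and the opaque result buffer, registers
 * the opaque buffer as an MR, and creates the DevX CQ and SQ used to post the
 * GGA work requests.
 *
 * @param[in] dev
 *   Pointer to the compress device structure.
 * @param[in] qp_id
 *   Queue pair index.
 * @param[in] max_inflight_ops
 *   Requested queue depth, rounded to a power of two.
 * @param[in] socket_id
 *   NUMA socket on which to allocate the queue pair resources.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */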
static int
mlx5_compress_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
		       uint32_t max_inflight_ops, int socket_id)
{
	struct mlx5_compress_priv *priv = dev->data->dev_private;
	struct mlx5_compress_qp *qp;
	struct mlx5_devx_cq_attr cq_attr = {
		.uar_page_id = mlx5_os_get_devx_uar_page_id(priv->uar),
	};
	struct mlx5_devx_create_sq_attr sq_attr = {
		.user_index = qp_id,
		.wq_attr = (struct mlx5_devx_wq_attr){
			.pd = priv->pdn,
			.uar_page = mlx5_os_get_devx_uar_page_id(priv->uar),
		},
	};
	struct mlx5_devx_modify_sq_attr modify_attr = {
		.state = MLX5_SQC_STATE_RDY,
	};
	uint32_t log_ops_n = rte_log2_u32(max_inflight_ops);
	uint32_t alloc_size = sizeof(*qp);
	void *opaq_buf;
	int ret;

	alloc_size = RTE_ALIGN(alloc_size, RTE_CACHE_LINE_SIZE);
	alloc_size += sizeof(struct rte_comp_op *) * (1u << log_ops_n);
	qp = rte_zmalloc_socket(__func__, alloc_size, RTE_CACHE_LINE_SIZE,
				socket_id);
	if (qp == NULL) {
		DRV_LOG(ERR, "Failed to allocate qp memory.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	dev->data->queue_pairs[qp_id] = qp;
	opaq_buf = rte_calloc(__func__, 1u << log_ops_n,
			      sizeof(struct mlx5_gga_compress_opaque),
			      sizeof(struct mlx5_gga_compress_opaque));
	if (opaq_buf == NULL) {
		DRV_LOG(ERR, "Failed to allocate opaque memory.");
		rte_errno = ENOMEM;
		goto err;
	}
	if (mlx5_mr_btree_init(&qp->mr_ctrl.cache_bh, MLX5_MR_BTREE_CACHE_N,
			       priv->dev_config.socket_id)) {
		DRV_LOG(ERR, "Cannot allocate MR Btree for qp %u.",
			(uint32_t)qp_id);
		rte_free(opaq_buf);
		rte_errno = ENOMEM;
		goto err;
	}
	qp->entries_n = 1 << log_ops_n;
	qp->socket_id = socket_id;
	qp->qp_id = qp_id;
	qp->priv = priv;
	qp->ops = (struct rte_comp_op **)RTE_ALIGN((uintptr_t)(qp + 1),
						   RTE_CACHE_LINE_SIZE);
	if (mlx5_common_verbs_reg_mr(priv->pd, opaq_buf, qp->entries_n *
				     sizeof(struct mlx5_gga_compress_opaque),
				     &qp->opaque_mr) != 0) {
		rte_free(opaq_buf);
		DRV_LOG(ERR, "Failed to register opaque MR.");
		rte_errno = ENOMEM;
		goto err;
	}
	ret = mlx5_devx_cq_create(priv->ctx, &qp->cq, log_ops_n, &cq_attr,
				  socket_id);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to create CQ.");
		goto err;
	}
	sq_attr.cqn = qp->cq.cq->id;
	sq_attr.ts_format = mlx5_ts_format_conv(priv->sq_ts_format);
	ret = mlx5_devx_sq_create(priv->ctx, &qp->sq, log_ops_n, &sq_attr,
				  socket_id);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to create SQ.");
		goto err;
	}
	mlx5_compress_init_sq(qp);
	ret = mlx5_devx_cmd_modify_sq(qp->sq.sq, &modify_attr);
	if (ret != 0) {
		DRV_LOG(ERR, "Can't change SQ state to ready.");
		goto err;
	}
	DRV_LOG(INFO, "QP %u: SQN=0x%X CQN=0x%X entries num = %u",
		(uint32_t)qp_id, qp->sq.sq->id, qp->cq.cq->id, qp->entries_n);
	return 0;
err:
	mlx5_compress_qp_release(dev, qp_id);
	return -1;
}

static int
mlx5_compress_xform_free(struct rte_compressdev *dev, void *xform)
{
	struct mlx5_compress_priv *priv = dev->data->dev_private;

	rte_spinlock_lock(&priv->xform_sl);
	LIST_REMOVE((struct mlx5_compress_xform *)xform, next);
	rte_spinlock_unlock(&priv->xform_sl);
	rte_free(xform);
	return 0;
}

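/**
 * DPDK callback to create a private transform.
 *
 * Translates the rte_comp transform into the WQE opcode and the GGA control
 * word used when building work requests for this transform.
 *
 * @param[in] dev
 *   Pointer to the compress device structure.
 * @param[in] xform
 *   rte_comp transform to translate.
 * @param[out] private_xform
 *   Location to store the created driver transform.
 *
 * @return
 *   0 on success, a negative errno value otherwise.
 */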
static int
mlx5_compress_xform_create(struct rte_compressdev *dev,
			   const struct rte_comp_xform *xform,
			   void **private_xform)
{
	struct mlx5_compress_priv *priv = dev->data->dev_private;
	struct mlx5_compress_xform *xfrm;
	uint32_t size;

	if (xform->type == RTE_COMP_COMPRESS && xform->compress.level ==
							  RTE_COMP_LEVEL_NONE) {
		DRV_LOG(ERR, "Non-compressed block is not supported.");
		return -ENOTSUP;
	}
	if ((xform->type == RTE_COMP_COMPRESS && xform->compress.hash_algo !=
	     RTE_COMP_HASH_ALGO_NONE) || (xform->type == RTE_COMP_DECOMPRESS &&
		      xform->decompress.hash_algo != RTE_COMP_HASH_ALGO_NONE)) {
		DRV_LOG(ERR, "SHA is not supported.");
		return -ENOTSUP;
	}
	xfrm = rte_zmalloc_socket(__func__, sizeof(*xfrm), 0,
				  priv->dev_config.socket_id);
	if (xfrm == NULL)
		return -ENOMEM;
	xfrm->opcode = MLX5_OPCODE_MMO;
	xfrm->type = xform->type;
	switch (xform->type) {
	case RTE_COMP_COMPRESS:
		switch (xform->compress.algo) {
		case RTE_COMP_ALGO_NULL:
			xfrm->opcode += MLX5_OPC_MOD_MMO_DMA <<
							WQE_CSEG_OPC_MOD_OFFSET;
			break;
		case RTE_COMP_ALGO_DEFLATE:
			size = 1 << xform->compress.window_size;
			size /= MLX5_GGA_COMP_WIN_SIZE_UNITS;
			xfrm->gga_ctrl1 += RTE_MIN(rte_log2_u32(size),
					MLX5_COMP_MAX_WIN_SIZE_CONF) <<
					WQE_GGA_COMP_WIN_SIZE_OFFSET;
			if (xform->compress.level == RTE_COMP_LEVEL_PMD_DEFAULT)
				size = MLX5_GGA_COMP_LOG_BLOCK_SIZE_MAX;
			else
				size = priv->min_block_size - 1 +
							 xform->compress.level;
			xfrm->gga_ctrl1 += RTE_MIN(size,
					MLX5_GGA_COMP_LOG_BLOCK_SIZE_MAX) <<
					WQE_GGA_COMP_BLOCK_SIZE_OFFSET;
			xfrm->opcode += MLX5_OPC_MOD_MMO_COMP <<
							WQE_CSEG_OPC_MOD_OFFSET;
			size = xform->compress.deflate.huffman ==
						RTE_COMP_HUFFMAN_DYNAMIC ?
					MLX5_GGA_COMP_LOG_DYNAMIC_SIZE_MAX :
					MLX5_GGA_COMP_LOG_DYNAMIC_SIZE_MIN;
			xfrm->gga_ctrl1 += size <<
					WQE_GGA_COMP_DYNAMIC_SIZE_OFFSET;
			break;
		default:
			goto err;
		}
		xfrm->csum_type = xform->compress.chksum;
		break;
	case RTE_COMP_DECOMPRESS:
		switch (xform->decompress.algo) {
		case RTE_COMP_ALGO_NULL:
			xfrm->opcode += MLX5_OPC_MOD_MMO_DMA <<
							WQE_CSEG_OPC_MOD_OFFSET;
			break;
		case RTE_COMP_ALGO_DEFLATE:
			xfrm->opcode += MLX5_OPC_MOD_MMO_DECOMP <<
							WQE_CSEG_OPC_MOD_OFFSET;
			break;
		default:
			goto err;
		}
		xfrm->csum_type = xform->decompress.chksum;
		break;
	default:
		DRV_LOG(ERR, "Xform type %u is not supported.", xform->type);
		goto err;
	}
	DRV_LOG(DEBUG, "New xform: gga ctrl1 = 0x%08X opcode = 0x%08X csum "
		"type = %d.", xfrm->gga_ctrl1, xfrm->opcode, xfrm->csum_type);
	xfrm->gga_ctrl1 = rte_cpu_to_be_32(xfrm->gga_ctrl1);
	rte_spinlock_lock(&priv->xform_sl);
	LIST_INSERT_HEAD(&priv->xform_list, xfrm, next);
	rte_spinlock_unlock(&priv->xform_sl);
	*private_xform = xfrm;
	return 0;
err:
	rte_free(xfrm);
	return -ENOTSUP;
}

static void
mlx5_compress_dev_stop(struct rte_compressdev *dev)
{
	RTE_SET_USED(dev);
}

static int
mlx5_compress_dev_start(struct rte_compressdev *dev)
{
	RTE_SET_USED(dev);
	return 0;
}

static void
mlx5_compress_stats_get(struct rte_compressdev *dev,
			struct rte_compressdev_stats *stats)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct mlx5_compress_qp *qp = dev->data->queue_pairs[qp_id];

		stats->enqueued_count += qp->stats.enqueued_count;
		stats->dequeued_count += qp->stats.dequeued_count;
		stats->enqueue_err_count += qp->stats.enqueue_err_count;
		stats->dequeue_err_count += qp->stats.dequeue_err_count;
	}
}

static void
mlx5_compress_stats_reset(struct rte_compressdev *dev)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct mlx5_compress_qp *qp = dev->data->queue_pairs[qp_id];

		memset(&qp->stats, 0, sizeof(qp->stats));
	}
}

static struct rte_compressdev_ops mlx5_compress_ops = {
	.dev_configure = mlx5_compress_dev_configure,
	.dev_start = mlx5_compress_dev_start,
	.dev_stop = mlx5_compress_dev_stop,
	.dev_close = mlx5_compress_dev_close,
	.dev_infos_get = mlx5_compress_dev_info_get,
	.stats_get = mlx5_compress_stats_get,
	.stats_reset = mlx5_compress_stats_reset,
	.queue_pair_setup = mlx5_compress_qp_setup,
	.queue_pair_release = mlx5_compress_qp_release,
	.private_xform_create = mlx5_compress_xform_create,
	.private_xform_free = mlx5_compress_xform_free,
	.stream_create = NULL,
	.stream_free = NULL,
};

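/**
 * Fill a WQE data segment from an mbuf data area.
 *
 * @param[in] qp
 *   Queue pair the WQE belongs to.
 * @param[out] dseg
 *   Data segment to fill.
 * @param[in] mbuf
 *   Source or destination mbuf.
 * @param[in] offset
 *   Offset of the data area inside the mbuf.
 * @param[in] len
 *   Length of the data area in bytes.
 *
 * @return
 *   The memory region lkey, UINT32_MAX on MR lookup failure.
 */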
static __rte_always_inline uint32_t
mlx5_compress_dseg_set(struct mlx5_compress_qp *qp,
		       volatile struct mlx5_wqe_dseg *restrict dseg,
		       struct rte_mbuf *restrict mbuf,
		       uint32_t offset, uint32_t len)
{
	uintptr_t addr = rte_pktmbuf_mtod_offset(mbuf, uintptr_t, offset);

	dseg->bcount = rte_cpu_to_be_32(len);
	dseg->lkey = mlx5_mr_addr2mr_bh(qp->priv->pd, 0, &qp->priv->mr_scache,
					&qp->mr_ctrl, addr,
					!!(mbuf->ol_flags & EXT_ATTACHED_MBUF));
	dseg->pbuf = rte_cpu_to_be_64(addr);
	return dseg->lkey;
}

/*
 * Provide safe 64bit store operation to mlx5 UAR region for both 32bit and
 * 64bit architectures.
 */
static __rte_always_inline void
mlx5_compress_uar_write(uint64_t val, struct mlx5_compress_priv *priv)
{
#ifdef RTE_ARCH_64
	*priv->uar_addr = val;
#else /* !RTE_ARCH_64 */
	rte_spinlock_lock(&priv->uar32_sl);
	*(volatile uint32_t *)priv->uar_addr = val;
	rte_io_wmb();
	*((volatile uint32_t *)priv->uar_addr + 1) = val >> 32;
	rte_spinlock_unlock(&priv->uar32_sl);
#endif
}

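/**
 * DPDK callback for the enqueue burst operation.
 *
 * Builds one GGA WQE per operation, updates the SQ doorbell record and writes
 * the beginning of the last posted WQE to the UAR register to start hardware
 * processing.
 *
 * @param[in] queue_pair
 *   Queue pair to enqueue to.
 * @param[in] ops
 *   Operations to enqueue.
 * @param[in] nb_ops
 *   Number of operations in the ops array.
 *
 * @return
 *   Number of operations actually enqueued.
 */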
static uint16_t
mlx5_compress_enqueue_burst(void *queue_pair, struct rte_comp_op **ops,
			    uint16_t nb_ops)
{
	struct mlx5_compress_qp *qp = queue_pair;
	volatile struct mlx5_gga_wqe *wqes = (volatile struct mlx5_gga_wqe *)
							      qp->sq.wqes, *wqe;
	struct mlx5_compress_xform *xform;
	struct rte_comp_op *op;
	uint16_t mask = qp->entries_n - 1;
	uint16_t remain = qp->entries_n - (qp->pi - qp->ci);
	uint16_t idx;
	bool invalid;

	if (remain < nb_ops)
		nb_ops = remain;
	else
		remain = nb_ops;
	if (unlikely(remain == 0))
		return 0;
	do {
		idx = qp->pi & mask;
		wqe = &wqes[idx];
		rte_prefetch0(&wqes[(qp->pi + 1) & mask]);
		op = *ops++;
		xform = op->private_xform;
		/*
		 * Check operation arguments and error cases:
		 *   - Operation type must be state-less.
		 *   - Compress operation flush flag must be FULL or FINAL.
		 *   - Source and destination buffers must be mapped internally.
		 */
		invalid = op->op_type != RTE_COMP_OP_STATELESS ||
					  (xform->type == RTE_COMP_COMPRESS &&
					  op->flush_flag < RTE_COMP_FLUSH_FULL);
		if (unlikely(invalid ||
			     (mlx5_compress_dseg_set(qp, &wqe->gather,
						     op->m_src,
						     op->src.offset,
						     op->src.length) ==
								  UINT32_MAX) ||
			     (mlx5_compress_dseg_set(qp, &wqe->scatter,
						     op->m_dst,
						     op->dst.offset,
					     rte_pktmbuf_pkt_len(op->m_dst) -
							op->dst.offset) ==
							       UINT32_MAX))) {
			op->status = invalid ? RTE_COMP_OP_STATUS_INVALID_ARGS :
						      RTE_COMP_OP_STATUS_ERROR;
			nb_ops -= remain;
			if (unlikely(nb_ops == 0))
				return 0;
			break;
		}
		wqe->gga_ctrl1 = xform->gga_ctrl1;
		wqe->opcode = rte_cpu_to_be_32(xform->opcode + (qp->pi << 8));
		qp->ops[idx] = op;
		qp->pi++;
	} while (--remain);
	qp->stats.enqueued_count += nb_ops;
	rte_io_wmb();
	qp->sq.db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(qp->pi);
	rte_wmb();
	mlx5_compress_uar_write(*(volatile uint64_t *)wqe, qp->priv);
	rte_wmb();
	return nb_ops;
}

static void
mlx5_compress_dump_err_objs(volatile uint32_t *cqe, volatile uint32_t *wqe,
			    volatile uint32_t *opaq)
{
	size_t i;

	DRV_LOG(ERR, "Error cqe:");
	for (i = 0; i < sizeof(struct mlx5_err_cqe) >> 2; i += 4)
		DRV_LOG(ERR, "%08X %08X %08X %08X", cqe[i], cqe[i + 1],
			cqe[i + 2], cqe[i + 3]);
	DRV_LOG(ERR, "\nError wqe:");
	for (i = 0; i < sizeof(struct mlx5_gga_wqe) >> 2; i += 4)
		DRV_LOG(ERR, "%08X %08X %08X %08X", wqe[i], wqe[i + 1],
			wqe[i + 2], wqe[i + 3]);
	DRV_LOG(ERR, "\nError opaq:");
	for (i = 0; i < sizeof(struct mlx5_gga_compress_opaque) >> 2; i += 4)
		DRV_LOG(ERR, "%08X %08X %08X %08X", opaq[i], opaq[i + 1],
			opaq[i + 2], opaq[i + 3]);
}

static void
mlx5_compress_cqe_err_handle(struct mlx5_compress_qp *qp,
			     struct rte_comp_op *op)
{
	const uint32_t idx = qp->ci & (qp->entries_n - 1);
	volatile struct mlx5_err_cqe *cqe = (volatile struct mlx5_err_cqe *)
							      &qp->cq.cqes[idx];
	volatile struct mlx5_gga_wqe *wqes = (volatile struct mlx5_gga_wqe *)
								    qp->sq.wqes;
	volatile struct mlx5_gga_compress_opaque *opaq = qp->opaque_mr.addr;

	op->status = RTE_COMP_OP_STATUS_ERROR;
	op->consumed = 0;
	op->produced = 0;
	op->output_chksum = 0;
	op->debug_status = rte_be_to_cpu_32(opaq[idx].syndrom) |
			      ((uint64_t)rte_be_to_cpu_32(cqe->syndrome) << 32);
	mlx5_compress_dump_err_objs((volatile uint32_t *)cqe,
				    (volatile uint32_t *)&wqes[idx],
				    (volatile uint32_t *)&opaq[idx]);
	qp->stats.dequeue_err_count++;
}

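/**
 * DPDK callback for the dequeue burst operation.
 *
 * Polls the CQ for completed operations, fills in the produced length and the
 * requested checksum for successful completions, and handles error CQEs.
 *
 * @param[in] queue_pair
 *   Queue pair to dequeue from.
 * @param[out] ops
 *   Array to store the dequeued operations.
 * @param[in] nb_ops
 *   Maximum number of operations to dequeue.
 *
 * @return
 *   Number of operations actually dequeued.
 */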
static uint16_t
mlx5_compress_dequeue_burst(void *queue_pair, struct rte_comp_op **ops,
			    uint16_t nb_ops)
{
	struct mlx5_compress_qp *qp = queue_pair;
	volatile struct mlx5_compress_xform *restrict xform;
	volatile struct mlx5_cqe *restrict cqe;
	volatile struct mlx5_gga_compress_opaque *opaq = qp->opaque_mr.addr;
	struct rte_comp_op *restrict op;
	const unsigned int cq_size = qp->entries_n;
	const unsigned int mask = cq_size - 1;
	uint32_t idx;
	uint32_t next_idx = qp->ci & mask;
	const uint16_t max = RTE_MIN((uint16_t)(qp->pi - qp->ci), nb_ops);
	uint16_t i = 0;
	int ret;

	if (unlikely(max == 0))
		return 0;
	do {
		idx = next_idx;
		next_idx = (qp->ci + 1) & mask;
		rte_prefetch0(&qp->cq.cqes[next_idx]);
		rte_prefetch0(qp->ops[next_idx]);
		op = qp->ops[idx];
		cqe = &qp->cq.cqes[idx];
		ret = check_cqe(cqe, cq_size, qp->ci);
		/*
		 * Be sure owner read is done before any other cookie field or
		 * opaque field.
		 */
		rte_io_rmb();
		if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
			if (likely(ret == MLX5_CQE_STATUS_HW_OWN))
				break;
			mlx5_compress_cqe_err_handle(qp, op);
		} else {
			xform = op->private_xform;
			op->status = RTE_COMP_OP_STATUS_SUCCESS;
			op->consumed = op->src.length;
			op->produced = rte_be_to_cpu_32(cqe->byte_cnt);
			MLX5_ASSERT(cqe->byte_cnt ==
				    opaq[idx].scattered_length);
			switch (xform->csum_type) {
			case RTE_COMP_CHECKSUM_CRC32:
				op->output_chksum = (uint64_t)rte_be_to_cpu_32
						    (opaq[idx].crc32);
				break;
			case RTE_COMP_CHECKSUM_ADLER32:
				op->output_chksum = (uint64_t)rte_be_to_cpu_32
					    (opaq[idx].adler32) << 32;
				break;
			case RTE_COMP_CHECKSUM_CRC32_ADLER32:
				op->output_chksum = (uint64_t)rte_be_to_cpu_32
						    (opaq[idx].crc32) |
						    ((uint64_t)rte_be_to_cpu_32
						    (opaq[idx].adler32) << 32);
				break;
			default:
				break;
			}
		}
		ops[i++] = op;
		qp->ci++;
	} while (i < max);
	if (likely(i != 0)) {
		rte_io_wmb();
		qp->cq.db_rec[0] = rte_cpu_to_be_32(qp->ci);
		qp->stats.dequeued_count += i;
	}
	return i;
}

static struct ibv_device *
mlx5_compress_get_ib_device_match(struct rte_pci_addr *addr)
{
	int n;
	struct ibv_device **ibv_list = mlx5_glue->get_device_list(&n);
	struct ibv_device *ibv_match = NULL;

	if (ibv_list == NULL) {
		rte_errno = ENOSYS;
		return NULL;
	}
	while (n-- > 0) {
		struct rte_pci_addr paddr;

		DRV_LOG(DEBUG, "Checking device \"%s\"..", ibv_list[n]->name);
		if (mlx5_dev_to_pci_addr(ibv_list[n]->ibdev_path, &paddr) != 0)
			continue;
		if (rte_pci_addr_cmp(addr, &paddr) != 0)
			continue;
		ibv_match = ibv_list[n];
		break;
	}
	if (ibv_match == NULL)
		rte_errno = ENOENT;
	mlx5_glue->free_device_list(ibv_list);
	return ibv_match;
}

static void
mlx5_compress_hw_global_release(struct mlx5_compress_priv *priv)
{
	if (priv->pd != NULL) {
		claim_zero(mlx5_glue->dealloc_pd(priv->pd));
		priv->pd = NULL;
	}
	if (priv->uar != NULL) {
		mlx5_glue->devx_free_uar(priv->uar);
		priv->uar = NULL;
	}
}

static int
mlx5_compress_pd_create(struct mlx5_compress_priv *priv)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	struct mlx5dv_obj obj;
	struct mlx5dv_pd pd_info;
	int ret;

	priv->pd = mlx5_glue->alloc_pd(priv->ctx);
	if (priv->pd == NULL) {
		DRV_LOG(ERR, "Failed to allocate PD.");
		return errno ? -errno : -ENOMEM;
	}
	obj.pd.in = priv->pd;
	obj.pd.out = &pd_info;
	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to get PD object info.");
		mlx5_glue->dealloc_pd(priv->pd);
		priv->pd = NULL;
		return -errno;
	}
	priv->pdn = pd_info.pdn;
	return 0;
#else
	(void)priv;
	DRV_LOG(ERR, "Cannot get pdn - no DV support.");
	return -ENOTSUP;
#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
}

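/**
 * Allocate the global hardware resources shared by all queue pairs:
 * the protection domain and the UAR used for doorbell writes.
 *
 * @param[in] priv
 *   Pointer to the device private data.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */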
static int
mlx5_compress_hw_global_prepare(struct mlx5_compress_priv *priv)
{
	if (mlx5_compress_pd_create(priv) != 0)
		return -1;
	priv->uar = mlx5_devx_alloc_uar(priv->ctx, -1);
	if (priv->uar == NULL || mlx5_os_get_devx_uar_reg_addr(priv->uar) ==
	    NULL) {
		rte_errno = errno;
		claim_zero(mlx5_glue->dealloc_pd(priv->pd));
		DRV_LOG(ERR, "Failed to allocate UAR.");
		return -1;
	}
	priv->uar_addr = mlx5_os_get_devx_uar_reg_addr(priv->uar);
	MLX5_ASSERT(priv->uar_addr);
#ifndef RTE_ARCH_64
	rte_spinlock_init(&priv->uar32_sl);
#endif /* RTE_ARCH_64 */
	return 0;
}

/**
 * DPDK callback to register a PCI device.
 *
 * This function spawns a compress device out of a given PCI device.
 *
 * @param[in] pci_drv
 *   PCI driver structure (mlx5_compress_driver).
 * @param[in] pci_dev
 *   PCI device information.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_compress_pci_probe(struct rte_pci_driver *pci_drv,
			struct rte_pci_device *pci_dev)
{
	struct ibv_device *ibv;
	struct rte_compressdev *cdev;
	struct ibv_context *ctx;
	struct mlx5_compress_priv *priv;
	struct mlx5_hca_attr att = { 0 };
	struct rte_compressdev_pmd_init_params init_params = {
		.name = "",
		.socket_id = pci_dev->device.numa_node,
	};

	RTE_SET_USED(pci_drv);
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DRV_LOG(ERR, "Non-primary process type is not supported.");
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	ibv = mlx5_compress_get_ib_device_match(&pci_dev->addr);
	if (ibv == NULL) {
		DRV_LOG(ERR, "No matching IB device for PCI slot "
			PCI_PRI_FMT ".", pci_dev->addr.domain,
			pci_dev->addr.bus, pci_dev->addr.devid,
			pci_dev->addr.function);
		return -rte_errno;
	}
	DRV_LOG(INFO, "PCI information matches for device \"%s\".", ibv->name);
	ctx = mlx5_glue->dv_open_device(ibv);
	if (ctx == NULL) {
		DRV_LOG(ERR, "Failed to open IB device \"%s\".", ibv->name);
		rte_errno = ENODEV;
		return -rte_errno;
	}
	if (mlx5_devx_cmd_query_hca_attr(ctx, &att) != 0 ||
	    att.mmo_compress_en == 0 || att.mmo_decompress_en == 0 ||
	    att.mmo_dma_en == 0) {
		DRV_LOG(ERR, "Not enough capabilities to support compress "
			"operations, maybe old FW/OFED version?");
		claim_zero(mlx5_glue->close_device(ctx));
		rte_errno = ENOTSUP;
		return -ENOTSUP;
	}
	cdev = rte_compressdev_pmd_create(ibv->name, &pci_dev->device,
					  sizeof(*priv), &init_params);
	if (cdev == NULL) {
		DRV_LOG(ERR, "Failed to create device \"%s\".", ibv->name);
		claim_zero(mlx5_glue->close_device(ctx));
		return -ENODEV;
	}
	DRV_LOG(INFO,
		"Compress device %s was created successfully.", ibv->name);
	cdev->dev_ops = &mlx5_compress_ops;
	cdev->dequeue_burst = mlx5_compress_dequeue_burst;
	cdev->enqueue_burst = mlx5_compress_enqueue_burst;
	cdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
	priv = cdev->data->dev_private;
	priv->ctx = ctx;
	priv->pci_dev = pci_dev;
	priv->cdev = cdev;
	priv->min_block_size = att.compress_min_block_size;
	priv->sq_ts_format = att.sq_ts_format;
	if (mlx5_compress_hw_global_prepare(priv) != 0) {
		rte_compressdev_pmd_destroy(priv->cdev);
		claim_zero(mlx5_glue->close_device(priv->ctx));
		return -1;
	}
	if (mlx5_mr_btree_init(&priv->mr_scache.cache,
			     MLX5_MR_BTREE_CACHE_N * 2, rte_socket_id()) != 0) {
		DRV_LOG(ERR, "Failed to allocate shared cache MR memory.");
		mlx5_compress_hw_global_release(priv);
		rte_compressdev_pmd_destroy(priv->cdev);
		claim_zero(mlx5_glue->close_device(priv->ctx));
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	priv->mr_scache.reg_mr_cb = mlx5_common_verbs_reg_mr;
	priv->mr_scache.dereg_mr_cb = mlx5_common_verbs_dereg_mr;
	pthread_mutex_lock(&priv_list_lock);
	TAILQ_INSERT_TAIL(&mlx5_compress_priv_list, priv, next);
	pthread_mutex_unlock(&priv_list_lock);
	return 0;
}

/**
 * DPDK callback to remove a PCI device.
 *
 * This function removes all compress devices belonging to a given PCI device.
 *
 * @param[in] pci_dev
 *   Pointer to the PCI device.
 *
 * @return
 *   0 on success, the function cannot fail.
 */
static int
mlx5_compress_pci_remove(struct rte_pci_device *pdev)
{
	struct mlx5_compress_priv *priv = NULL;

	pthread_mutex_lock(&priv_list_lock);
	TAILQ_FOREACH(priv, &mlx5_compress_priv_list, next)
		if (rte_pci_addr_cmp(&priv->pci_dev->addr, &pdev->addr) == 0)
			break;
	if (priv)
		TAILQ_REMOVE(&mlx5_compress_priv_list, priv, next);
	pthread_mutex_unlock(&priv_list_lock);
	if (priv) {
		mlx5_mr_release_cache(&priv->mr_scache);
		mlx5_compress_hw_global_release(priv);
		rte_compressdev_pmd_destroy(priv->cdev);
		claim_zero(mlx5_glue->close_device(priv->ctx));
	}
	return 0;
}

static const struct rte_pci_id mlx5_compress_pci_id_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF)
	},
	{
		.vendor_id = 0
	}
};

static struct mlx5_pci_driver mlx5_compress_driver = {
	.driver_class = MLX5_CLASS_COMPRESS,
	.pci_driver = {
		.driver = {
			.name = RTE_STR(MLX5_COMPRESS_DRIVER_NAME),
		},
		.id_table = mlx5_compress_pci_id_map,
		.probe = mlx5_compress_pci_probe,
		.remove = mlx5_compress_pci_remove,
		.drv_flags = 0,
	},
};

RTE_INIT(rte_mlx5_compress_init)
{
	mlx5_common_init();
	if (mlx5_glue != NULL)
		mlx5_pci_driver_register(&mlx5_compress_driver);
}

RTE_LOG_REGISTER(mlx5_compress_logtype, MLX5_COMPRESS_LOG_NAME, NOTICE)
RTE_PMD_EXPORT_NAME(MLX5_COMPRESS_DRIVER_NAME, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(MLX5_COMPRESS_DRIVER_NAME, mlx5_compress_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(MLX5_COMPRESS_DRIVER_NAME, "* ib_uverbs & mlx5_core & mlx5_ib");