1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(C) 2024 Marvell. 3 */ 4 5 #include <rte_compressdev_pmd.h> 6 #include <rte_comp.h> 7 #include <rte_errno.h> 8 #include <rte_malloc.h> 9 10 #include "nitrox_comp.h" 11 #include "nitrox_device.h" 12 #include "nitrox_logs.h" 13 #include "nitrox_comp_reqmgr.h" 14 #include "nitrox_qp.h" 15 16 static const char nitrox_comp_drv_name[] = RTE_STR(COMPRESSDEV_NAME_NITROX_PMD); 17 static const struct rte_driver nitrox_rte_comp_drv = { 18 .name = nitrox_comp_drv_name, 19 .alias = nitrox_comp_drv_name 20 }; 21 22 static int nitrox_comp_queue_pair_release(struct rte_compressdev *dev, 23 uint16_t qp_id); 24 25 static const struct rte_compressdev_capabilities 26 nitrox_comp_pmd_capabilities[] = { 27 { .algo = RTE_COMP_ALGO_DEFLATE, 28 .comp_feature_flags = RTE_COMP_FF_HUFFMAN_FIXED | 29 RTE_COMP_FF_HUFFMAN_DYNAMIC | 30 RTE_COMP_FF_CRC32_CHECKSUM | 31 RTE_COMP_FF_ADLER32_CHECKSUM | 32 RTE_COMP_FF_SHAREABLE_PRIV_XFORM | 33 RTE_COMP_FF_OOP_SGL_IN_SGL_OUT | 34 RTE_COMP_FF_OOP_SGL_IN_LB_OUT | 35 RTE_COMP_FF_OOP_LB_IN_SGL_OUT | 36 RTE_COMP_FF_STATEFUL_COMPRESSION | 37 RTE_COMP_FF_STATEFUL_DECOMPRESSION, 38 .window_size = { 39 .min = NITROX_COMP_WINDOW_SIZE_MIN, 40 .max = NITROX_COMP_WINDOW_SIZE_MAX, 41 .increment = 1 42 }, 43 }, 44 RTE_COMP_END_OF_CAPABILITIES_LIST() 45 }; 46 47 static int nitrox_comp_dev_configure(struct rte_compressdev *dev, 48 struct rte_compressdev_config *config) 49 { 50 struct nitrox_comp_device *comp_dev = dev->data->dev_private; 51 struct nitrox_device *ndev = comp_dev->ndev; 52 uint32_t xform_cnt; 53 char name[RTE_MEMPOOL_NAMESIZE]; 54 55 if (config->nb_queue_pairs > ndev->nr_queues) { 56 NITROX_LOG_LINE(ERR, "Invalid queue pairs, max supported %d", 57 ndev->nr_queues); 58 return -EINVAL; 59 } 60 61 xform_cnt = config->max_nb_priv_xforms + config->max_nb_streams; 62 if (unlikely(xform_cnt == 0)) { 63 NITROX_LOG_LINE(ERR, "Invalid configuration with 0 xforms"); 64 return -EINVAL; 65 } 66 67 snprintf(name, sizeof(name), "%s_xform", dev->data->name); 68 comp_dev->xform_pool = rte_mempool_create(name, 69 xform_cnt, sizeof(struct nitrox_comp_xform), 70 0, 0, NULL, NULL, NULL, NULL, 71 config->socket_id, 0); 72 if (comp_dev->xform_pool == NULL) { 73 NITROX_LOG_LINE(ERR, "Failed to create xform pool, err %d", 74 rte_errno); 75 return -rte_errno; 76 } 77 78 return 0; 79 } 80 81 static int nitrox_comp_dev_start(struct rte_compressdev *dev) 82 { 83 RTE_SET_USED(dev); 84 return 0; 85 } 86 87 static void nitrox_comp_dev_stop(struct rte_compressdev *dev) 88 { 89 RTE_SET_USED(dev); 90 } 91 92 static int nitrox_comp_dev_close(struct rte_compressdev *dev) 93 { 94 int i, ret; 95 struct nitrox_comp_device *comp_dev = dev->data->dev_private; 96 97 for (i = 0; i < dev->data->nb_queue_pairs; i++) { 98 ret = nitrox_comp_queue_pair_release(dev, i); 99 if (ret) 100 return ret; 101 } 102 103 rte_mempool_free(comp_dev->xform_pool); 104 comp_dev->xform_pool = NULL; 105 return 0; 106 } 107 108 static void nitrox_comp_stats_get(struct rte_compressdev *dev, 109 struct rte_compressdev_stats *stats) 110 { 111 int qp_id; 112 113 for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) { 114 struct nitrox_qp *qp = dev->data->queue_pairs[qp_id]; 115 116 if (!qp) 117 continue; 118 119 stats->enqueued_count += qp->stats.enqueued_count; 120 stats->dequeued_count += qp->stats.dequeued_count; 121 stats->enqueue_err_count += qp->stats.enqueue_err_count; 122 stats->dequeue_err_count += qp->stats.dequeue_err_count; 123 } 124 } 125 126 static void nitrox_comp_stats_reset(struct rte_compressdev *dev) 127 { 128 int qp_id; 129 130 for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) { 131 struct nitrox_qp *qp = dev->data->queue_pairs[qp_id]; 132 133 if (!qp) 134 continue; 135 136 memset(&qp->stats, 0, sizeof(qp->stats)); 137 } 138 } 139 140 static void nitrox_comp_dev_info_get(struct rte_compressdev *dev, 141 struct rte_compressdev_info *info) 142 { 143 struct nitrox_comp_device *comp_dev = dev->data->dev_private; 144 struct nitrox_device *ndev = comp_dev->ndev; 145 146 if (!info) 147 return; 148 149 info->max_nb_queue_pairs = ndev->nr_queues; 150 info->feature_flags = dev->feature_flags; 151 info->capabilities = nitrox_comp_pmd_capabilities; 152 } 153 154 static int nitrox_comp_queue_pair_setup(struct rte_compressdev *dev, 155 uint16_t qp_id, 156 uint32_t max_inflight_ops, int socket_id) 157 { 158 struct nitrox_comp_device *comp_dev = dev->data->dev_private; 159 struct nitrox_device *ndev = comp_dev->ndev; 160 struct nitrox_qp *qp = NULL; 161 int err; 162 163 NITROX_LOG_LINE(DEBUG, "queue %d", qp_id); 164 if (qp_id >= ndev->nr_queues) { 165 NITROX_LOG_LINE(ERR, "queue %u invalid, max queues supported %d", 166 qp_id, ndev->nr_queues); 167 return -EINVAL; 168 } 169 170 if (dev->data->queue_pairs[qp_id]) { 171 err = nitrox_comp_queue_pair_release(dev, qp_id); 172 if (err) 173 return err; 174 } 175 176 qp = rte_zmalloc_socket("nitrox PMD qp", sizeof(*qp), 177 RTE_CACHE_LINE_SIZE, 178 socket_id); 179 if (!qp) { 180 NITROX_LOG_LINE(ERR, "Failed to allocate nitrox qp"); 181 return -ENOMEM; 182 } 183 184 qp->type = NITROX_QUEUE_ZIP; 185 qp->qno = qp_id; 186 err = nitrox_qp_setup(qp, ndev->bar_addr, dev->data->name, 187 max_inflight_ops, ZIP_INSTR_SIZE, 188 socket_id); 189 if (unlikely(err)) 190 goto qp_setup_err; 191 192 qp->sr_mp = nitrox_comp_req_pool_create(dev, qp->count, qp_id, 193 socket_id); 194 if (unlikely(!qp->sr_mp)) 195 goto req_pool_err; 196 197 dev->data->queue_pairs[qp_id] = qp; 198 NITROX_LOG_LINE(DEBUG, "queue %d setup done", qp_id); 199 return 0; 200 201 req_pool_err: 202 nitrox_qp_release(qp, ndev->bar_addr); 203 qp_setup_err: 204 rte_free(qp); 205 return err; 206 } 207 208 static int nitrox_comp_queue_pair_release(struct rte_compressdev *dev, 209 uint16_t qp_id) 210 { 211 struct nitrox_comp_device *comp_dev = dev->data->dev_private; 212 struct nitrox_device *ndev = comp_dev->ndev; 213 struct nitrox_qp *qp; 214 int err; 215 216 NITROX_LOG_LINE(DEBUG, "queue %d", qp_id); 217 if (qp_id >= ndev->nr_queues) { 218 NITROX_LOG_LINE(ERR, "queue %u invalid, max queues supported %d", 219 qp_id, ndev->nr_queues); 220 return -EINVAL; 221 } 222 223 qp = dev->data->queue_pairs[qp_id]; 224 if (!qp) { 225 NITROX_LOG_LINE(DEBUG, "queue %u already freed", qp_id); 226 return 0; 227 } 228 229 if (!nitrox_qp_is_empty(qp)) { 230 NITROX_LOG_LINE(ERR, "queue %d not empty", qp_id); 231 return -EAGAIN; 232 } 233 234 dev->data->queue_pairs[qp_id] = NULL; 235 err = nitrox_qp_release(qp, ndev->bar_addr); 236 nitrox_comp_req_pool_free(qp->sr_mp); 237 rte_free(qp); 238 NITROX_LOG_LINE(DEBUG, "queue %d release done", qp_id); 239 return err; 240 } 241 242 static int nitrox_comp_private_xform_create(struct rte_compressdev *dev, 243 const struct rte_comp_xform *xform, 244 void **private_xform) 245 { 246 struct nitrox_comp_device *comp_dev = dev->data->dev_private; 247 struct nitrox_comp_xform *nxform; 248 enum rte_comp_checksum_type chksum_type; 249 int ret; 250 251 if (unlikely(comp_dev->xform_pool == NULL)) { 252 NITROX_LOG_LINE(ERR, "private xform pool not yet created"); 253 return -EINVAL; 254 } 255 256 if (rte_mempool_get(comp_dev->xform_pool, private_xform)) { 257 NITROX_LOG_LINE(ERR, "Failed to get from private xform pool"); 258 return -ENOMEM; 259 } 260 261 nxform = (struct nitrox_comp_xform *)*private_xform; 262 memset(nxform, 0, sizeof(*nxform)); 263 if (xform->type == RTE_COMP_COMPRESS) { 264 enum rte_comp_huffman algo; 265 int level; 266 267 nxform->op = NITROX_COMP_OP_COMPRESS; 268 if (xform->compress.algo != RTE_COMP_ALGO_DEFLATE) { 269 NITROX_LOG_LINE(ERR, "Only deflate is supported"); 270 ret = -ENOTSUP; 271 goto err_exit; 272 } 273 274 algo = xform->compress.deflate.huffman; 275 if (algo == RTE_COMP_HUFFMAN_DEFAULT) 276 nxform->algo = NITROX_COMP_ALGO_DEFLATE_DEFAULT; 277 else if (algo == RTE_COMP_HUFFMAN_FIXED) 278 nxform->algo = NITROX_COMP_ALGO_DEFLATE_FIXEDHUFF; 279 else if (algo == RTE_COMP_HUFFMAN_DYNAMIC) 280 nxform->algo = NITROX_COMP_ALGO_DEFLATE_DYNHUFF; 281 else { 282 NITROX_LOG_LINE(ERR, "Invalid deflate algorithm %d", algo); 283 ret = -EINVAL; 284 goto err_exit; 285 } 286 287 level = xform->compress.level; 288 if (level == RTE_COMP_LEVEL_PMD_DEFAULT) { 289 nxform->level = NITROX_COMP_LEVEL_MEDIUM; 290 } else if (level >= NITROX_COMP_LEVEL_LOWEST_START && 291 level <= NITROX_COMP_LEVEL_LOWEST_END) { 292 nxform->level = NITROX_COMP_LEVEL_LOWEST; 293 } else if (level >= NITROX_COMP_LEVEL_LOWER_START && 294 level <= NITROX_COMP_LEVEL_LOWER_END) { 295 nxform->level = NITROX_COMP_LEVEL_LOWER; 296 } else if (level >= NITROX_COMP_LEVEL_MEDIUM_START && 297 level <= NITROX_COMP_LEVEL_MEDIUM_END) { 298 nxform->level = NITROX_COMP_LEVEL_MEDIUM; 299 } else if (level >= NITROX_COMP_LEVEL_BEST_START && 300 level <= NITROX_COMP_LEVEL_BEST_END) { 301 nxform->level = NITROX_COMP_LEVEL_BEST; 302 } else { 303 NITROX_LOG_LINE(ERR, "Unsupported compression level %d", 304 xform->compress.level); 305 ret = -ENOTSUP; 306 goto err_exit; 307 } 308 309 chksum_type = xform->compress.chksum; 310 } else if (xform->type == RTE_COMP_DECOMPRESS) { 311 nxform->op = NITROX_COMP_OP_DECOMPRESS; 312 if (xform->decompress.algo != RTE_COMP_ALGO_DEFLATE) { 313 NITROX_LOG_LINE(ERR, "Only deflate is supported"); 314 ret = -ENOTSUP; 315 goto err_exit; 316 } 317 318 nxform->algo = NITROX_COMP_ALGO_DEFLATE_DEFAULT; 319 nxform->level = NITROX_COMP_LEVEL_BEST; 320 chksum_type = xform->decompress.chksum; 321 } else { 322 ret = -EINVAL; 323 goto err_exit; 324 } 325 326 if (chksum_type == RTE_COMP_CHECKSUM_NONE) 327 nxform->chksum_type = NITROX_CHKSUM_TYPE_NONE; 328 else if (chksum_type == RTE_COMP_CHECKSUM_CRC32) 329 nxform->chksum_type = NITROX_CHKSUM_TYPE_CRC32; 330 else if (chksum_type == RTE_COMP_CHECKSUM_ADLER32) 331 nxform->chksum_type = NITROX_CHKSUM_TYPE_ADLER32; 332 else { 333 NITROX_LOG_LINE(ERR, "Unsupported checksum type %d", 334 chksum_type); 335 ret = -ENOTSUP; 336 goto err_exit; 337 } 338 339 nxform->context = NULL; 340 nxform->history_window = NULL; 341 nxform->window_size = 0; 342 nxform->hlen = 0; 343 nxform->exn = 0; 344 nxform->exbits = 0; 345 nxform->bf = true; 346 return 0; 347 err_exit: 348 memset(nxform, 0, sizeof(*nxform)); 349 rte_mempool_put(comp_dev->xform_pool, nxform); 350 return ret; 351 } 352 353 static int nitrox_comp_private_xform_free(struct rte_compressdev *dev, 354 void *private_xform) 355 { 356 struct nitrox_comp_xform *nxform = private_xform; 357 struct rte_mempool *mp = rte_mempool_from_obj(nxform); 358 359 RTE_SET_USED(dev); 360 if (unlikely(nxform == NULL)) 361 return -EINVAL; 362 363 memset(nxform, 0, sizeof(*nxform)); 364 mp = rte_mempool_from_obj(nxform); 365 rte_mempool_put(mp, nxform); 366 return 0; 367 } 368 369 static int nitrox_comp_stream_free(struct rte_compressdev *dev, void *stream) 370 { 371 struct nitrox_comp_xform *nxform = stream; 372 373 if (unlikely(nxform == NULL)) 374 return -EINVAL; 375 376 rte_free(nxform->history_window); 377 nxform->history_window = NULL; 378 rte_free(nxform->context); 379 nxform->context = NULL; 380 return nitrox_comp_private_xform_free(dev, stream); 381 } 382 383 static int nitrox_comp_stream_create(struct rte_compressdev *dev, 384 const struct rte_comp_xform *xform, void **stream) 385 { 386 int err; 387 struct nitrox_comp_xform *nxform; 388 struct nitrox_comp_device *comp_dev = dev->data->dev_private; 389 390 err = nitrox_comp_private_xform_create(dev, xform, stream); 391 if (unlikely(err)) 392 return err; 393 394 nxform = *stream; 395 if (xform->type == RTE_COMP_COMPRESS) { 396 uint8_t window_size = xform->compress.window_size; 397 398 if (unlikely(window_size < NITROX_COMP_WINDOW_SIZE_MIN || 399 window_size > NITROX_COMP_WINDOW_SIZE_MAX)) { 400 NITROX_LOG_LINE(ERR, "Invalid window size %d", 401 window_size); 402 return -EINVAL; 403 } 404 405 if (window_size == NITROX_COMP_WINDOW_SIZE_MAX) 406 nxform->window_size = NITROX_CONSTANTS_MAX_SEARCH_DEPTH; 407 else 408 nxform->window_size = RTE_BIT32(window_size); 409 } else { 410 nxform->window_size = NITROX_DEFAULT_DEFLATE_SEARCH_DEPTH; 411 } 412 413 nxform->history_window = rte_zmalloc_socket(NULL, nxform->window_size, 414 8, comp_dev->xform_pool->socket_id); 415 if (unlikely(nxform->history_window == NULL)) { 416 err = -ENOMEM; 417 goto err_exit; 418 } 419 420 if (xform->type == RTE_COMP_COMPRESS) 421 return 0; 422 423 nxform->context = rte_zmalloc_socket(NULL, 424 NITROX_DECOMP_CTX_SIZE, 8, 425 comp_dev->xform_pool->socket_id); 426 if (unlikely(nxform->context == NULL)) { 427 err = -ENOMEM; 428 goto err_exit; 429 } 430 431 return 0; 432 err_exit: 433 nitrox_comp_stream_free(dev, *stream); 434 return err; 435 } 436 437 static int nitrox_enq_single_op(struct nitrox_qp *qp, struct rte_comp_op *op) 438 { 439 struct nitrox_softreq *sr; 440 int err; 441 442 if (unlikely(rte_mempool_get(qp->sr_mp, (void **)&sr))) 443 return -ENOMEM; 444 445 err = nitrox_process_comp_req(op, sr); 446 if (unlikely(err)) { 447 rte_mempool_put(qp->sr_mp, sr); 448 return err; 449 } 450 451 if (op->status == RTE_COMP_OP_STATUS_SUCCESS) 452 err = nitrox_qp_enqueue_sr(qp, sr); 453 else 454 nitrox_qp_enqueue(qp, nitrox_comp_instr_addr(sr), sr); 455 456 return err; 457 } 458 459 static uint16_t nitrox_comp_dev_enq_burst(void *queue_pair, 460 struct rte_comp_op **ops, 461 uint16_t nb_ops) 462 { 463 struct nitrox_qp *qp = queue_pair; 464 uint16_t free_slots = 0; 465 uint16_t cnt = 0; 466 uint16_t dbcnt = 0; 467 bool err = false; 468 469 free_slots = nitrox_qp_free_count(qp); 470 if (nb_ops > free_slots) 471 nb_ops = free_slots; 472 473 for (cnt = 0; cnt < nb_ops; cnt++) { 474 if (unlikely(nitrox_enq_single_op(qp, ops[cnt]))) { 475 err = true; 476 break; 477 } 478 479 if (ops[cnt]->status != RTE_COMP_OP_STATUS_SUCCESS) 480 dbcnt++; 481 } 482 483 nitrox_ring_dbell(qp, dbcnt); 484 qp->stats.enqueued_count += cnt; 485 if (unlikely(err)) 486 qp->stats.enqueue_err_count++; 487 488 return cnt; 489 } 490 491 static int nitrox_deq_single_op(struct nitrox_qp *qp, 492 struct rte_comp_op **op_ptr) 493 { 494 struct nitrox_softreq *sr; 495 int err; 496 497 sr = nitrox_qp_get_softreq(qp); 498 err = nitrox_check_comp_req(sr, op_ptr); 499 if (err == -EAGAIN) 500 return err; 501 502 nitrox_qp_dequeue(qp); 503 rte_mempool_put(qp->sr_mp, sr); 504 if (err == 0) 505 qp->stats.dequeued_count++; 506 else 507 qp->stats.dequeue_err_count++; 508 509 return 0; 510 } 511 512 static uint16_t nitrox_comp_dev_deq_burst(void *queue_pair, 513 struct rte_comp_op **ops, 514 uint16_t nb_ops) 515 { 516 struct nitrox_qp *qp = queue_pair; 517 uint16_t filled_slots = nitrox_qp_used_count(qp); 518 int cnt = 0; 519 520 if (nb_ops > filled_slots) 521 nb_ops = filled_slots; 522 523 for (cnt = 0; cnt < nb_ops; cnt++) 524 if (nitrox_deq_single_op(qp, &ops[cnt])) 525 break; 526 527 return cnt; 528 } 529 530 static struct rte_compressdev_ops nitrox_compressdev_ops = { 531 .dev_configure = nitrox_comp_dev_configure, 532 .dev_start = nitrox_comp_dev_start, 533 .dev_stop = nitrox_comp_dev_stop, 534 .dev_close = nitrox_comp_dev_close, 535 536 .stats_get = nitrox_comp_stats_get, 537 .stats_reset = nitrox_comp_stats_reset, 538 539 .dev_infos_get = nitrox_comp_dev_info_get, 540 541 .queue_pair_setup = nitrox_comp_queue_pair_setup, 542 .queue_pair_release = nitrox_comp_queue_pair_release, 543 544 .private_xform_create = nitrox_comp_private_xform_create, 545 .private_xform_free = nitrox_comp_private_xform_free, 546 .stream_create = nitrox_comp_stream_create, 547 .stream_free = nitrox_comp_stream_free, 548 }; 549 550 int 551 nitrox_comp_pmd_create(struct nitrox_device *ndev) 552 { 553 char name[RTE_COMPRESSDEV_NAME_MAX_LEN]; 554 struct rte_compressdev_pmd_init_params init_params = { 555 .name = "", 556 .socket_id = ndev->pdev->device.numa_node, 557 }; 558 struct rte_compressdev *cdev; 559 560 rte_pci_device_name(&ndev->pdev->addr, name, sizeof(name)); 561 snprintf(name + strlen(name), 562 RTE_COMPRESSDEV_NAME_MAX_LEN - strlen(name), 563 "_n5comp"); 564 ndev->rte_comp_dev.driver = &nitrox_rte_comp_drv; 565 ndev->rte_comp_dev.numa_node = ndev->pdev->device.numa_node; 566 ndev->rte_comp_dev.devargs = NULL; 567 cdev = rte_compressdev_pmd_create(name, 568 &ndev->rte_comp_dev, 569 sizeof(struct nitrox_comp_device), 570 &init_params); 571 if (!cdev) { 572 NITROX_LOG_LINE(ERR, "Cryptodev '%s' creation failed", name); 573 return -ENODEV; 574 } 575 576 cdev->dev_ops = &nitrox_compressdev_ops; 577 cdev->enqueue_burst = nitrox_comp_dev_enq_burst; 578 cdev->dequeue_burst = nitrox_comp_dev_deq_burst; 579 cdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED; 580 581 ndev->comp_dev = cdev->data->dev_private; 582 ndev->comp_dev->cdev = cdev; 583 ndev->comp_dev->ndev = ndev; 584 ndev->comp_dev->xform_pool = NULL; 585 NITROX_LOG_LINE(DEBUG, "Created compressdev '%s', dev_id %d", 586 cdev->data->name, cdev->data->dev_id); 587 return 0; 588 } 589 590 int 591 nitrox_comp_pmd_destroy(struct nitrox_device *ndev) 592 { 593 int err; 594 595 if (ndev->comp_dev == NULL) 596 return 0; 597 598 err = rte_compressdev_pmd_destroy(ndev->comp_dev->cdev); 599 if (err) 600 return err; 601 602 ndev->comp_dev = NULL; 603 return 0; 604 } 605