/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Cavium, Inc
 */

#include <string.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cpuflags.h>
#include <rte_malloc.h>

#include "otx_zip.h"

static const struct rte_compressdev_capabilities
				octtx_zip_pmd_capabilities[] = {
	{	.algo = RTE_COMP_ALGO_DEFLATE,
		/* Deflate */
		.comp_feature_flags =	RTE_COMP_FF_HUFFMAN_FIXED |
					RTE_COMP_FF_HUFFMAN_DYNAMIC,
		/* Non-shareable priv xforms and stateless ops only */
		.window_size = {
				.min = 1,
				.max = 14,
				.increment = 1
				/* supported window sizes: 2^1 to 2^14 */
		},
	},
	RTE_COMP_END_OF_CAPABILITIES_LIST()
};

/*
 * Reset the stream to its default state so it can be reused
 * for the next stateless operation.
 */
static inline void
reset_stream(struct zip_stream *z_stream)
{
	union zip_inst_s *inst = (union zip_inst_s *)(z_stream->inst);

	inst->s.bf = 1;
	inst->s.ef = 0;
}

int
zip_process_op(struct rte_comp_op *op,
		struct zipvf_qp *qp,
		struct zip_stream *zstrm)
{
	union zip_inst_s *inst = zstrm->inst;
	volatile union zip_zres_s *zresult = NULL;

	if ((op->m_src->nb_segs > 1) || (op->m_dst->nb_segs > 1) ||
			(op->src.offset > rte_pktmbuf_pkt_len(op->m_src)) ||
			(op->dst.offset > rte_pktmbuf_pkt_len(op->m_dst))) {
		op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
		ZIP_PMD_ERR("Segmented packets are not supported");
		return 0;
	}

	zipvf_prepare_cmd_stateless(op, zstrm);

	zresult = (union zip_zres_s *)zstrm->bufs[RES_BUF];
	zresult->s.compcode = 0;

#ifdef ZIP_DBG
	zip_dump_instruction(inst);
#endif

	/* Submit zip command */
	zipvf_push_command(qp, (void *)inst);

	/* Busy-wait until hardware writes a non-zero completion code
	 * into the result buffer (synchronous mode).
	 */
	do {
	} while (!zresult->s.compcode);

	if (zresult->s.compcode == ZIP_COMP_E_SUCCESS) {
		op->status = RTE_COMP_OP_STATUS_SUCCESS;
	} else {
		/* Fatal error; nothing can be done to recover the op */
		ZIP_PMD_ERR("operation failed with error code:%d",
			zresult->s.compcode);
		if (zresult->s.compcode == ZIP_COMP_E_DSTOP)
			op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
		else
			op->status = RTE_COMP_OP_STATUS_ERROR;
	}

#ifdef ZIP_DBG
	ZIP_PMD_INFO("written %d", zresult->s.totalbyteswritten);
#endif

	/* Update op stats */
	switch (op->status) {
	case RTE_COMP_OP_STATUS_SUCCESS:
		op->consumed = zresult->s.totalbytesread;
	/* Fall-through */
	case RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED:
		op->produced = zresult->s.totalbyteswritten;
		break;
	default:
		ZIP_PMD_ERR("stats not updated for status:%d",
				op->status);
		break;
	}
	/* zstream is reset irrespective of the result */
	reset_stream(zstrm);

	zresult->s.compcode = ZIP_COMP_E_NOTDONE;
	return 0;
}
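/* Illustrative only, not part of the driver: an application would
 * typically build the private xform consumed by zip_process_op() along
 * these lines (dev_id and the op are assumed to exist):
 *
 *	struct rte_comp_xform xform = {
 *		.type = RTE_COMP_COMPRESS,
 *		.compress = {
 *			.algo = RTE_COMP_ALGO_DEFLATE,
 *			.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC,
 *			.level = RTE_COMP_LEVEL_PMD_DEFAULT,
 *			.window_size = 14,
 *			.chksum = RTE_COMP_CHECKSUM_NONE,
 *		},
 *	};
 *	void *priv_xform;
 *
 *	rte_compressdev_private_xform_create(dev_id, &xform, &priv_xform);
 *	op->private_xform = priv_xform;
 */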
/** Parse xform parameters and set up a stream */
static int
zip_set_stream_parameters(struct rte_compressdev *dev,
			const struct rte_comp_xform *xform,
			struct zip_stream *z_stream)
{
	int ret;
	union zip_inst_s *inst;
	struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
	void *res;

	/* Allocate resources required by a stream */
	ret = rte_mempool_get_bulk(vf->zip_mp,
			z_stream->bufs, MAX_BUFS_PER_STREAM);
	if (ret < 0)
		return -1;

	/* get one command buffer from the pool and set it up */
	inst = (union zip_inst_s *)z_stream->bufs[CMD_BUF];
	res = z_stream->bufs[RES_BUF];

	memset(inst->u, 0, sizeof(inst->u));

	/* set bf only for the first op of a stream */
	inst->s.bf = 1;

	if (xform->type == RTE_COMP_COMPRESS) {
		inst->s.op = ZIP_OP_E_COMP;

		switch (xform->compress.deflate.huffman) {
		case RTE_COMP_HUFFMAN_DEFAULT:
			inst->s.cc = ZIP_CC_DEFAULT;
			break;
		case RTE_COMP_HUFFMAN_FIXED:
			inst->s.cc = ZIP_CC_FIXED_HUFF;
			break;
		case RTE_COMP_HUFFMAN_DYNAMIC:
			inst->s.cc = ZIP_CC_DYN_HUFF;
			break;
		default:
			ret = -1;
			goto err;
		}

		switch (xform->compress.level) {
		case RTE_COMP_LEVEL_MIN:
			inst->s.ss = ZIP_COMP_E_LEVEL_MIN;
			break;
		case RTE_COMP_LEVEL_MAX:
			inst->s.ss = ZIP_COMP_E_LEVEL_MAX;
			break;
		case RTE_COMP_LEVEL_NONE:
			ZIP_PMD_ERR("Compression level not supported");
			ret = -1;
			goto err;
		default:
			/* for any value between min and max, choose
			 * the PMD default.
			 */
			inst->s.ss = ZIP_COMP_E_LEVEL_MED; /* PMD default */
			break;
		}
	} else if (xform->type == RTE_COMP_DECOMPRESS) {
		inst->s.op = ZIP_OP_E_DECOMP;
		/* from the HRM:
		 * for DEFLATE decompression, [CC] must be 0x0;
		 * for decompression, [SS] must be 0x0.
		 */
		inst->s.cc = 0;
		/* Speed bit should not be set for decompression */
		inst->s.ss = 0;
		/* A decompression context is needed only for STATEFUL
		 * operations. Only STATELESS is supported at present,
		 * so setting the ctx pointer is skipped.
		 */
	} else {
		ZIP_PMD_ERR("xform type not supported");
		ret = -1;
		goto err;
	}

	inst->s.res_ptr_addr.s.addr = rte_mempool_virt2iova(res);
	inst->s.res_ptr_ctl.s.length = 0;

	z_stream->inst = inst;
	z_stream->func = zip_process_op;

	return 0;

err:
	rte_mempool_put_bulk(vf->zip_mp,
			(void *)&(z_stream->bufs[0]),
			MAX_BUFS_PER_STREAM);

	return ret;
}

/** Configure device */
static int
zip_pmd_config(struct rte_compressdev *dev,
		struct rte_compressdev_config *config)
{
	int nb_streams;
	char res_pool[RTE_MEMZONE_NAMESIZE];
	struct zip_vf *vf;
	struct rte_mempool *zip_buf_mp;

	if (!config || !dev)
		return -EIO;

	vf = (struct zip_vf *)(dev->data->dev_private);

	/* Create a pool with the maximum number of resources
	 * required by streams.
	 */

	/* use a common pool for non-shareable priv_xform and stream */
	nb_streams = config->max_nb_priv_xforms + config->max_nb_streams;

	snprintf(res_pool, RTE_MEMZONE_NAMESIZE, "octtx_zip_res_pool%u",
		 dev->data->dev_id);

	/* TBD: should a per-core object cache be used for stream resources? */
	zip_buf_mp = rte_mempool_create(
			res_pool,
			nb_streams * MAX_BUFS_PER_STREAM,
			ZIP_BUF_SIZE,
			0,
			0,
			NULL,
			NULL,
			NULL,
			NULL,
			SOCKET_ID_ANY,
			0);

	if (zip_buf_mp == NULL) {
		ZIP_PMD_ERR(
			"Failed to create buf mempool octtx_zip_res_pool%u",
			dev->data->dev_id);
		return -1;
	}

	vf->zip_mp = zip_buf_mp;

	return 0;
}
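/* Illustrative only: the pool sized above holds MAX_BUFS_PER_STREAM
 * buffers for every stream/priv_xform the application asks for, so a
 * matching application-side configuration (values are examples) is:
 *
 *	struct rte_compressdev_config config = {
 *		.socket_id = rte_socket_id(),
 *		.nb_queue_pairs = 1,
 *		.max_nb_priv_xforms = 16,
 *		.max_nb_streams = 0,
 *	};
 *
 *	rte_compressdev_configure(dev_id, &config);
 */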
/** Start device */
static int
zip_pmd_start(__rte_unused struct rte_compressdev *dev)
{
	return 0;
}

/** Stop device */
static void
zip_pmd_stop(__rte_unused struct rte_compressdev *dev)
{

}

/** Close device */
static int
zip_pmd_close(struct rte_compressdev *dev)
{
	if (dev == NULL)
		return -1;

	struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
	rte_mempool_free(vf->zip_mp);

	return 0;
}

/** Get device statistics */
static void
zip_pmd_stats_get(struct rte_compressdev *dev,
		struct rte_compressdev_stats *stats)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];

		stats->enqueued_count += qp->qp_stats.enqueued_count;
		stats->dequeued_count += qp->qp_stats.dequeued_count;

		stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
		stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
	}
}

/** Reset device statistics */
static void
zip_pmd_stats_reset(struct rte_compressdev *dev)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];

		memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
	}
}

/** Get device info */
static void
zip_pmd_info_get(struct rte_compressdev *dev,
		struct rte_compressdev_info *dev_info)
{
	struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;

	if (dev_info != NULL) {
		dev_info->driver_name = dev->device->driver->name;
		dev_info->feature_flags = dev->feature_flags;
		dev_info->capabilities = octtx_zip_pmd_capabilities;
		dev_info->max_nb_queue_pairs = vf->max_nb_queue_pairs;
	}
}

/** Release queue pair */
static int
zip_pmd_qp_release(struct rte_compressdev *dev, uint16_t qp_id)
{
	struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];

	if (qp != NULL) {
		zipvf_q_term(qp);

		rte_ring_free(qp->processed_pkts);

		rte_free(qp);
		dev->data->queue_pairs[qp_id] = NULL;
	}
	return 0;
}

/** Create a ring on which to place processed packets */
static struct rte_ring *
zip_pmd_qp_create_processed_pkts_ring(struct zipvf_qp *qp,
		unsigned int ring_size, int socket_id)
{
	struct rte_ring *r;

	r = rte_ring_lookup(qp->name);
	if (r) {
		if (rte_ring_get_size(r) >= ring_size) {
			ZIP_PMD_INFO("Reusing existing ring %s for processed"
					" packets", qp->name);
			return r;
		}

		ZIP_PMD_ERR("Unable to reuse existing ring %s for processed"
				" packets", qp->name);
		return NULL;
	}

	return rte_ring_create(qp->name, ring_size, socket_id,
						RING_F_EXACT_SZ);
}
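/* Illustrative only: applications reach zip_pmd_qp_setup() below
 * through the generic API, where max_inflight_ops (64 here, an example
 * value) bounds the size of the completion ring created by the helper
 * above:
 *
 *	rte_compressdev_queue_pair_setup(dev_id, qp_id, 64,
 *					 rte_socket_id());
 */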
/** Set up a queue pair */
static int
zip_pmd_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
		uint32_t max_inflight_ops, int socket_id)
{
	struct zipvf_qp *qp = NULL;
	struct zip_vf *vf;
	char *name;
	int ret;

	if (!dev)
		return -1;

	vf = (struct zip_vf *) (dev->data->dev_private);

	/* Reuse the queue pair if it has already been set up. */
	if (dev->data->queue_pairs[qp_id] != NULL) {
		ZIP_PMD_INFO("Using existing queue pair %d", qp_id);
		return 0;
	}

	name = rte_malloc(NULL, RTE_COMPRESSDEV_NAME_MAX_LEN, 0);
	if (name == NULL)
		return -ENOMEM;
	snprintf(name, RTE_COMPRESSDEV_NAME_MAX_LEN,
		 "zip_pmd_%u_qp_%u",
		 dev->data->dev_id, qp_id);

	/* Allocate the queue pair data structure. */
	qp = rte_zmalloc_socket(name, sizeof(*qp),
				RTE_CACHE_LINE_SIZE, socket_id);
	if (qp == NULL) {
		rte_free(name);
		return -ENOMEM;
	}

	qp->name = name;

	/* Create a completion queue that holds up to max_inflight_ops */
	qp->processed_pkts = zip_pmd_qp_create_processed_pkts_ring(qp,
						max_inflight_ops, socket_id);
	if (qp->processed_pkts == NULL)
		goto qp_setup_cleanup;

	qp->id = qp_id;
	qp->vf = vf;

	ret = zipvf_q_init(qp);
	if (ret < 0)
		goto qp_setup_cleanup;

	dev->data->queue_pairs[qp_id] = qp;

	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
	return 0;

qp_setup_cleanup:
	rte_ring_free(qp->processed_pkts);
	rte_free(qp);
	/* the name buffer is a separate allocation; free it too */
	rte_free(name);
	return -1;
}

static int
zip_pmd_stream_create(struct rte_compressdev *dev,
		const struct rte_comp_xform *xform, void **stream)
{
	int ret;
	struct zip_stream *strm = NULL;

	strm = rte_malloc(NULL,
			sizeof(struct zip_stream), 0);
	if (strm == NULL)
		return -ENOMEM;

	ret = zip_set_stream_parameters(dev, xform, strm);
	if (ret < 0) {
		ZIP_PMD_ERR("failed to configure xform parameters");
		rte_free(strm);
		return ret;
	}
	*stream = strm;
	return 0;
}

static int
zip_pmd_stream_free(struct rte_compressdev *dev, void *stream)
{
	struct zip_vf *vf = (struct zip_vf *) (dev->data->dev_private);
	struct zip_stream *z_stream;

	if (stream == NULL)
		return 0;

	z_stream = (struct zip_stream *)stream;

	/* Free resources back to the pool */
	rte_mempool_put_bulk(vf->zip_mp,
				(void *)&(z_stream->bufs[0]),
				MAX_BUFS_PER_STREAM);

	/* Zero out the whole structure */
	memset(stream, 0, sizeof(struct zip_stream));
	rte_free(stream);

	return 0;
}
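/* Illustrative only: because zip_process_op() polls for completion, an
 * op is already finished when enqueue returns, so the two burst calls
 * below can run back to back on the same thread (dev_id, qp_id, ops
 * and nb_ops are assumed to exist):
 *
 *	uint16_t nb_enq = rte_compressdev_enqueue_burst(dev_id, qp_id,
 *							ops, nb_ops);
 *	uint16_t nb_deq = rte_compressdev_dequeue_burst(dev_id, qp_id,
 *							ops, nb_enq);
 */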
static uint16_t
zip_pmd_enqueue_burst_sync(void *queue_pair,
		struct rte_comp_op **ops, uint16_t nb_ops)
{
	struct zipvf_qp *qp = queue_pair;
	struct rte_comp_op *op;
	struct zip_stream *zstrm;
	int i, ret = 0;
	uint16_t enqd = 0;

	for (i = 0; i < nb_ops; i++) {
		op = ops[i];

		if (op->op_type == RTE_COMP_OP_STATEFUL) {
			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
		} else {
			/* process stateless ops */
			zstrm = (struct zip_stream *)op->private_xform;
			if (unlikely(zstrm == NULL))
				op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
			else
				ret = zstrm->func(op, qp, zstrm);
		}

		/* Whatever the outcome, put the op into the completion
		 * queue along with its status.
		 */
		if (!ret)
			ret = rte_ring_enqueue(qp->processed_pkts, (void *)op);

		if (unlikely(ret < 0)) {
			/* increment the count if the op failed to enqueue */
			qp->qp_stats.enqueue_err_count++;
		} else {
			qp->qp_stats.enqueued_count++;
			enqd++;
		}
	}
	return enqd;
}

static uint16_t
zip_pmd_dequeue_burst_sync(void *queue_pair,
		struct rte_comp_op **ops, uint16_t nb_ops)
{
	struct zipvf_qp *qp = queue_pair;
	unsigned int nb_dequeued = 0;

	nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
			(void **)ops, nb_ops, NULL);
	qp->qp_stats.dequeued_count += nb_dequeued;

	return nb_dequeued;
}

static struct rte_compressdev_ops octtx_zip_pmd_ops = {
		.dev_configure		= zip_pmd_config,
		.dev_start		= zip_pmd_start,
		.dev_stop		= zip_pmd_stop,
		.dev_close		= zip_pmd_close,

		.stats_get		= zip_pmd_stats_get,
		.stats_reset		= zip_pmd_stats_reset,

		.dev_infos_get		= zip_pmd_info_get,

		.queue_pair_setup	= zip_pmd_qp_setup,
		.queue_pair_release	= zip_pmd_qp_release,

		.private_xform_create	= zip_pmd_stream_create,
		.private_xform_free	= zip_pmd_stream_free,
		.stream_create		= NULL,
		.stream_free		= NULL
};

static int
zip_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	int ret = 0;
	char compressdev_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
	struct rte_compressdev *compressdev;
	struct rte_compressdev_pmd_init_params init_params = {
		"",
		rte_socket_id(),
	};

	ZIP_PMD_INFO("vendor_id=0x%x device_id=0x%x",
			(unsigned int)pci_dev->id.vendor_id,
			(unsigned int)pci_dev->id.device_id);

	rte_pci_device_name(&pci_dev->addr, compressdev_name,
			    sizeof(compressdev_name));

	compressdev = rte_compressdev_pmd_create(compressdev_name,
		&pci_dev->device, sizeof(struct zip_vf), &init_params);
	if (compressdev == NULL) {
		ZIP_PMD_ERR("driver %s: create failed", init_params.name);
		return -ENODEV;
	}

	/* create the vf device only in the primary process */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		/* create the vf dev with the given pmd dev id */
		ret = zipvf_create(compressdev);
		if (ret < 0) {
			ZIP_PMD_ERR("Device creation failed");
			rte_compressdev_pmd_destroy(compressdev);
			return ret;
		}
	}

	compressdev->dev_ops = &octtx_zip_pmd_ops;
	/* register rx/tx burst functions for the data path */
	compressdev->dequeue_burst = zip_pmd_dequeue_burst_sync;
	compressdev->enqueue_burst = zip_pmd_enqueue_burst_sync;
	compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
	return ret;
}

static int
zip_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_compressdev *compressdev;
	char compressdev_name[RTE_COMPRESSDEV_NAME_MAX_LEN];

	if (pci_dev == NULL) {
		ZIP_PMD_ERR("Invalid PCI device");
		return -EINVAL;
	}
	rte_pci_device_name(&pci_dev->addr, compressdev_name,
			sizeof(compressdev_name));

	compressdev = rte_compressdev_pmd_get_named_dev(compressdev_name);
	if (compressdev == NULL)
		return -ENODEV;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		if (zipvf_destroy(compressdev) < 0)
			return -ENODEV;
	}
	return rte_compressdev_pmd_destroy(compressdev);
}

static struct rte_pci_id pci_id_octtx_zipvf_table[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
			PCI_DEVICE_ID_OCTEONTX_ZIPVF),
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
			PCI_DEVICE_ID_OCTEONTX2_ZIPVF),
	},
	{
		.device_id = 0
	},
};

/**
 * Structure that represents a PCI driver
 */
static struct rte_pci_driver octtx_zip_pmd = {
	.id_table = pci_id_octtx_zipvf_table,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = zip_pci_probe,
	.remove = zip_pci_remove,
};

RTE_PMD_REGISTER_PCI(COMPRESSDEV_NAME_ZIP_PMD, octtx_zip_pmd);
RTE_PMD_REGISTER_PCI_TABLE(COMPRESSDEV_NAME_ZIP_PMD, pci_id_octtx_zipvf_table);
RTE_LOG_REGISTER_DEFAULT(octtx_zip_logtype_driver, INFO);