/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2018 Intel Corporation.
 * All rights reserved.
 * Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "accel_dpdk_compressdev.h"
#include "spdk/accel_module.h"

#include "spdk/stdinc.h"
#include "spdk/rpc.h"
#include "spdk/env.h"
#include "spdk/endian.h"
#include "spdk/string.h"
#include "spdk/thread.h"
#include "spdk/util.h"
#include "spdk/likely.h"

#include "spdk/log.h"

#include <rte_config.h>
#include <rte_bus_vdev.h>
#include <rte_compressdev.h>
#include <rte_comp.h>
#include <rte_mbuf_dyn.h>

/* Used to store IO context in mbuf */
static const struct rte_mbuf_dynfield rte_mbuf_dynfield_io_context = {
	.name = "context_accel_comp",
	.size = sizeof(uint64_t),
	.align = __alignof__(uint64_t),
	.flags = 0,
};
static int g_mbuf_offset;
static enum compress_pmd g_opts;
static bool g_compressdev_enable = false;
static bool g_compressdev_initialized = false;

#define NUM_MAX_XFORMS 2
#define NUM_MAX_INFLIGHT_OPS 128
#define DEFAULT_WINDOW_SIZE 15
#define MBUF_SPLIT (1UL << DEFAULT_WINDOW_SIZE)
#define QAT_PMD "compress_qat"
#define MLX5_PMD "mlx5_pci"
#define UADK_PMD "compress_uadk"
#define NUM_MBUFS 65536
#define POOL_CACHE_SIZE 256
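
/*
 * MBUF_SPLIT is 1 << DEFAULT_WINDOW_SIZE = 32 KiB, the same size as the deflate
 * window configured in the xforms below.  Source and destination buffers are
 * carved into mbuf segments of at most this size before being handed to the
 * PMD (see _setup_compress_mbuf()).
 */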

/* Global list of available compression devices. */
struct compress_dev {
	struct rte_compressdev_info cdev_info;	/* includes device friendly name */
	uint8_t cdev_id;			/* identifier for the device */
	void *comp_xform;			/* shared private xform for comp on this PMD */
	void *decomp_xform;			/* shared private xform for decomp on this PMD */
	bool sgl_in;
	bool sgl_out;
	TAILQ_ENTRY(compress_dev) link;
};
static TAILQ_HEAD(, compress_dev) g_compress_devs = TAILQ_HEAD_INITIALIZER(g_compress_devs);

#define MAX_NUM_QP 48
/* Global list and lock for unique device/queue pair combos */
struct comp_device_qp {
	struct compress_dev *device;	/* ptr to compression device */
	uint8_t qp;			/* queue pair for this node */
	struct compress_io_channel *chan;
	TAILQ_ENTRY(comp_device_qp) link;
};
static TAILQ_HEAD(, comp_device_qp) g_comp_device_qp = TAILQ_HEAD_INITIALIZER(g_comp_device_qp);
static pthread_mutex_t g_comp_device_qp_lock = PTHREAD_MUTEX_INITIALIZER;

struct compress_io_channel {
	char *drv_name;			/* name of the compression device driver */
	struct comp_device_qp *device_qp;
	struct spdk_poller *poller;
	struct rte_mbuf **src_mbufs;
	struct rte_mbuf **dst_mbufs;
	STAILQ_HEAD(, spdk_accel_task) queued_tasks;
};

/* Shared mempools between all devices on this system */
static struct rte_mempool *g_mbuf_mp = NULL;		/* mbuf mempool */
static struct rte_mempool *g_comp_op_mp = NULL;		/* comp operations, must be rte* mempool */
static struct rte_mbuf_ext_shared_info g_shinfo = {};	/* used by DPDK mbuf macros */
static bool g_qat_available = false;
static bool g_mlx5_pci_available = false;
static bool g_uadk_available = false;

/* Create shared (between all ops per PMD) compress xforms. */
static struct rte_comp_xform g_comp_xform = {
	.type = RTE_COMP_COMPRESS,
	.compress = {
		.algo = RTE_COMP_ALGO_DEFLATE,
		.deflate.huffman = RTE_COMP_HUFFMAN_DEFAULT,
		.level = RTE_COMP_LEVEL_MAX,
		.window_size = DEFAULT_WINDOW_SIZE,
		.chksum = RTE_COMP_CHECKSUM_NONE,
		.hash_algo = RTE_COMP_HASH_ALGO_NONE
	}
};
/* Create shared (between all ops per PMD) decompress xforms. */
static struct rte_comp_xform g_decomp_xform = {
	.type = RTE_COMP_DECOMPRESS,
	.decompress = {
		.algo = RTE_COMP_ALGO_DEFLATE,
		.chksum = RTE_COMP_CHECKSUM_NONE,
		.window_size = DEFAULT_WINDOW_SIZE,
		.hash_algo = RTE_COMP_HASH_ALGO_NONE
	}
};

/* Dummy function used by DPDK to free ext attached buffers
 * to mbufs, we free them ourselves but this callback has to
 * be here.
 */
static void
shinfo_free_cb(void *arg1, void *arg2)
{
}

/* Called by accel_init_compress_drivers() to init each discovered compression device */
static int
create_compress_dev(uint8_t index)
{
	struct compress_dev *device;
	uint16_t q_pairs;
	uint8_t cdev_id;
	int rc, i;
	struct comp_device_qp *dev_qp;
	struct comp_device_qp *tmp_qp;

	device = calloc(1, sizeof(struct compress_dev));
	if (!device) {
		return -ENOMEM;
	}

	/* Get details about this device. */
	rte_compressdev_info_get(index, &device->cdev_info);

	cdev_id = device->cdev_id = index;

	/* Zero means no limit, so cap at MAX_NUM_QP queue pairs. */
	if (device->cdev_info.max_nb_queue_pairs == 0) {
		q_pairs = MAX_NUM_QP;
	} else {
		q_pairs = spdk_min(device->cdev_info.max_nb_queue_pairs, MAX_NUM_QP);
	}

	/* Configure the compression device. */
	struct rte_compressdev_config config = {
		.socket_id = rte_socket_id(),
		.nb_queue_pairs = q_pairs,
		.max_nb_priv_xforms = NUM_MAX_XFORMS,
		.max_nb_streams = 0
	};
	rc = rte_compressdev_configure(cdev_id, &config);
	if (rc < 0) {
		SPDK_ERRLOG("Failed to configure compressdev %u\n", cdev_id);
		goto err_close;
	}

	/* Pre-setup all potential qpairs now and assign them in the channel
	 * callback.
	 */
	for (i = 0; i < q_pairs; i++) {
		rc = rte_compressdev_queue_pair_setup(cdev_id, i,
						      NUM_MAX_INFLIGHT_OPS,
						      rte_socket_id());
		if (rc) {
			if (i > 0) {
				q_pairs = i;
				SPDK_NOTICELOG("FYI failed to setup a queue pair on "
					       "compressdev %u with error %u "
					       "so limiting to %u qpairs\n",
					       cdev_id, rc, q_pairs);
				break;
			} else {
				SPDK_ERRLOG("Failed to setup queue pair on "
					    "compressdev %u with error %u\n", cdev_id, rc);
				rc = -EINVAL;
				goto err_close;
			}
		}
	}

	rc = rte_compressdev_start(cdev_id);
	if (rc < 0) {
		SPDK_ERRLOG("Failed to start device %u: error %d\n",
			    cdev_id, rc);
		goto err_close;
	}

	if (device->cdev_info.capabilities->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
		rc = rte_compressdev_private_xform_create(cdev_id, &g_comp_xform,
							  &device->comp_xform);
		if (rc < 0) {
			SPDK_ERRLOG("Failed to create private comp xform device %u: error %d\n",
				    cdev_id, rc);
			goto err_stop;
		}

		rc = rte_compressdev_private_xform_create(cdev_id, &g_decomp_xform,
							  &device->decomp_xform);
		if (rc) {
			SPDK_ERRLOG("Failed to create private decomp xform device %u: error %d\n",
				    cdev_id, rc);
			goto err_stop;
		}
	} else {
		SPDK_ERRLOG("PMD does not support shared transforms\n");
		rc = -EINVAL;
		goto err_stop;
	}

	/* Build up list of device/qp combinations */
	for (i = 0; i < q_pairs; i++) {
		dev_qp = calloc(1, sizeof(struct comp_device_qp));
		if (!dev_qp) {
			rc = -ENOMEM;
			goto err_qp;
		}
		dev_qp->device = device;
		dev_qp->qp = i;
		dev_qp->chan = NULL;
		TAILQ_INSERT_TAIL(&g_comp_device_qp, dev_qp, link);
	}

	TAILQ_INSERT_TAIL(&g_compress_devs, device, link);

	if (strcmp(device->cdev_info.driver_name, QAT_PMD) == 0) {
		g_qat_available = true;
	}

	if (strcmp(device->cdev_info.driver_name, MLX5_PMD) == 0) {
		g_mlx5_pci_available = true;
	}

	if (strcmp(device->cdev_info.driver_name, UADK_PMD) == 0) {
		g_uadk_available = true;
	}

	return 0;

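	/* Error unwind: free the device/qpair nodes on the global list, then stop
	 * and close the compressdev before freeing the device structure.
	 */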
err_qp:
	TAILQ_FOREACH_SAFE(dev_qp, &g_comp_device_qp, link, tmp_qp) {
		TAILQ_REMOVE(&g_comp_device_qp, dev_qp, link);
		free(dev_qp);
	}
err_stop:
	rte_compressdev_stop(cdev_id);
err_close:
	rte_compressdev_close(cdev_id);
	free(device);
	return rc;
}

/* Called from driver init entry point, accel_compress_init() */
static int
accel_init_compress_drivers(void)
{
	uint8_t cdev_count, i;
	struct compress_dev *tmp_dev;
	struct compress_dev *device;
	int rc;

	/* If we have no compression devices, report an error so the framework can
	 * fall back on other modules.
	 */
	cdev_count = rte_compressdev_count();
	if (cdev_count == 0) {
		return -ENODEV;
	}
	if (cdev_count > RTE_COMPRESS_MAX_DEVS) {
		SPDK_ERRLOG("invalid device count from rte_compressdev_count()\n");
		return -EINVAL;
	}

	g_mbuf_offset = rte_mbuf_dynfield_register(&rte_mbuf_dynfield_io_context);
	if (g_mbuf_offset < 0) {
		SPDK_ERRLOG("error registering dynamic field with DPDK\n");
		return -EINVAL;
	}

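	/* Both pools are shared by every device and channel: NUM_MBUFS entries
	 * each, with a per-lcore cache of POOL_CACHE_SIZE.
	 */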
	g_mbuf_mp = rte_pktmbuf_pool_create("comp_mbuf_mp", NUM_MBUFS, POOL_CACHE_SIZE,
					    sizeof(struct rte_mbuf), 0, rte_socket_id());
	if (g_mbuf_mp == NULL) {
		SPDK_ERRLOG("Cannot create mbuf pool\n");
		rc = -ENOMEM;
		goto error_create_mbuf;
	}

	g_comp_op_mp = rte_comp_op_pool_create("comp_op_pool", NUM_MBUFS, POOL_CACHE_SIZE,
					       0, rte_socket_id());
	if (g_comp_op_mp == NULL) {
		SPDK_ERRLOG("Cannot create comp op pool\n");
		rc = -ENOMEM;
		goto error_create_op;
	}

	/* Init all devices */
	for (i = 0; i < cdev_count; i++) {
		rc = create_compress_dev(i);
		if (rc != 0) {
			goto error_create_compress_devs;
		}
	}

	if (g_qat_available == true) {
		SPDK_NOTICELOG("initialized QAT PMD\n");
	}

	g_shinfo.free_cb = shinfo_free_cb;

	return 0;

	/* Error cleanup paths. */
error_create_compress_devs:
	TAILQ_FOREACH_SAFE(device, &g_compress_devs, link, tmp_dev) {
		TAILQ_REMOVE(&g_compress_devs, device, link);
		free(device);
	}
error_create_op:
error_create_mbuf:
	rte_mempool_free(g_mbuf_mp);

	return rc;
}

int
accel_compressdev_enable_probe(enum compress_pmd *opts)
{
	g_opts = *opts;
	g_compressdev_enable = true;

	return 0;
}

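/* Build an mbuf chain describing the given iovec array without copying data:
 * each iovec is attached as external buffer segments of at most MBUF_SPLIT
 * bytes (or less when spdk_vtophys() reports a physical discontiguity), with
 * extra mbufs pulled from the pool as needed and chained onto mbufs[0].  The
 * owning task is stashed in the mbuf dynamic field so the completion poller
 * can recover it.
 */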
static int
_setup_compress_mbuf(struct rte_mbuf **mbufs, int *mbuf_total, uint64_t *total_length,
		     struct iovec *iovs, int iovcnt, struct spdk_accel_task *task)
{
	uint64_t iovec_length, updated_length, phys_addr;
	uint64_t processed, mbuf_length, remainder;
	uint8_t *current_base = NULL;
	int iov_index, mbuf_index;
	int rc = 0;

	/* Setup mbufs */
	iov_index = mbuf_index = 0;
	while (iov_index < iovcnt) {

		processed = 0;
		iovec_length = iovs[iov_index].iov_len;

		current_base = iovs[iov_index].iov_base;
		if (total_length) {
			*total_length += iovec_length;
		}

		assert(mbufs[mbuf_index] != NULL);
		*RTE_MBUF_DYNFIELD(mbufs[mbuf_index], g_mbuf_offset, uint64_t *) = (uint64_t)task;

		do {
			/* new length is min of remaining left or max mbuf size of MBUF_SPLIT */
			mbuf_length = updated_length = spdk_min(MBUF_SPLIT, iovec_length - processed);

			phys_addr = spdk_vtophys((void *)current_base, &updated_length);

			rte_pktmbuf_attach_extbuf(mbufs[mbuf_index],
						  current_base,
						  phys_addr,
						  updated_length,
						  &g_shinfo);
			rte_pktmbuf_append(mbufs[mbuf_index], updated_length);
			remainder = mbuf_length - updated_length;

			/* although the mbufs were preallocated, we still need to chain them */
			if (mbuf_index > 0) {
				rte_pktmbuf_chain(mbufs[0], mbufs[mbuf_index]);
			}

			/* keep track of the total we've put into the mbuf chain */
			processed += updated_length;
			/* bump the base by what was previously added */
			current_base += updated_length;

			/* If we crossed 2MB boundary we need another mbuf for the remainder */
			if (remainder > 0) {

				assert(remainder <= MBUF_SPLIT);

				/* allocate an mbuf at the end of the array */
				rc = rte_pktmbuf_alloc_bulk(g_mbuf_mp,
							    (struct rte_mbuf **)&mbufs[*mbuf_total], 1);
				if (rc) {
					SPDK_ERRLOG("ERROR trying to get an extra mbuf!\n");
					return -1;
				}
				(*mbuf_total)++;
				mbuf_index++;
				*RTE_MBUF_DYNFIELD(mbufs[mbuf_index], g_mbuf_offset, uint64_t *) = (uint64_t)task;

				/* current_base already points at the start of the remainder;
				 * map it and assert we don't cross another boundary.
				 */
				updated_length = remainder;
				phys_addr = spdk_vtophys((void *)current_base, &updated_length);
				assert(remainder == updated_length);

				rte_pktmbuf_attach_extbuf(mbufs[mbuf_index],
							  current_base,
							  phys_addr,
							  remainder,
							  &g_shinfo);
				rte_pktmbuf_append(mbufs[mbuf_index], remainder);
				rte_pktmbuf_chain(mbufs[0], mbufs[mbuf_index]);

				/* keep track of the total we've put into the mbuf chain */
				processed += remainder;
				current_base += remainder;
			}

			mbuf_index++;

		} while (processed < iovec_length);

		assert(processed == iovec_length);
		iov_index++;
	}

	return 0;
}

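/* Submit one compress or decompress task to the device backing this channel:
 * allocate a comp op, wrap the source and destination iovecs in mbuf chains,
 * pick the comp or decomp xform, and enqueue the op on the channel's queue
 * pair.  If the op cannot be enqueued right now the task is put on
 * queued_tasks and retried from the poller.
 */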
static int
_compress_operation(struct compress_io_channel *chan, struct spdk_accel_task *task)
{
	int dst_iovcnt = task->d.iovcnt;
	struct iovec *dst_iovs = task->d.iovs;
	int src_iovcnt = task->s.iovcnt;
	struct iovec *src_iovs = task->s.iovs;
	struct rte_comp_op *comp_op;
	uint8_t cdev_id;
	uint64_t total_length = 0;
	int rc = 0, i;
	int src_mbuf_total = 0;
	int dst_mbuf_total = 0;
	bool device_error = false;
	bool compress = (task->op_code == SPDK_ACCEL_OPC_COMPRESS);

	assert(chan->device_qp->device != NULL);
	cdev_id = chan->device_qp->device->cdev_id;

	/* calc our mbuf totals based on max MBUF size allowed so we can pre-alloc mbufs in bulk */
	for (i = 0 ; i < src_iovcnt; i++) {
		src_mbuf_total += spdk_divide_round_up(src_iovs[i].iov_len, MBUF_SPLIT);
	}
	for (i = 0 ; i < dst_iovcnt; i++) {
		dst_mbuf_total += spdk_divide_round_up(dst_iovs[i].iov_len, MBUF_SPLIT);
	}

	comp_op = rte_comp_op_alloc(g_comp_op_mp);
	if (!comp_op) {
		SPDK_ERRLOG("trying to get a comp op!\n");
		rc = -ENOMEM;
		goto error_get_op;
	}

	/* bulk-allocate the mbufs calculated above, src and dst */
	rc = rte_pktmbuf_alloc_bulk(g_mbuf_mp, chan->src_mbufs, src_mbuf_total);
	if (rc) {
		SPDK_ERRLOG("ERROR trying to get src_mbufs!\n");
		rc = -ENOMEM;
		goto error_get_src;
	}
	assert(chan->src_mbufs[0]);

	rc = rte_pktmbuf_alloc_bulk(g_mbuf_mp, chan->dst_mbufs, dst_mbuf_total);
	if (rc) {
		SPDK_ERRLOG("ERROR trying to get dst_mbufs!\n");
		rc = -ENOMEM;
		goto error_get_dst;
	}
	assert(chan->dst_mbufs[0]);

	rc = _setup_compress_mbuf(chan->src_mbufs, &src_mbuf_total, &total_length,
				  src_iovs, src_iovcnt, task);
	if (rc < 0) {
		goto error_src_dst;
	}
	if (!chan->device_qp->device->sgl_in && src_mbuf_total > 1) {
		SPDK_ERRLOG("Src buffer uses chained mbufs but driver %s doesn't support SGL input\n",
			    chan->drv_name);
		rc = -EINVAL;
		goto error_src_dst;
	}

	comp_op->m_src = chan->src_mbufs[0];
	comp_op->src.offset = 0;
	comp_op->src.length = total_length;

	rc = _setup_compress_mbuf(chan->dst_mbufs, &dst_mbuf_total, NULL,
				  dst_iovs, dst_iovcnt, task);
	if (rc < 0) {
		goto error_src_dst;
	}
	if (!chan->device_qp->device->sgl_out && dst_mbuf_total > 1) {
		SPDK_ERRLOG("Dst buffer uses chained mbufs but driver %s doesn't support SGL output\n",
			    chan->drv_name);
		rc = -EINVAL;
		goto error_src_dst;
	}

	comp_op->m_dst = chan->dst_mbufs[0];
	comp_op->dst.offset = 0;

	if (compress == true) {
		comp_op->private_xform = chan->device_qp->device->comp_xform;
	} else {
		comp_op->private_xform = chan->device_qp->device->decomp_xform;
	}

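	/* Each task is submitted as a single stateless op; FLUSH_FINAL asks the
	 * PMD to produce a complete deflate stream for the whole source buffer.
	 */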
	comp_op->op_type = RTE_COMP_OP_STATELESS;
	comp_op->flush_flag = RTE_COMP_FLUSH_FINAL;

	rc = rte_compressdev_enqueue_burst(cdev_id, chan->device_qp->qp, &comp_op, 1);
	assert(rc <= 1);

	/* We always expect 1 op to be enqueued; if it was not, queue the task up
	 * to be retried later.
	 */
	if (rc == 1) {
		return 0;
	} else if (comp_op->status == RTE_COMP_OP_STATUS_NOT_PROCESSED) {
		rc = -EAGAIN;
	} else {
		device_error = true;
	}

	/* Error cleanup paths. */
error_src_dst:
	rte_pktmbuf_free_bulk(chan->dst_mbufs, dst_iovcnt);
error_get_dst:
	rte_pktmbuf_free_bulk(chan->src_mbufs, src_iovcnt);
error_get_src:
	rte_comp_op_free(comp_op);
error_get_op:

	if (device_error == true) {
		/* There was an error sending the op to the device, most
		 * likely with the parameters.
		 */
		SPDK_ERRLOG("Compression API returned 0x%x\n", comp_op->status);
		return -EINVAL;
	}
	if (rc != -ENOMEM && rc != -EAGAIN) {
		return rc;
	}

	STAILQ_INSERT_TAIL(&chan->queued_tasks, task, link);
	return 0;
}

/* Poller for the DPDK compression driver. */
static int
comp_dev_poller(void *args)
{
	struct compress_io_channel *chan = args;
	uint8_t cdev_id;
	struct rte_comp_op *deq_ops[NUM_MAX_INFLIGHT_OPS];
	uint16_t num_deq;
	struct spdk_accel_task *task, *task_to_resubmit;
	int rc, i, status;

	assert(chan->device_qp->device != NULL);
	cdev_id = chan->device_qp->device->cdev_id;

	num_deq = rte_compressdev_dequeue_burst(cdev_id, chan->device_qp->qp, deq_ops,
						NUM_MAX_INFLIGHT_OPS);
	for (i = 0; i < num_deq; i++) {

		/* We store this off regardless of success/error so we know which
		 * task to complete.
		 */
		task = (struct spdk_accel_task *)*RTE_MBUF_DYNFIELD(deq_ops[i]->m_src, g_mbuf_offset,
				uint64_t *);
		status = deq_ops[i]->status;

		if (spdk_likely(status == RTE_COMP_OP_STATUS_SUCCESS)) {
			if (task->output_size != NULL) {
				*task->output_size = deq_ops[i]->produced;
			}
			status = 0;
		} else {
			SPDK_NOTICELOG("Dequeue status %u\n", status);
			status = -EIO;
		}

		spdk_accel_task_complete(task, status);

		/* Now free both mbufs and the compress operation. The rte_pktmbuf_free()
		 * call takes care of freeing all of the mbufs in the chain back to their
		 * original pool.
		 */
		rte_pktmbuf_free(deq_ops[i]->m_src);
		rte_pktmbuf_free(deq_ops[i]->m_dst);

		/* There is no bulk free for comp ops so we have to free them one at a time
		 * here; however, it would be rare that we'd ever have more than 1 at a time
		 * anyways.
		 */
		rte_comp_op_free(deq_ops[i]);

		/* Check if there are any pending comp ops to process; only pull one
		 * off at a time, as _compress_operation() may re-queue the op.
		 */
		if (!STAILQ_EMPTY(&chan->queued_tasks)) {
			task_to_resubmit = STAILQ_FIRST(&chan->queued_tasks);
			rc = _compress_operation(chan, task_to_resubmit);
			if (rc == 0) {
				STAILQ_REMOVE_HEAD(&chan->queued_tasks, link);
			}
		}
	}

	return num_deq == 0 ? SPDK_POLLER_IDLE : SPDK_POLLER_BUSY;
}

static int
_process_single_task(struct spdk_io_channel *ch, struct spdk_accel_task *task)
{
	struct compress_io_channel *chan = spdk_io_channel_get_ctx(ch);
	int rc;

	rc = _compress_operation(chan, task);
	if (rc) {
		SPDK_ERRLOG("Error (%d) in compress operation\n", rc);
		assert(false);
	}

	return rc;
}

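/* Accel framework entry point for task submission.  Walks the chain of tasks it
 * was handed and submits each one; if anything is already waiting on
 * queued_tasks (or a submission returns -EBUSY) the remaining tasks are
 * appended to queued_tasks and retried from the poller.
 */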
static int
compress_submit_tasks(struct spdk_io_channel *ch, struct spdk_accel_task *first_task)
{
	struct compress_io_channel *chan = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task, *tmp;
	int rc = 0;

	task = first_task;

	if (!STAILQ_EMPTY(&chan->queued_tasks)) {
		goto queue_tasks;
	}

	/* The caller will either submit a single task or a group of tasks that are
	 * linked together but not currently on a list.  For example, see the poller,
	 * where a list of queued tasks is resubmitted: the list they were on is
	 * reinitialized after saving off the first task from it, which is then
	 * passed in here.  A similar thing is done in the accel framework.
	 */
	while (task) {
		tmp = STAILQ_NEXT(task, link);
		rc = _process_single_task(ch, task);

		if (rc == -EBUSY) {
			goto queue_tasks;
		} else if (rc) {
			spdk_accel_task_complete(task, rc);
		}
		task = tmp;
	}

	return 0;

queue_tasks:
	while (task != NULL) {
		tmp = STAILQ_NEXT(task, link);
		STAILQ_INSERT_TAIL(&chan->queued_tasks, task, link);
		task = tmp;
	}
	return 0;
}

static bool
_set_pmd(struct compress_io_channel *chan)
{

	/* Note: the compress_isal PMD is not supported as accel_fw supports native ISAL
	 * using the accel_sw module */
	if (g_opts == COMPRESS_PMD_AUTO) {
		if (g_qat_available) {
			chan->drv_name = QAT_PMD;
		} else if (g_mlx5_pci_available) {
			chan->drv_name = MLX5_PMD;
		} else if (g_uadk_available) {
			chan->drv_name = UADK_PMD;
		}
	} else if (g_opts == COMPRESS_PMD_QAT_ONLY && g_qat_available) {
		chan->drv_name = QAT_PMD;
	} else if (g_opts == COMPRESS_PMD_MLX5_PCI_ONLY && g_mlx5_pci_available) {
		chan->drv_name = MLX5_PMD;
	} else if (g_opts == COMPRESS_PMD_UADK_ONLY && g_uadk_available) {
		chan->drv_name = UADK_PMD;
	} else {
		SPDK_ERRLOG("Requested PMD is not available.\n");
		return false;
	}
	SPDK_NOTICELOG("Channel %p PMD being used: %s\n", chan, chan->drv_name);
	return true;
}

/* The module registers itself as an io_device; each thread that gets an
 * io_channel on it receives a compress_io_channel backed by its own device
 * queue pair (assigned in compress_create_cb()).
 */
static int compress_create_cb(void *io_device, void *ctx_buf);
static void compress_destroy_cb(void *io_device, void *ctx_buf);
static struct spdk_accel_module_if g_compress_module;

static int
accel_compress_init(void)
{
	int rc;

	if (!g_compressdev_enable) {
		return -EINVAL;
	}

	if (g_opts == COMPRESS_PMD_UADK_ONLY) {
		char *driver_name = UADK_PMD;

		rc = rte_vdev_init(driver_name, NULL);
		if (rc) {
			SPDK_NOTICELOG("Failed to create virtual PMD %s: error %d. "
				       "Possibly %s is not supported by DPDK library. "
				       "Keep going...\n", driver_name, rc, driver_name);
		}
	}

	rc = accel_init_compress_drivers();
	if (rc) {
		assert(TAILQ_EMPTY(&g_compress_devs));
		return rc;
	}

	g_compressdev_initialized = true;
	spdk_io_device_register(&g_compress_module, compress_create_cb, compress_destroy_cb,
				sizeof(struct compress_io_channel), "compressdev_accel_module");
	return 0;
}

static int
compress_create_cb(void *io_device, void *ctx_buf)
{
	struct compress_io_channel *chan = ctx_buf;
	const struct rte_compressdev_capabilities *capab;
	struct comp_device_qp *device_qp;
	size_t length;

	if (_set_pmd(chan) == false) {
		assert(false);
		return -ENODEV;
	}

	/* The following variable length arrays of mbuf pointers are required to submit to compressdev */
	length = NUM_MBUFS * sizeof(void *);
	chan->src_mbufs = spdk_zmalloc(length, 0x40, NULL,
				       SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	if (chan->src_mbufs == NULL) {
		return -ENOMEM;
	}
	chan->dst_mbufs = spdk_zmalloc(length, 0x40, NULL,
				       SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	if (chan->dst_mbufs == NULL) {
		spdk_free(chan->src_mbufs);
		return -ENOMEM;
	}

	chan->poller = SPDK_POLLER_REGISTER(comp_dev_poller, chan, 0);
	STAILQ_INIT(&chan->queued_tasks);

	/* Claim the first unused queue pair belonging to a device driven by the
	 * PMD that was selected for this channel.
	 */
	pthread_mutex_lock(&g_comp_device_qp_lock);
	TAILQ_FOREACH(device_qp, &g_comp_device_qp, link) {
		if (strcmp(device_qp->device->cdev_info.driver_name, chan->drv_name) == 0) {
			if (device_qp->chan == NULL) {
				chan->device_qp = device_qp;
				device_qp->chan = chan;
				break;
			}
		}
	}
	pthread_mutex_unlock(&g_comp_device_qp_lock);

	if (chan->device_qp == NULL) {
		SPDK_ERRLOG("out of qpairs, cannot assign one\n");
		assert(false);
		return -ENOMEM;
	} else {
		capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);

		if (capab->comp_feature_flags & (RTE_COMP_FF_OOP_SGL_IN_SGL_OUT | RTE_COMP_FF_OOP_SGL_IN_LB_OUT)) {
			chan->device_qp->device->sgl_in = true;
		}

		if (capab->comp_feature_flags & (RTE_COMP_FF_OOP_SGL_IN_SGL_OUT | RTE_COMP_FF_OOP_LB_IN_SGL_OUT)) {
			chan->device_qp->device->sgl_out = true;
		}
	}

	return 0;
}

static void
accel_compress_write_config_json(struct spdk_json_write_ctx *w)
{
	if (g_compressdev_enable) {
		spdk_json_write_object_begin(w);
		spdk_json_write_named_string(w, "method", "compressdev_scan_accel_module");
		spdk_json_write_named_object_begin(w, "params");
		spdk_json_write_named_uint32(w, "pmd", g_opts);
		spdk_json_write_object_end(w);
		spdk_json_write_object_end(w);
	}
}

static void
compress_destroy_cb(void *io_device, void *ctx_buf)
{
	struct compress_io_channel *chan = ctx_buf;
	struct comp_device_qp *device_qp = chan->device_qp;

	spdk_free(chan->src_mbufs);
	spdk_free(chan->dst_mbufs);

	spdk_poller_unregister(&chan->poller);

	pthread_mutex_lock(&g_comp_device_qp_lock);
	chan->device_qp = NULL;
	device_qp->chan = NULL;
	pthread_mutex_unlock(&g_comp_device_qp_lock);
}

static size_t
accel_compress_get_ctx_size(void)
{
	return 0;
}

static bool
compress_supports_opcode(enum spdk_accel_opcode opc)
{
	if (g_mlx5_pci_available || g_qat_available || g_uadk_available) {
		switch (opc) {
		case SPDK_ACCEL_OPC_COMPRESS:
		case SPDK_ACCEL_OPC_DECOMPRESS:
			return true;
		default:
			break;
		}
	}

	return false;
}

static bool
compress_supports_algo(enum spdk_accel_comp_algo algo)
{
	if (algo == SPDK_ACCEL_COMP_ALGO_DEFLATE) {
		return true;
	}

	return false;
}

static int
compress_get_level_range(enum spdk_accel_comp_algo algo,
			 uint32_t *min_level, uint32_t *max_level)
{
	switch (algo) {
	case SPDK_ACCEL_COMP_ALGO_DEFLATE:
		/**
		 * Hardware compression is set to the highest level by default and
		 * is not affected by the level parameter passed in here, so report
		 * the maximum range.
		 */
		*min_level = 0;
		*max_level = 0;

		return 0;
	default:
		return -EINVAL;
	}
}

static struct spdk_io_channel *
compress_get_io_channel(void)
{
	return spdk_get_io_channel(&g_compress_module);
}

static void accel_compress_exit(void *ctx);
static struct spdk_accel_module_if g_compress_module = {
	.module_init = accel_compress_init,
	.module_fini = accel_compress_exit,
	.write_config_json = accel_compress_write_config_json,
	.get_ctx_size = accel_compress_get_ctx_size,
	.name = "dpdk_compressdev",
	.supports_opcode = compress_supports_opcode,
	.get_io_channel = compress_get_io_channel,
	.submit_tasks = compress_submit_tasks,
	.compress_supports_algo = compress_supports_algo,
	.get_compress_level_range = compress_get_level_range,
};

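/* Adds this module to the accel framework's list of modules.  Note that
 * module_init will still fail unless accel_compressdev_enable_probe() has also
 * been called to set g_compressdev_enable.
 */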
878ca6d701aSYankun Li * */ 879ca6d701aSYankun Li *min_level = 0; 880ca6d701aSYankun Li *max_level = 0; 881ca6d701aSYankun Li 882ca6d701aSYankun Li return 0; 883ca6d701aSYankun Li default: 884ca6d701aSYankun Li return -EINVAL; 885ca6d701aSYankun Li } 886ca6d701aSYankun Li } 887ca6d701aSYankun Li 888976f8b09Spaul luse static struct spdk_io_channel * 889976f8b09Spaul luse compress_get_io_channel(void) 890976f8b09Spaul luse { 891976f8b09Spaul luse return spdk_get_io_channel(&g_compress_module); 892976f8b09Spaul luse } 893976f8b09Spaul luse 894976f8b09Spaul luse static void accel_compress_exit(void *ctx); 895976f8b09Spaul luse static struct spdk_accel_module_if g_compress_module = { 896976f8b09Spaul luse .module_init = accel_compress_init, 897976f8b09Spaul luse .module_fini = accel_compress_exit, 898976f8b09Spaul luse .write_config_json = accel_compress_write_config_json, 899976f8b09Spaul luse .get_ctx_size = accel_compress_get_ctx_size, 900976f8b09Spaul luse .name = "dpdk_compressdev", 901976f8b09Spaul luse .supports_opcode = compress_supports_opcode, 902976f8b09Spaul luse .get_io_channel = compress_get_io_channel, 90373c0be85SYankun Li .submit_tasks = compress_submit_tasks, 904ca6d701aSYankun Li .compress_supports_algo = compress_supports_algo, 905ca6d701aSYankun Li .get_compress_level_range = compress_get_level_range, 906976f8b09Spaul luse }; 907976f8b09Spaul luse 908976f8b09Spaul luse void 909976f8b09Spaul luse accel_dpdk_compressdev_enable(void) 910976f8b09Spaul luse { 911976f8b09Spaul luse spdk_accel_module_list_add(&g_compress_module); 912976f8b09Spaul luse } 913976f8b09Spaul luse 9148222c5baSpaul luse /* Callback for unregistering the IO device. */ 915976f8b09Spaul luse static void 9168222c5baSpaul luse _device_unregister_cb(void *io_device) 91705e7a8a8SJim Harris { 91805e7a8a8SJim Harris struct comp_device_qp *dev_qp; 919976f8b09Spaul luse struct compress_dev *device; 920976f8b09Spaul luse 921976f8b09Spaul luse while ((device = TAILQ_FIRST(&g_compress_devs))) { 922976f8b09Spaul luse TAILQ_REMOVE(&g_compress_devs, device, link); 9232828b121SZhangfei Gao rte_compressdev_stop(device->cdev_id); 9242828b121SZhangfei Gao rte_compressdev_close(device->cdev_id); 925976f8b09Spaul luse free(device); 926976f8b09Spaul luse } 92705e7a8a8SJim Harris 92805e7a8a8SJim Harris while ((dev_qp = TAILQ_FIRST(&g_comp_device_qp))) { 92905e7a8a8SJim Harris TAILQ_REMOVE(&g_comp_device_qp, dev_qp, link); 93005e7a8a8SJim Harris free(dev_qp); 93105e7a8a8SJim Harris } 932976f8b09Spaul luse 9332186fc03SZhangfei Gao if (g_opts == COMPRESS_PMD_UADK_ONLY) { 9342186fc03SZhangfei Gao rte_vdev_uninit(UADK_PMD); 9352186fc03SZhangfei Gao } 9362186fc03SZhangfei Gao 93705e7a8a8SJim Harris pthread_mutex_destroy(&g_comp_device_qp_lock); 93805e7a8a8SJim Harris 93905e7a8a8SJim Harris rte_mempool_free(g_comp_op_mp); 94005e7a8a8SJim Harris rte_mempool_free(g_mbuf_mp); 941976f8b09Spaul luse 942976f8b09Spaul luse spdk_accel_module_finish(); 94305e7a8a8SJim Harris } 9448222c5baSpaul luse 9458222c5baSpaul luse static void 9468222c5baSpaul luse accel_compress_exit(void *ctx) 9478222c5baSpaul luse { 9488222c5baSpaul luse if (g_compressdev_initialized) { 9498222c5baSpaul luse spdk_io_device_unregister(&g_compress_module, _device_unregister_cb); 9508222c5baSpaul luse g_compressdev_initialized = false; 9518222c5baSpaul luse } else { 9528222c5baSpaul luse spdk_accel_module_finish(); 9538222c5baSpaul luse } 9548222c5baSpaul luse } 955