1d570ad49Spaul luse /* SPDX-License-Identifier: BSD-3-Clause 2d570ad49Spaul luse * Copyright (C) 2018 Intel Corporation. 3d570ad49Spaul luse * All rights reserved. 4ee020824SAlexey Marchuk * Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 5d570ad49Spaul luse */ 6d570ad49Spaul luse 7ae431e31SKonrad Sztyber #include "spdk_internal/cunit.h" 8d570ad49Spaul luse /* We have our own mock for this */ 9d570ad49Spaul luse #define UNIT_TEST_NO_VTOPHYS 10d570ad49Spaul luse #include "common/lib/test_env.c" 11d570ad49Spaul luse #include "spdk_internal/mock.h" 12d570ad49Spaul luse #include "thread/thread_internal.h" 13d570ad49Spaul luse #include "unit/lib/json_mock.c" 14d570ad49Spaul luse 15d570ad49Spaul luse #include <rte_compressdev.h> 16d570ad49Spaul luse 17d570ad49Spaul luse /* There will be one if the data perfectly matches the chunk size, 18d570ad49Spaul luse * or there could be an offset into the data and a remainder after 19d570ad49Spaul luse * the data or both for a max of 3. 20d570ad49Spaul luse */ 21d570ad49Spaul luse #define UT_MBUFS_PER_OP 3 22d570ad49Spaul luse /* For testing the crossing of a huge page boundary on address translation, 23d570ad49Spaul luse * we'll have an extra one but we only test on the source side. 
 */
#define UT_MBUFS_PER_OP_BOUND_TEST 4

/* Fake objects handed out by the mocked DPDK/SPDK entry points below. */
struct spdk_io_channel *g_io_ch;
struct rte_comp_op g_comp_op[2];
struct comp_device_qp g_device_qp;
struct compress_dev g_device;
struct rte_compressdev_capabilities g_cdev_cap;
/* Mbufs handed out by the mocked rte_pktmbuf_alloc_bulk(); the src side has
 * one extra entry for the huge-page boundary-crossing test.
 */
static struct rte_mbuf *g_src_mbufs[UT_MBUFS_PER_OP_BOUND_TEST];
static struct rte_mbuf *g_dst_mbufs[UT_MBUFS_PER_OP];
/* Pre-chained mbufs holding the values the mocked
 * rte_compressdev_enqueue_burst() asserts against.
 */
static struct rte_mbuf g_expected_src_mbufs[UT_MBUFS_PER_OP_BOUND_TEST];
static struct rte_mbuf g_expected_dst_mbufs[UT_MBUFS_PER_OP];
struct compress_io_channel *g_comp_ch;

/* Those functions are defined as static inline in DPDK, so we can't
 * mock them straight away. We use defines to redirect them into
 * our custom functions.
 */

/* Running count of external-buffer attaches, checked by the tests. */
static int ut_total_rte_pktmbuf_attach_extbuf = 0;
static void mock_rte_pktmbuf_attach_extbuf(struct rte_mbuf *m, void *buf_addr, rte_iova_t buf_iova,
		uint16_t buf_len, struct rte_mbuf_ext_shared_info *shinfo);
#define rte_pktmbuf_attach_extbuf mock_rte_pktmbuf_attach_extbuf
/* Mimic the real attach: record the buffer address/IOVA/length in the mbuf
 * and reset the data lengths, but skip all refcount/shinfo handling.
 */
static void
mock_rte_pktmbuf_attach_extbuf(struct rte_mbuf *m, void *buf_addr, rte_iova_t buf_iova,
			       uint16_t buf_len, struct rte_mbuf_ext_shared_info *shinfo)
{
	assert(m != NULL);
	m->buf_addr = buf_addr;
	m->buf_iova = buf_iova;
	m->buf_len = buf_len;
	m->data_len = m->pkt_len = 0;
	ut_total_rte_pktmbuf_attach_extbuf++;
}

static char *mock_rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len);
#define rte_pktmbuf_append mock_rte_pktmbuf_append
/* Only grow pkt_len; always returns NULL — presumably the code under test
 * ignores the returned data pointer (NOTE(review): confirm against module).
 */
static char *
mock_rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
{
	m->pkt_len = m->pkt_len + len;
	return NULL;
}

static inline int mock_rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail);
#define rte_pktmbuf_chain mock_rte_pktmbuf_chain
/* Chain tail onto the last segment of head; never fails. */
static inline int
mock_rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail)
{
	struct rte_mbuf *cur_tail;

	cur_tail = rte_pktmbuf_lastseg(head);
	cur_tail->next = tail;

	return 0;
}

/* Queue-pair count the mocked info_get reports; set per test case. */
uint16_t ut_max_nb_queue_pairs = 0;
void __rte_experimental mock_rte_compressdev_info_get(uint8_t dev_id,
		struct rte_compressdev_info *dev_info);
#define rte_compressdev_info_get mock_rte_compressdev_info_get
/* Report the injected queue-pair count and the global capabilities table. */
void __rte_experimental
mock_rte_compressdev_info_get(uint8_t dev_id, struct rte_compressdev_info *dev_info)
{
	dev_info->max_nb_queue_pairs = ut_max_nb_queue_pairs;
	dev_info->capabilities = &g_cdev_cap;
	dev_info->driver_name = "compressdev";
}

/* Return code injected by each test case. */
int ut_rte_compressdev_configure = 0;
int __rte_experimental mock_rte_compressdev_configure(uint8_t dev_id,
		struct rte_compressdev_config *config);
#define rte_compressdev_configure mock_rte_compressdev_configure
int __rte_experimental
mock_rte_compressdev_configure(uint8_t dev_id, struct rte_compressdev_config *config)
{
	return ut_rte_compressdev_configure;
}

/* Return code injected by each test case. */
int ut_rte_compressdev_queue_pair_setup = 0;
int __rte_experimental mock_rte_compressdev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		uint32_t max_inflight_ops, int socket_id);
#define rte_compressdev_queue_pair_setup mock_rte_compressdev_queue_pair_setup
int __rte_experimental
mock_rte_compressdev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
				      uint32_t max_inflight_ops, int socket_id)
{
	return ut_rte_compressdev_queue_pair_setup;
}

/* Return code injected by each test case. */
int ut_rte_compressdev_start = 0;
int __rte_experimental mock_rte_compressdev_start(uint8_t dev_id);
#define rte_compressdev_start mock_rte_compressdev_start
int __rte_experimental
mock_rte_compressdev_start(uint8_t dev_id)
{
	return ut_rte_compressdev_start;
}

/* Return code injected by each test case. */
int ut_rte_compressdev_private_xform_create = 0;
int __rte_experimental mock_rte_compressdev_private_xform_create(uint8_t dev_id,
		const struct rte_comp_xform *xform, void **private_xform);
#define rte_compressdev_private_xform_create mock_rte_compressdev_private_xform_create
int __rte_experimental
mock_rte_compressdev_private_xform_create(uint8_t dev_id,
		const struct rte_comp_xform *xform, void **private_xform)
{
	return ut_rte_compressdev_private_xform_create;
}

/* Number of devices the mocked rte_compressdev_count() reports. */
uint8_t ut_rte_compressdev_count = 0;
uint8_t __rte_experimental mock_rte_compressdev_count(void);
#define rte_compressdev_count mock_rte_compressdev_count
uint8_t __rte_experimental
mock_rte_compressdev_count(void)
{
	return ut_rte_compressdev_count;
}

/* Mempool pointer (possibly NULL, to simulate failure) injected per test. */
struct rte_mempool *ut_rte_comp_op_pool_create = NULL;
struct rte_mempool *__rte_experimental mock_rte_comp_op_pool_create(const char *name,
		unsigned int nb_elts, unsigned int cache_size, uint16_t user_size,
		int socket_id);
#define rte_comp_op_pool_create mock_rte_comp_op_pool_create
struct rte_mempool *__rte_experimental
mock_rte_comp_op_pool_create(const char *name, unsigned int nb_elts,
			     unsigned int cache_size, uint16_t user_size, int socket_id)
{
	return ut_rte_comp_op_pool_create;
}

/* The test mbufs are owned and freed by test_setup()/test_cleanup(), so
 * the free paths are no-ops here.
 */
void mock_rte_pktmbuf_free(struct rte_mbuf *m);
#define rte_pktmbuf_free mock_rte_pktmbuf_free
void
mock_rte_pktmbuf_free(struct rte_mbuf *m)
{
}

void mock_rte_pktmbuf_free_bulk(struct rte_mbuf **m, unsigned int cnt);
#define rte_pktmbuf_free_bulk mock_rte_pktmbuf_free_bulk
void
mock_rte_pktmbuf_free_bulk(struct rte_mbuf **m, unsigned int cnt)
{
}

/* Set when the extra boundary-crossing mbuf has been handed out; consumed
 * and cleared by the mocked enqueue_burst.
 */
static bool ut_boundary_alloc = false;
/* Cumulative count of mbufs requested; tests can also pre-load it with -1
 * to force the allocation-failure path below.
 */
static int ut_rte_pktmbuf_alloc_bulk = 0;
int mock_rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs,
		unsigned count);
#define rte_pktmbuf_alloc_bulk mock_rte_pktmbuf_alloc_bulk
/* Hand out the pre-allocated global mbufs: the first 3-mbuf request gets the
 * src mbufs, the second the dst mbufs, and a single-mbuf request is treated
 * as the extra allocation done when a src buffer crosses a huge-page
 * boundary. Any other cumulative count fails with -1.
 */
int
mock_rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs,
			    unsigned count)
{
	int i;

	/* This mocked function only supports the alloc of up to 3 src and 3 dst. */
	ut_rte_pktmbuf_alloc_bulk += count;

	if (ut_rte_pktmbuf_alloc_bulk == 1) {
		/* allocation of an extra mbuf for boundary cross test */
		ut_boundary_alloc = true;
		g_src_mbufs[UT_MBUFS_PER_OP_BOUND_TEST - 1]->next = NULL;
		*mbufs = g_src_mbufs[UT_MBUFS_PER_OP_BOUND_TEST - 1];
		ut_rte_pktmbuf_alloc_bulk = 0;
	} else if (ut_rte_pktmbuf_alloc_bulk == UT_MBUFS_PER_OP) {
		/* first test allocation, src mbufs */
		for (i = 0; i < UT_MBUFS_PER_OP; i++) {
			g_src_mbufs[i]->next = NULL;
			*mbufs++ = g_src_mbufs[i];
		}
	} else if (ut_rte_pktmbuf_alloc_bulk == UT_MBUFS_PER_OP * 2) {
		/* second test allocation, dst mbufs */
		for (i = 0; i < UT_MBUFS_PER_OP; i++) {
			g_dst_mbufs[i]->next = NULL;
			*mbufs++ = g_dst_mbufs[i];
		}
		ut_rte_pktmbuf_alloc_bulk = 0;
	} else {
		return -1;
	}
	return 0;
}

/* Back the "mbuf pool" with a plain SPDK mempool; the tests never access
 * the mbufs through DPDK pool APIs.
 */
struct rte_mempool *
rte_pktmbuf_pool_create(const char *name, unsigned n, unsigned cache_size,
			uint16_t priv_size, uint16_t data_room_size, int socket_id)
{
	struct spdk_mempool *tmp;

	tmp = spdk_mempool_create("mbuf_mp", 1024, sizeof(struct rte_mbuf),
				  SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
				  SPDK_ENV_NUMA_ID_ANY);

	return (struct rte_mempool *)tmp;
}

void
rte_mempool_free(struct rte_mempool *mp)
{
	if (mp) {
		spdk_mempool_free((struct spdk_mempool *)mp);
	}
}

/* Include the code under test only after all of the DPDK redirection macros
 * above are in place.
 */
#include "accel/dpdk_compressdev/accel_dpdk_compressdev.c"

static void _compress_done(void *arg, int status);
/* Status each test expects the module to complete its accel task with. */
static int ut_expected_task_status = 0;
/* Mocked accel completion: assert the expected status, then invoke the
 * task's callback just like the real framework would.
 */
void
spdk_accel_task_complete(struct spdk_accel_task *accel_task, int status)
{
	CU_ASSERT(status == ut_expected_task_status);
	accel_task->cb_fn(accel_task, status);
}

/* SPDK stubs */
DEFINE_STUB_V(spdk_accel_module_finish, (void));
DEFINE_STUB_V(spdk_accel_module_list_add, (struct spdk_accel_module_if *accel_module));

/* DPDK stubs */
DEFINE_STUB(rte_compressdev_capability_get, const struct rte_compressdev_capabilities *,
	    (uint8_t dev_id,
	     enum rte_comp_algorithm algo), NULL);
#define DPDK_DYNFIELD_OFFSET offsetof(struct rte_mbuf, dynfield1[1])
DEFINE_STUB(rte_mbuf_dynfield_register, int, (const struct rte_mbuf_dynfield *params),
	    DPDK_DYNFIELD_OFFSET);
DEFINE_STUB(rte_socket_id, unsigned, (void), 0);
DEFINE_STUB(rte_vdev_init, int, (const char *name, const char *args), 0);
DEFINE_STUB(rte_vdev_uninit, int, (const char *name), 0);
DEFINE_STUB_V(rte_compressdev_stop, (uint8_t dev_id));
DEFINE_STUB(rte_compressdev_close, int, (uint8_t dev_id), 0);
DEFINE_STUB_V(rte_comp_op_free, (struct rte_comp_op *op));
DEFINE_STUB(rte_comp_op_alloc, struct rte_comp_op *, (struct rte_mempool *mempool), NULL);

/* Knobs for faking a short translation: once the call counter reaches
 * g_small_size_modify, spdk_vtophys() reports only g_small_size bytes,
 * simulating a buffer that crosses a huge-page boundary.
 */
int g_small_size_counter = 0;
int g_small_size_modify = 0;
uint64_t g_small_size = 0;
uint64_t
spdk_vtophys(const void *buf, uint64_t *size)
{
	g_small_size_counter++;
	if (g_small_size_counter == g_small_size_modify) {
		*size = g_small_size;
		g_small_size_counter = 0;
		g_small_size_modify = 0;
	}
	/* identity translation: physical address == virtual address */
	return (uint64_t)buf;
}

/* Number of completed ops (0, 1 or 2) the next dequeue should return. */
static uint16_t ut_rte_compressdev_dequeue_burst = 0;
uint16_t
rte_compressdev_dequeue_burst(uint8_t dev_id, uint16_t qp_id, struct rte_comp_op **ops,
			      uint16_t nb_op)
{
	if (ut_rte_compressdev_dequeue_burst == 0) {
		return 0;
	}

	ops[0] = &g_comp_op[0];
	ops[1] = &g_comp_op[1];

	return ut_rte_compressdev_dequeue_burst;
}

static uint16_t g_done_count = 1;
/* Accel completion callback: on success the task's reported output size
 * must equal the produced count of the corresponding fake comp op.
 */
static void
_compress_done(void *arg, int status)
{
	struct spdk_accel_task *task = arg;

	if (status == 0) {
		CU_ASSERT(*task->output_size == g_comp_op[g_done_count++].produced);
	}
}

/* Flatten up to mbuf_count segments of an mbuf chain into a flat array;
 * slots past the end of the chain are left NULL. When null_final is set the
 * final slot is forced to NULL (the boundary-test slot).
 */
static void
_get_mbuf_array(struct rte_mbuf **mbuf_array, struct rte_mbuf *mbuf_head,
		int mbuf_count, bool null_final)
{
	int i;

	for (i = 0; i < mbuf_count; i++) {
		mbuf_array[i] = mbuf_head;
		if (mbuf_head) {
			mbuf_head = mbuf_head->next;
		}
	}
	if (null_final) {
		mbuf_array[i - 1] = NULL;
	}
}

#define FAKE_ENQUEUE_SUCCESS 255
#define FAKE_ENQUEUE_ERROR 128
#define FAKE_ENQUEUE_BUSY 64
static uint16_t ut_enqueue_value = FAKE_ENQUEUE_SUCCESS;
static struct rte_comp_op ut_expected_op;
/* Fake enqueue: either short-circuit with the injected busy/success/error
 * status, or compare every field of the submitted op (and its chained
 * src/dst mbufs) against ut_expected_op.
 */
uint16_t
rte_compressdev_enqueue_burst(uint8_t dev_id, uint16_t qp_id, struct rte_comp_op **ops,
			      uint16_t nb_ops)
{
	struct rte_comp_op *op = *ops;
	struct rte_mbuf *op_mbuf[UT_MBUFS_PER_OP_BOUND_TEST];
	struct rte_mbuf *exp_mbuf[UT_MBUFS_PER_OP_BOUND_TEST];
	int i, num_src_mbufs = UT_MBUFS_PER_OP;

	switch (ut_enqueue_value) {
	case FAKE_ENQUEUE_BUSY:
		op->status = RTE_COMP_OP_STATUS_NOT_PROCESSED;
		return 0;
	case FAKE_ENQUEUE_SUCCESS:
		op->status = RTE_COMP_OP_STATUS_SUCCESS;
		return 1;
	case FAKE_ENQUEUE_ERROR:
		op->status = RTE_COMP_OP_STATUS_ERROR;
		return 0;
	default:
		break;
	}

	/* by design the compress module will never send more than 1 op at a time */
	CU_ASSERT(op->private_xform == ut_expected_op.private_xform);

	/* setup our local pointers to the chained mbufs, those pointed to in the
	 * operation struct and the expected values.
	 */
	_get_mbuf_array(op_mbuf, op->m_src, SPDK_COUNTOF(op_mbuf), true);
	_get_mbuf_array(exp_mbuf, ut_expected_op.m_src, SPDK_COUNTOF(exp_mbuf), true);

	if (ut_boundary_alloc == true) {
		/* if we crossed a boundary, we need to check the 4th src mbuf and
		 * reset the global that is used to identify whether we crossed
		 * or not
		 */
		num_src_mbufs = UT_MBUFS_PER_OP_BOUND_TEST;
		exp_mbuf[UT_MBUFS_PER_OP_BOUND_TEST - 1] = ut_expected_op.m_src->next->next->next;
		op_mbuf[UT_MBUFS_PER_OP_BOUND_TEST - 1] = op->m_src->next->next->next;
		ut_boundary_alloc = false;
	}

	for (i = 0; i < num_src_mbufs; i++) {
		CU_ASSERT(op_mbuf[i]->buf_addr == exp_mbuf[i]->buf_addr);
		CU_ASSERT(op_mbuf[i]->buf_iova == exp_mbuf[i]->buf_iova);
		CU_ASSERT(op_mbuf[i]->buf_len == exp_mbuf[i]->buf_len);
		CU_ASSERT(op_mbuf[i]->pkt_len == exp_mbuf[i]->pkt_len);
	}

	/* if only 3 mbufs were used in the test, the 4th should be zeroed */
	if (num_src_mbufs == UT_MBUFS_PER_OP) {
		CU_ASSERT(op_mbuf[UT_MBUFS_PER_OP_BOUND_TEST - 1] == NULL);
		CU_ASSERT(exp_mbuf[UT_MBUFS_PER_OP_BOUND_TEST - 1] == NULL);
	}
	CU_ASSERT(*RTE_MBUF_DYNFIELD(op->m_src, g_mbuf_offset, uint64_t *) ==
		  *RTE_MBUF_DYNFIELD(ut_expected_op.m_src, g_mbuf_offset, uint64_t *));
	CU_ASSERT(op->src.offset == ut_expected_op.src.offset);
	CU_ASSERT(op->src.length == ut_expected_op.src.length);

	/* check dst mbuf values */
	_get_mbuf_array(op_mbuf, op->m_dst, SPDK_COUNTOF(op_mbuf), true);
	_get_mbuf_array(exp_mbuf, ut_expected_op.m_dst, SPDK_COUNTOF(exp_mbuf), true);

	for (i = 0; i < UT_MBUFS_PER_OP; i++) {
		CU_ASSERT(op_mbuf[i]->buf_addr == exp_mbuf[i]->buf_addr);
		CU_ASSERT(op_mbuf[i]->buf_iova == exp_mbuf[i]->buf_iova);
		CU_ASSERT(op_mbuf[i]->buf_len == exp_mbuf[i]->buf_len);
		CU_ASSERT(op_mbuf[i]->pkt_len == exp_mbuf[i]->pkt_len);
	}
	CU_ASSERT(op->dst.offset == ut_expected_op.dst.offset);

	return ut_enqueue_value;
}

/* Global setup for all tests that share a bunch of preparation...
*/ 398d570ad49Spaul luse static int 399d570ad49Spaul luse test_setup(void) 400d570ad49Spaul luse { 401d570ad49Spaul luse struct spdk_thread *thread; 402d570ad49Spaul luse int i; 403d570ad49Spaul luse 404d570ad49Spaul luse spdk_thread_lib_init(NULL, 0); 405d570ad49Spaul luse 406d570ad49Spaul luse thread = spdk_thread_create(NULL, NULL); 407d570ad49Spaul luse spdk_set_thread(thread); 408d570ad49Spaul luse 409d570ad49Spaul luse g_comp_xform = (struct rte_comp_xform) { 410d570ad49Spaul luse .type = RTE_COMP_COMPRESS, 411d570ad49Spaul luse .compress = { 412d570ad49Spaul luse .algo = RTE_COMP_ALGO_DEFLATE, 413d570ad49Spaul luse .deflate.huffman = RTE_COMP_HUFFMAN_DEFAULT, 414d570ad49Spaul luse .level = RTE_COMP_LEVEL_MAX, 415d570ad49Spaul luse .window_size = DEFAULT_WINDOW_SIZE, 416d570ad49Spaul luse .chksum = RTE_COMP_CHECKSUM_NONE, 417d570ad49Spaul luse .hash_algo = RTE_COMP_HASH_ALGO_NONE 418d570ad49Spaul luse } 419d570ad49Spaul luse }; 420d570ad49Spaul luse 421d570ad49Spaul luse g_decomp_xform = (struct rte_comp_xform) { 422d570ad49Spaul luse .type = RTE_COMP_DECOMPRESS, 423d570ad49Spaul luse .decompress = { 424d570ad49Spaul luse .algo = RTE_COMP_ALGO_DEFLATE, 425d570ad49Spaul luse .chksum = RTE_COMP_CHECKSUM_NONE, 426d570ad49Spaul luse .window_size = DEFAULT_WINDOW_SIZE, 427d570ad49Spaul luse .hash_algo = RTE_COMP_HASH_ALGO_NONE 428d570ad49Spaul luse } 429d570ad49Spaul luse }; 430d570ad49Spaul luse g_device.comp_xform = &g_comp_xform; 431d570ad49Spaul luse g_device.decomp_xform = &g_decomp_xform; 432d570ad49Spaul luse g_cdev_cap.comp_feature_flags = RTE_COMP_FF_SHAREABLE_PRIV_XFORM; 433de8fb128Spaul luse g_device.cdev_info.driver_name = "compressdev"; 434d570ad49Spaul luse g_device.cdev_info.capabilities = &g_cdev_cap; 435d570ad49Spaul luse for (i = 0; i < UT_MBUFS_PER_OP_BOUND_TEST; i++) { 436de8fb128Spaul luse g_src_mbufs[i] = spdk_zmalloc(sizeof(struct rte_mbuf), 0x40, NULL, 437de8fb128Spaul luse SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA); 438d570ad49Spaul luse } 
439d570ad49Spaul luse for (i = 0; i < UT_MBUFS_PER_OP; i++) { 440de8fb128Spaul luse g_dst_mbufs[i] = spdk_zmalloc(sizeof(struct rte_mbuf), 0x40, NULL, 441de8fb128Spaul luse SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA); 442d570ad49Spaul luse } 443d570ad49Spaul luse 444de8fb128Spaul luse g_io_ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct compress_io_channel)); 445d570ad49Spaul luse g_io_ch->thread = thread; 446de8fb128Spaul luse g_comp_ch = (struct compress_io_channel *)spdk_io_channel_get_ctx(g_io_ch); 447de8fb128Spaul luse g_comp_ch->device_qp = &g_device_qp; 448de8fb128Spaul luse g_comp_ch->device_qp->device = &g_device; 449de8fb128Spaul luse g_device_qp.device->sgl_in = true; 450de8fb128Spaul luse g_device_qp.device->sgl_out = true; 451de8fb128Spaul luse g_comp_ch->src_mbufs = calloc(UT_MBUFS_PER_OP_BOUND_TEST, sizeof(void *)); 452de8fb128Spaul luse g_comp_ch->dst_mbufs = calloc(UT_MBUFS_PER_OP, sizeof(void *)); 453ee020824SAlexey Marchuk STAILQ_INIT(&g_comp_ch->queued_tasks); 454d570ad49Spaul luse 455d570ad49Spaul luse for (i = 0; i < UT_MBUFS_PER_OP_BOUND_TEST - 1; i++) { 456d570ad49Spaul luse g_expected_src_mbufs[i].next = &g_expected_src_mbufs[i + 1]; 457d570ad49Spaul luse } 458d570ad49Spaul luse g_expected_src_mbufs[UT_MBUFS_PER_OP_BOUND_TEST - 1].next = NULL; 459d570ad49Spaul luse 460d570ad49Spaul luse /* we only test w/4 mbufs on src side */ 461d570ad49Spaul luse for (i = 0; i < UT_MBUFS_PER_OP - 1; i++) { 462d570ad49Spaul luse g_expected_dst_mbufs[i].next = &g_expected_dst_mbufs[i + 1]; 463d570ad49Spaul luse } 464d570ad49Spaul luse g_expected_dst_mbufs[UT_MBUFS_PER_OP - 1].next = NULL; 465d570ad49Spaul luse g_mbuf_offset = DPDK_DYNFIELD_OFFSET; 466d570ad49Spaul luse 467d570ad49Spaul luse return 0; 468d570ad49Spaul luse } 469d570ad49Spaul luse 470d570ad49Spaul luse /* Global teardown for all tests */ 471d570ad49Spaul luse static int 472d570ad49Spaul luse test_cleanup(void) 473d570ad49Spaul luse { 474d570ad49Spaul luse struct spdk_thread *thread; 
475d570ad49Spaul luse int i; 476d570ad49Spaul luse 477d570ad49Spaul luse for (i = 0; i < UT_MBUFS_PER_OP_BOUND_TEST; i++) { 478de8fb128Spaul luse spdk_free(g_src_mbufs[i]); 479d570ad49Spaul luse } 480d570ad49Spaul luse for (i = 0; i < UT_MBUFS_PER_OP; i++) { 481de8fb128Spaul luse spdk_free(g_dst_mbufs[i]); 482d570ad49Spaul luse } 483de8fb128Spaul luse free(g_comp_ch->src_mbufs); 484de8fb128Spaul luse free(g_comp_ch->dst_mbufs); 485d570ad49Spaul luse free(g_io_ch); 486d570ad49Spaul luse 487d570ad49Spaul luse thread = spdk_get_thread(); 488d570ad49Spaul luse spdk_thread_exit(thread); 489d570ad49Spaul luse while (!spdk_thread_is_exited(thread)) { 490d570ad49Spaul luse spdk_thread_poll(thread, 0, 0); 491d570ad49Spaul luse } 492d570ad49Spaul luse spdk_thread_destroy(thread); 493d570ad49Spaul luse 494d570ad49Spaul luse spdk_thread_lib_fini(); 495d570ad49Spaul luse 496d570ad49Spaul luse return 0; 497d570ad49Spaul luse } 498d570ad49Spaul luse 499d570ad49Spaul luse static void 500d570ad49Spaul luse test_compress_operation(void) 501d570ad49Spaul luse { 502d570ad49Spaul luse struct iovec src_iovs[3] = {}; 503d570ad49Spaul luse int src_iovcnt; 504d570ad49Spaul luse struct iovec dst_iovs[3] = {}; 505d570ad49Spaul luse int dst_iovcnt; 506ee020824SAlexey Marchuk struct spdk_accel_task task = {}; 507d570ad49Spaul luse int rc, i; 508d570ad49Spaul luse struct rte_mbuf *exp_src_mbuf[UT_MBUFS_PER_OP]; 509d570ad49Spaul luse struct rte_mbuf *exp_dst_mbuf[UT_MBUFS_PER_OP]; 510de8fb128Spaul luse uint32_t output_size; 511d570ad49Spaul luse 512d570ad49Spaul luse src_iovcnt = dst_iovcnt = 3; 513d570ad49Spaul luse for (i = 0; i < dst_iovcnt; i++) { 514d570ad49Spaul luse src_iovs[i].iov_len = 0x1000; 515d570ad49Spaul luse dst_iovs[i].iov_len = 0x1000; 516d570ad49Spaul luse src_iovs[i].iov_base = (void *)0x10000000 + 0x1000 * i; 517d570ad49Spaul luse dst_iovs[i].iov_base = (void *)0x20000000 + 0x1000 * i; 518d570ad49Spaul luse } 519d570ad49Spaul luse 520de8fb128Spaul luse task.cb_fn = 
_compress_done; 5215105dc5dSKonrad Sztyber task.op_code = SPDK_ACCEL_OPC_COMPRESS; 522de8fb128Spaul luse task.output_size = &output_size; 523de8fb128Spaul luse task.d.iovs = dst_iovs; 524de8fb128Spaul luse task.d.iovcnt = dst_iovcnt; 525de8fb128Spaul luse task.s.iovs = src_iovs; 526de8fb128Spaul luse task.s.iovcnt = src_iovcnt; 527de8fb128Spaul luse 528d570ad49Spaul luse /* test rte_comp_op_alloc failure */ 529d570ad49Spaul luse MOCK_SET(rte_comp_op_alloc, NULL); 530ee020824SAlexey Marchuk CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == true); 531de8fb128Spaul luse rc = _compress_operation(g_comp_ch, &task); 532d570ad49Spaul luse CU_ASSERT(rc == 0); 533ee020824SAlexey Marchuk CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == false); 534ee020824SAlexey Marchuk while (!STAILQ_EMPTY(&g_comp_ch->queued_tasks)) { 535ee020824SAlexey Marchuk STAILQ_REMOVE_HEAD(&g_comp_ch->queued_tasks, link); 536de8fb128Spaul luse } 537ee020824SAlexey Marchuk CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == true); 538d570ad49Spaul luse 539d570ad49Spaul luse /* test mempool get failure */ 540de8fb128Spaul luse MOCK_SET(rte_comp_op_alloc, &g_comp_op[0]); 541d570ad49Spaul luse ut_rte_pktmbuf_alloc_bulk = -1; 542ee020824SAlexey Marchuk CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == true); 543de8fb128Spaul luse rc = _compress_operation(g_comp_ch, &task); 544ee020824SAlexey Marchuk CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == false); 545ee020824SAlexey Marchuk while (!STAILQ_EMPTY(&g_comp_ch->queued_tasks)) { 546ee020824SAlexey Marchuk STAILQ_REMOVE_HEAD(&g_comp_ch->queued_tasks, link); 547d570ad49Spaul luse } 548ee020824SAlexey Marchuk CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == true); 549d570ad49Spaul luse CU_ASSERT(rc == 0); 550d570ad49Spaul luse ut_rte_pktmbuf_alloc_bulk = 0; 551d570ad49Spaul luse 552d570ad49Spaul luse /* test enqueue failure busy */ 553d570ad49Spaul luse ut_enqueue_value = FAKE_ENQUEUE_BUSY; 554ee020824SAlexey Marchuk 
CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == true); 555de8fb128Spaul luse rc = _compress_operation(g_comp_ch, &task); 556ee020824SAlexey Marchuk CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == false); 557ee020824SAlexey Marchuk while (!STAILQ_EMPTY(&g_comp_ch->queued_tasks)) { 558ee020824SAlexey Marchuk STAILQ_REMOVE_HEAD(&g_comp_ch->queued_tasks, link); 559d570ad49Spaul luse } 560ee020824SAlexey Marchuk CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == true); 561d570ad49Spaul luse CU_ASSERT(rc == 0); 562d570ad49Spaul luse ut_enqueue_value = 1; 563d570ad49Spaul luse 564d570ad49Spaul luse /* test enqueue failure error */ 565d570ad49Spaul luse ut_enqueue_value = FAKE_ENQUEUE_ERROR; 566ee020824SAlexey Marchuk CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == true); 567de8fb128Spaul luse rc = _compress_operation(g_comp_ch, &task); 568ee020824SAlexey Marchuk CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == true); 569d570ad49Spaul luse CU_ASSERT(rc == -EINVAL); 570d570ad49Spaul luse ut_enqueue_value = FAKE_ENQUEUE_SUCCESS; 571d570ad49Spaul luse 572d570ad49Spaul luse /* test success with 3 vector iovec */ 573d570ad49Spaul luse ut_expected_op.private_xform = &g_decomp_xform; 574d570ad49Spaul luse ut_expected_op.src.offset = 0; 575d570ad49Spaul luse ut_expected_op.src.length = src_iovs[0].iov_len + src_iovs[1].iov_len + src_iovs[2].iov_len; 576d570ad49Spaul luse 577d570ad49Spaul luse /* setup the src expected values */ 578d570ad49Spaul luse _get_mbuf_array(exp_src_mbuf, &g_expected_src_mbufs[0], SPDK_COUNTOF(exp_src_mbuf), false); 579d570ad49Spaul luse ut_expected_op.m_src = exp_src_mbuf[0]; 580d570ad49Spaul luse 581d570ad49Spaul luse for (i = 0; i < UT_MBUFS_PER_OP; i++) { 582de8fb128Spaul luse *RTE_MBUF_DYNFIELD(exp_src_mbuf[i], g_mbuf_offset, uint64_t *) = (uint64_t)&task; 583d570ad49Spaul luse exp_src_mbuf[i]->buf_addr = src_iovs[i].iov_base; 584d570ad49Spaul luse exp_src_mbuf[i]->buf_iova = spdk_vtophys(src_iovs[i].iov_base, &src_iovs[i].iov_len); 
585d570ad49Spaul luse exp_src_mbuf[i]->buf_len = src_iovs[i].iov_len; 586d570ad49Spaul luse exp_src_mbuf[i]->pkt_len = src_iovs[i].iov_len; 587d570ad49Spaul luse } 588d570ad49Spaul luse 589d570ad49Spaul luse /* setup the dst expected values */ 590d570ad49Spaul luse _get_mbuf_array(exp_dst_mbuf, &g_expected_dst_mbufs[0], SPDK_COUNTOF(exp_dst_mbuf), false); 591d570ad49Spaul luse ut_expected_op.dst.offset = 0; 592d570ad49Spaul luse ut_expected_op.m_dst = exp_dst_mbuf[0]; 593d570ad49Spaul luse 594d570ad49Spaul luse for (i = 0; i < UT_MBUFS_PER_OP; i++) { 595d570ad49Spaul luse exp_dst_mbuf[i]->buf_addr = dst_iovs[i].iov_base; 596d570ad49Spaul luse exp_dst_mbuf[i]->buf_iova = spdk_vtophys(dst_iovs[i].iov_base, &dst_iovs[i].iov_len); 597d570ad49Spaul luse exp_dst_mbuf[i]->buf_len = dst_iovs[i].iov_len; 598d570ad49Spaul luse exp_dst_mbuf[i]->pkt_len = dst_iovs[i].iov_len; 599d570ad49Spaul luse } 600d570ad49Spaul luse 601de8fb128Spaul luse rc = _compress_operation(g_comp_ch, &task); 602ee020824SAlexey Marchuk CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == true); 603d570ad49Spaul luse CU_ASSERT(rc == 0); 604d570ad49Spaul luse 605d570ad49Spaul luse /* test sgl out failure */ 606de8fb128Spaul luse g_device.sgl_out = false; 607ee020824SAlexey Marchuk CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == true); 608de8fb128Spaul luse rc = _compress_operation(g_comp_ch, &task); 609d570ad49Spaul luse CU_ASSERT(rc == -EINVAL); 610ee020824SAlexey Marchuk CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == true); 611de8fb128Spaul luse g_device.sgl_out = true; 612d570ad49Spaul luse 613d570ad49Spaul luse /* test sgl in failure */ 614de8fb128Spaul luse g_device.sgl_in = false; 615ee020824SAlexey Marchuk CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == true); 616de8fb128Spaul luse rc = _compress_operation(g_comp_ch, &task); 617d570ad49Spaul luse CU_ASSERT(rc == -EINVAL); 618ee020824SAlexey Marchuk CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == true); 619de8fb128Spaul luse 
	g_device.sgl_in = true;
}

/* Verify mbuf construction when spdk_vtophys() reports a shorter-than-requested
 * contiguous length (a huge page boundary crossing): the affected source iov
 * must be split into an extra mbuf, using the 4th entry of the expected-mbuf
 * array. Only the source side is tested this way (see UT_MBUFS_PER_OP_BOUND_TEST). */
static void
test_compress_operation_cross_boundary(void)
{
	struct iovec src_iovs[3] = {};
	int src_iovcnt;
	struct iovec dst_iovs[3] = {};
	int dst_iovcnt;
	int rc, i;
	struct rte_mbuf *exp_src_mbuf[UT_MBUFS_PER_OP_BOUND_TEST];
	struct rte_mbuf *exp_dst_mbuf[UT_MBUFS_PER_OP_BOUND_TEST];
	struct spdk_accel_task task = {};
	uint32_t output_size;

	/* Setup the same basic 3 IOV test as used in the simple success case
	 * but then we'll start testing a vtophy boundary crossing at each
	 * position.
	 */
	src_iovcnt = dst_iovcnt = 3;
	for (i = 0; i < dst_iovcnt; i++) {
		src_iovs[i].iov_len = 0x1000;
		dst_iovs[i].iov_len = 0x1000;
		src_iovs[i].iov_base = (void *)0x10000000 + 0x1000 * i;
		dst_iovs[i].iov_base = (void *)0x20000000 + 0x1000 * i;
	}

	ut_expected_op.private_xform = &g_decomp_xform;
	ut_expected_op.src.offset = 0;
	ut_expected_op.src.length = src_iovs[0].iov_len + src_iovs[1].iov_len + src_iovs[2].iov_len;

	/* setup the src expected values */
	_get_mbuf_array(exp_src_mbuf, &g_expected_src_mbufs[0], SPDK_COUNTOF(exp_src_mbuf), false);
	ut_expected_op.m_src = exp_src_mbuf[0];

	for (i = 0; i < UT_MBUFS_PER_OP; i++) {
		*RTE_MBUF_DYNFIELD(exp_src_mbuf[i], g_mbuf_offset, uint64_t *) = (uint64_t)&task;
		exp_src_mbuf[i]->buf_addr = src_iovs[i].iov_base;
		exp_src_mbuf[i]->buf_iova = spdk_vtophys(src_iovs[i].iov_base, &src_iovs[i].iov_len);
		exp_src_mbuf[i]->buf_len = src_iovs[i].iov_len;
		exp_src_mbuf[i]->pkt_len = src_iovs[i].iov_len;
	}

	/* setup the dst expected values, we don't test needing a 4th dst mbuf */
	_get_mbuf_array(exp_dst_mbuf, &g_expected_dst_mbufs[0], SPDK_COUNTOF(exp_dst_mbuf), false);
	ut_expected_op.dst.offset = 0;
	ut_expected_op.m_dst = exp_dst_mbuf[0];

	for (i = 0; i < UT_MBUFS_PER_OP; i++) {
		exp_dst_mbuf[i]->buf_addr = dst_iovs[i].iov_base;
		exp_dst_mbuf[i]->buf_iova = spdk_vtophys(dst_iovs[i].iov_base, &dst_iovs[i].iov_len);
		exp_dst_mbuf[i]->buf_len = dst_iovs[i].iov_len;
		exp_dst_mbuf[i]->pkt_len = dst_iovs[i].iov_len;
	}

	/* force the 1st IOV to get partial length from spdk_vtophys
	 * (the mock returns g_small_size on the g_small_size_modify-th call) */
	g_small_size_counter = 0;
	g_small_size_modify = 1;
	g_small_size = 0x800;
	/* the extra 4th mbuf also carries the task pointer */
	*RTE_MBUF_DYNFIELD(exp_src_mbuf[3], g_mbuf_offset, uint64_t *) = (uint64_t)&task;

	/* first only has shorter length */
	exp_src_mbuf[0]->pkt_len = exp_src_mbuf[0]->buf_len = 0x800;

	/* 2nd was inserted by the boundary crossing condition and finishes off
	 * the length from the first */
	exp_src_mbuf[1]->buf_addr = (void *)0x10000800;
	exp_src_mbuf[1]->buf_iova = 0x10000800;
	exp_src_mbuf[1]->pkt_len = exp_src_mbuf[1]->buf_len = 0x800;

	/* 3rd looks like that the 2nd would have */
	exp_src_mbuf[2]->buf_addr = (void *)0x10001000;
	exp_src_mbuf[2]->buf_iova = 0x10001000;
	exp_src_mbuf[2]->pkt_len = exp_src_mbuf[2]->buf_len = 0x1000;

	/* a new 4th looks like what the 3rd would have */
	exp_src_mbuf[3]->buf_addr = (void *)0x10002000;
	exp_src_mbuf[3]->buf_iova = 0x10002000;
	exp_src_mbuf[3]->pkt_len = exp_src_mbuf[3]->buf_len = 0x1000;

	task.cb_fn = _compress_done;
	task.op_code = SPDK_ACCEL_OPC_COMPRESS;
	task.output_size = &output_size;
	task.d.iovs = dst_iovs;
	task.d.iovcnt = dst_iovcnt;
	task.s.iovs = src_iovs;
	task.s.iovcnt = src_iovcnt;

	rc = _compress_operation(g_comp_ch, &task);
	CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == true);
	CU_ASSERT(rc == 0);

	/* Now force the 2nd IOV to get partial length from spdk_vtophys */
	g_small_size_counter = 0;
	g_small_size_modify = 2;
	g_small_size = 0x800;

	/* first is normal */
	exp_src_mbuf[0]->buf_addr = (void *)0x10000000;
	exp_src_mbuf[0]->buf_iova = 0x10000000;
	exp_src_mbuf[0]->pkt_len = exp_src_mbuf[0]->buf_len = 0x1000;

	/* second only has shorter length */
	exp_src_mbuf[1]->buf_addr = (void *)0x10001000;
	exp_src_mbuf[1]->buf_iova = 0x10001000;
	exp_src_mbuf[1]->pkt_len = exp_src_mbuf[1]->buf_len = 0x800;

	/* 3rd was inserted by the boundary crossing condition and finishes off
	 * the length from the first */
	exp_src_mbuf[2]->buf_addr = (void *)0x10001800;
	exp_src_mbuf[2]->buf_iova = 0x10001800;
	exp_src_mbuf[2]->pkt_len = exp_src_mbuf[2]->buf_len = 0x800;

	/* a new 4th looks like what the 3rd would have */
	exp_src_mbuf[3]->buf_addr = (void *)0x10002000;
	exp_src_mbuf[3]->buf_iova = 0x10002000;
	exp_src_mbuf[3]->pkt_len = exp_src_mbuf[3]->buf_len = 0x1000;

	rc = _compress_operation(g_comp_ch, &task);
	CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == true);
	CU_ASSERT(rc == 0);

	/* Finally force the 3rd IOV to get partial length from spdk_vtophys */
	g_small_size_counter = 0;
	g_small_size_modify = 3;
	g_small_size = 0x800;

	/* first is normal */
	exp_src_mbuf[0]->buf_addr = (void *)0x10000000;
	exp_src_mbuf[0]->buf_iova = 0x10000000;
	exp_src_mbuf[0]->pkt_len = exp_src_mbuf[0]->buf_len = 0x1000;

	/* second is normal */
	exp_src_mbuf[1]->buf_addr = (void *)0x10001000;
	exp_src_mbuf[1]->buf_iova = 0x10001000;
	exp_src_mbuf[1]->pkt_len = exp_src_mbuf[1]->buf_len = 0x1000;

	/* 3rd has shorter length */
	exp_src_mbuf[2]->buf_addr = (void *)0x10002000;
	exp_src_mbuf[2]->buf_iova = 0x10002000;
	exp_src_mbuf[2]->pkt_len = exp_src_mbuf[2]->buf_len = 0x800;

	/* a new 4th handles the remainder from the 3rd */
	exp_src_mbuf[3]->buf_addr = (void *)0x10002800;
	exp_src_mbuf[3]->buf_iova = 0x10002800;
	exp_src_mbuf[3]->pkt_len = exp_src_mbuf[3]->buf_len = 0x800;

	rc = _compress_operation(g_comp_ch, &task);
	CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == true);
	CU_ASSERT(rc == 0);

	/* Single input iov is split on page boundary, sgl_in is not supported */
	g_device.sgl_in = false;
	g_small_size_counter = 0;
	g_small_size_modify = 1;
	g_small_size = 0x800;
	rc = _compress_operation(g_comp_ch, &task);
	CU_ASSERT(rc == -EINVAL);
	g_device.sgl_in = true;

	/* Single output iov is split on page boundary, sgl_out is not supported */
	g_device.sgl_out = false;
	g_small_size_counter = 0;
	g_small_size_modify = 2;
	g_small_size = 0x800;
	rc = _compress_operation(g_comp_ch, &task);
	CU_ASSERT(rc == -EINVAL);
	g_device.sgl_out = true;
}

/* Exercise _setup_compress_mbuf() directly with a single iov: no split, one
 * split and two splits at MBUF_SPLIT boundaries. Checks the reported
 * total_length, that no extra mbufs had to be allocated (src_mbuf_added == 0),
 * and the number of rte_pktmbuf_attach_extbuf() calls counted by the mock. */
static void
test_setup_compress_mbuf(void)
{
	struct iovec src_iovs = {};
	int src_iovcnt = 1;
	struct spdk_accel_task task = {};
	int src_mbuf_added = 0;
	uint64_t total_length;
	struct rte_mbuf *exp_src_mbuf[UT_MBUFS_PER_OP_BOUND_TEST];
	int rc, i;

	/* setup the src expected values */
	_get_mbuf_array(exp_src_mbuf, &g_expected_src_mbufs[0], SPDK_COUNTOF(exp_src_mbuf), false);

	/* no splitting */
	total_length = 0;
	ut_total_rte_pktmbuf_attach_extbuf = 0;
	src_iovs.iov_len = 0x1000;
	src_iovs.iov_base = (void *)0x10000000 + 0x1000;
	rc = _setup_compress_mbuf(exp_src_mbuf, &src_mbuf_added, &total_length,
				  &src_iovs, src_iovcnt, &task);
	CU_ASSERT(rc == 0);
	CU_ASSERT(total_length == src_iovs.iov_len);
	CU_ASSERT(src_mbuf_added == 0);
	CU_ASSERT(ut_total_rte_pktmbuf_attach_extbuf == 1);

	/* one split, for splitting tests we need the global mbuf array unlinked,
	 * otherwise the functional code will attempt to link them but if they are
	 * already linked, it will just create a chain that links to itself */
	for (i = 0; i < UT_MBUFS_PER_OP_BOUND_TEST - 1; i++) {
		g_expected_src_mbufs[i].next = NULL;
	}
	total_length = 0;
	ut_total_rte_pktmbuf_attach_extbuf = 0;
	src_iovs.iov_len = 0x1000 + MBUF_SPLIT;
	exp_src_mbuf[0]->buf_len = src_iovs.iov_len;
	exp_src_mbuf[0]->pkt_len = src_iovs.iov_len;
	rc = _setup_compress_mbuf(exp_src_mbuf, &src_mbuf_added, &total_length,
				  &src_iovs, src_iovcnt, &task);
	CU_ASSERT(rc == 0);
	CU_ASSERT(total_length ==
		  src_iovs.iov_len);
	CU_ASSERT(src_mbuf_added == 0);
	CU_ASSERT(ut_total_rte_pktmbuf_attach_extbuf == 2);

	/* two splits */
	for (i = 0; i < UT_MBUFS_PER_OP_BOUND_TEST - 1; i++) {
		g_expected_src_mbufs[i].next = NULL;
	}
	total_length = 0;
	ut_total_rte_pktmbuf_attach_extbuf = 0;
	src_iovs.iov_len = 0x1000 + 2 * MBUF_SPLIT;
	exp_src_mbuf[0]->buf_len = src_iovs.iov_len;
	exp_src_mbuf[0]->pkt_len = src_iovs.iov_len;

	rc = _setup_compress_mbuf(exp_src_mbuf, &src_mbuf_added, &total_length,
				  &src_iovs, src_iovcnt, &task);
	CU_ASSERT(rc == 0);
	CU_ASSERT(total_length == src_iovs.iov_len);
	CU_ASSERT(src_mbuf_added == 0);
	CU_ASSERT(ut_total_rte_pktmbuf_attach_extbuf == 3);

	/* relink the global mbuf array */
	for (i = 0; i < UT_MBUFS_PER_OP_BOUND_TEST - 1; i++) {
		g_expected_src_mbufs[i].next = &g_expected_src_mbufs[i + 1];
	}
}

/* Exercise comp_dev_poller(): dequeue returning an error status, a clean
 * two-op dequeue, and a dequeue that also resubmits a previously queued task. */
static void
test_poller(void)
{
	int rc;
	struct compress_io_channel *args;
	struct rte_mbuf mbuf[4]; /* one src, one dst, 2 ops */
	struct iovec src_iovs[3] = {};
	struct iovec dst_iovs[3] = {};
	uint32_t output_size[2];
	struct spdk_accel_task task[2] = {};
	struct spdk_accel_task *task_to_resubmit;
	struct rte_mbuf *exp_src_mbuf[UT_MBUFS_PER_OP];
	struct rte_mbuf *exp_dst_mbuf[UT_MBUFS_PER_OP];
	int i;

	args = calloc(1, sizeof(*args));
	SPDK_CU_ASSERT_FATAL(args != NULL);
	memset(&g_comp_op[0], 0, sizeof(struct rte_comp_op));
	g_comp_op[0].m_src = &mbuf[0];
	g_comp_op[1].m_src = &mbuf[1];
	g_comp_op[0].m_dst = &mbuf[2];
	g_comp_op[1].m_dst = &mbuf[3];
	for (i = 0; i < 3; i++) {
		src_iovs[i].iov_len = 0x1000;
		dst_iovs[i].iov_len = 0x1000;
		src_iovs[i].iov_base = (void *)0x10000000 + 0x1000 * i;
		dst_iovs[i].iov_base = (void *)0x20000000 + 0x1000 * i;
	}
	task[0].cb_fn = task[1].cb_fn = _compress_done;
	task[0].output_size = &output_size[0];
	task[1].output_size = &output_size[1];

	/* Error from dequeue, nothing needing to be resubmitted.
	 * The completion callback is expected to see -EIO. */
	ut_rte_compressdev_dequeue_burst = 1;
	ut_expected_task_status = -EIO;
	/* setup what we want dequeue to return for the op: the task pointer rides
	 * in the mbuf dynfield */
	*RTE_MBUF_DYNFIELD(g_comp_op[0].m_src, g_mbuf_offset, uint64_t *) = (uint64_t)&task[0];
	g_comp_op[0].produced = 1;
	g_done_count = 0;
	g_comp_op[0].status = RTE_COMP_OP_STATUS_NOT_PROCESSED;
	CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == true);
	rc = comp_dev_poller((void *)g_comp_ch);
	CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == true);
	CU_ASSERT(rc == SPDK_POLLER_BUSY);
	ut_expected_task_status = 0;

	/* Success from dequeue, 2 ops. nothing needing to be resubmitted.
	 */
	ut_rte_compressdev_dequeue_burst = 2;
	/* setup what we want dequeue to return for the op */
	*RTE_MBUF_DYNFIELD(g_comp_op[0].m_src, g_mbuf_offset, uint64_t *) = (uint64_t)&task[0];
	g_comp_op[0].produced = 16;
	g_comp_op[0].status = RTE_COMP_OP_STATUS_SUCCESS;
	*RTE_MBUF_DYNFIELD(g_comp_op[1].m_src, g_mbuf_offset, uint64_t *) = (uint64_t)&task[1];
	g_comp_op[1].produced = 32;
	g_comp_op[1].status = RTE_COMP_OP_STATUS_SUCCESS;
	g_done_count = 0;
	ut_enqueue_value = FAKE_ENQUEUE_SUCCESS;
	CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == true);
	rc = comp_dev_poller((void *)g_comp_ch);
	CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == true);
	CU_ASSERT(rc == SPDK_POLLER_BUSY);
921de8fb128Spaul luse /* One to dequeue, one op to be resubmitted. */ 922d570ad49Spaul luse ut_rte_compressdev_dequeue_burst = 1; 923d570ad49Spaul luse /* setup what we want dequeue to return for the op */ 924de8fb128Spaul luse *RTE_MBUF_DYNFIELD(g_comp_op[0].m_src, g_mbuf_offset, uint64_t *) = (uint64_t)&task[0]; 925d570ad49Spaul luse g_comp_op[0].produced = 16; 926d570ad49Spaul luse g_comp_op[0].status = 0; 927de8fb128Spaul luse g_done_count = 0; 928de8fb128Spaul luse task_to_resubmit = calloc(1, sizeof(struct spdk_accel_task)); 929de8fb128Spaul luse SPDK_CU_ASSERT_FATAL(task_to_resubmit != NULL); 930de8fb128Spaul luse task_to_resubmit->s.iovs = &src_iovs[0]; 931de8fb128Spaul luse task_to_resubmit->s.iovcnt = 3; 932de8fb128Spaul luse task_to_resubmit->d.iovs = &dst_iovs[0]; 933de8fb128Spaul luse task_to_resubmit->d.iovcnt = 3; 9345105dc5dSKonrad Sztyber task_to_resubmit->op_code = SPDK_ACCEL_OPC_COMPRESS; 935de8fb128Spaul luse task_to_resubmit->cb_arg = args; 936d570ad49Spaul luse ut_enqueue_value = FAKE_ENQUEUE_SUCCESS; 937de8fb128Spaul luse ut_expected_op.private_xform = &g_decomp_xform; 938de8fb128Spaul luse ut_expected_op.src.offset = 0; 939de8fb128Spaul luse ut_expected_op.src.length = src_iovs[0].iov_len + src_iovs[1].iov_len + src_iovs[2].iov_len; 940de8fb128Spaul luse 941de8fb128Spaul luse /* setup the src expected values */ 942de8fb128Spaul luse _get_mbuf_array(exp_src_mbuf, &g_expected_src_mbufs[0], SPDK_COUNTOF(exp_src_mbuf), false); 943de8fb128Spaul luse ut_expected_op.m_src = exp_src_mbuf[0]; 944de8fb128Spaul luse 945de8fb128Spaul luse for (i = 0; i < UT_MBUFS_PER_OP; i++) { 946de8fb128Spaul luse *RTE_MBUF_DYNFIELD(exp_src_mbuf[i], g_mbuf_offset, uint64_t *) = (uint64_t)&task[0]; 947de8fb128Spaul luse exp_src_mbuf[i]->buf_addr = src_iovs[i].iov_base; 948de8fb128Spaul luse exp_src_mbuf[i]->buf_iova = spdk_vtophys(src_iovs[i].iov_base, &src_iovs[i].iov_len); 949de8fb128Spaul luse exp_src_mbuf[i]->buf_len = src_iovs[i].iov_len; 950de8fb128Spaul luse 
exp_src_mbuf[i]->pkt_len = src_iovs[i].iov_len; 951de8fb128Spaul luse } 952de8fb128Spaul luse 953de8fb128Spaul luse /* setup the dst expected values */ 954de8fb128Spaul luse _get_mbuf_array(exp_dst_mbuf, &g_expected_dst_mbufs[0], SPDK_COUNTOF(exp_dst_mbuf), false); 955de8fb128Spaul luse ut_expected_op.dst.offset = 0; 956de8fb128Spaul luse ut_expected_op.m_dst = exp_dst_mbuf[0]; 957de8fb128Spaul luse 958de8fb128Spaul luse for (i = 0; i < UT_MBUFS_PER_OP; i++) { 959de8fb128Spaul luse exp_dst_mbuf[i]->buf_addr = dst_iovs[i].iov_base; 960de8fb128Spaul luse exp_dst_mbuf[i]->buf_iova = spdk_vtophys(dst_iovs[i].iov_base, &dst_iovs[i].iov_len); 961de8fb128Spaul luse exp_dst_mbuf[i]->buf_len = dst_iovs[i].iov_len; 962de8fb128Spaul luse exp_dst_mbuf[i]->pkt_len = dst_iovs[i].iov_len; 963de8fb128Spaul luse } 964de8fb128Spaul luse MOCK_SET(rte_comp_op_alloc, &g_comp_op[0]); 965ee020824SAlexey Marchuk STAILQ_INSERT_TAIL(&g_comp_ch->queued_tasks, 966de8fb128Spaul luse task_to_resubmit, 967d570ad49Spaul luse link); 968ee020824SAlexey Marchuk CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == false); 969de8fb128Spaul luse rc = comp_dev_poller((void *)g_comp_ch); 970ee020824SAlexey Marchuk CU_ASSERT(STAILQ_EMPTY(&g_comp_ch->queued_tasks) == true); 971d570ad49Spaul luse CU_ASSERT(rc == SPDK_POLLER_BUSY); 972d570ad49Spaul luse 973de8fb128Spaul luse free(task_to_resubmit); 974de8fb128Spaul luse free(args); 975d570ad49Spaul luse } 976d570ad49Spaul luse 977d570ad49Spaul luse static void 978d570ad49Spaul luse test_initdrivers(void) 979d570ad49Spaul luse { 980d570ad49Spaul luse int rc; 981d570ad49Spaul luse 982d570ad49Spaul luse /* compressdev count 0 */ 983de8fb128Spaul luse rc = accel_init_compress_drivers(); 9841f88c365STomasz Zawadzki CU_ASSERT(rc == -ENODEV); 985d570ad49Spaul luse 986d570ad49Spaul luse /* bogus count */ 987d570ad49Spaul luse ut_rte_compressdev_count = RTE_COMPRESS_MAX_DEVS + 1; 988de8fb128Spaul luse rc = accel_init_compress_drivers(); 989d570ad49Spaul luse 
	CU_ASSERT(rc == -EINVAL);

	/* failure with rte_mbuf_dynfield_register */
	ut_rte_compressdev_count = 1;
	MOCK_SET(rte_mbuf_dynfield_register, -1);
	rc = accel_init_compress_drivers();
	CU_ASSERT(rc == -EINVAL);
	MOCK_SET(rte_mbuf_dynfield_register, DPDK_DYNFIELD_OFFSET);

	/* error on create_compress_dev() */
	ut_rte_comp_op_pool_create = (struct rte_mempool *)0xDEADBEEF;
	ut_rte_compressdev_count = 1;
	ut_rte_compressdev_configure = -1;
	rc = accel_init_compress_drivers();
	CU_ASSERT(rc == -1);

	/* error on create_compress_dev() but coverage for large num queues */
	ut_max_nb_queue_pairs = 99;
	rc = accel_init_compress_drivers();
	CU_ASSERT(rc == -1);

	/* qpair setup fails */
	ut_rte_compressdev_configure = 0;
	ut_max_nb_queue_pairs = 0;
	ut_rte_compressdev_queue_pair_setup = -1;
	rc = accel_init_compress_drivers();
	CU_ASSERT(rc == -EINVAL);

	/* rte_compressdev_start fails */
	ut_rte_compressdev_queue_pair_setup = 0;
	ut_rte_compressdev_start = -1;
	rc = accel_init_compress_drivers();
	CU_ASSERT(rc == -1);

	/* rte_compressdev_private_xform_create() fails */
	ut_rte_compressdev_start = 0;
	ut_rte_compressdev_private_xform_create = -2;
	rc = accel_init_compress_drivers();
	CU_ASSERT(rc == -2);

	/* success */
	ut_rte_compressdev_private_xform_create = 0;
	rc = accel_init_compress_drivers();
	CU_ASSERT(rc == 0);
}

/* Register and run the CUnit suite; returns the number of failed tests. */
int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("compress", test_setup, test_cleanup);
	CU_ADD_TEST(suite, test_compress_operation);
	CU_ADD_TEST(suite, test_compress_operation_cross_boundary);
	CU_ADD_TEST(suite, test_setup_compress_mbuf);
	CU_ADD_TEST(suite, test_initdrivers);
	CU_ADD_TEST(suite, test_poller);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();
	return num_failures;
}