/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk_cunit.h"
/* We have our own mock for this */
#define UNIT_TEST_NO_VTOPHYS
#include "common/lib/test_env.c"
#include "spdk_internal/mock.h"
#include "thread/thread_internal.h"
#include "unit/lib/json_mock.c"
#include "spdk/reduce.h"

#include <rte_compressdev.h>

/* There will be one mbuf if the data perfectly matches the chunk size,
 * or there could be an offset into the data and a remainder after
 * the data, or both, for a max of 3.
 */
#define UT_MBUFS_PER_OP 3
/* For testing the crossing of a huge page boundary on address translation,
 * we'll have an extra one but we only test on the source side.
 */
#define UT_MBUFS_PER_OP_BOUND_TEST 4

struct spdk_bdev_io *g_bdev_io;
struct spdk_io_channel *g_io_ch;
struct rte_comp_op g_comp_op[2];
struct vbdev_compress g_comp_bdev;
struct comp_device_qp g_device_qp;
struct compress_dev g_device;
struct rte_compressdev_capabilities g_cdev_cap;
static struct rte_mbuf *g_src_mbufs[UT_MBUFS_PER_OP_BOUND_TEST];
static struct rte_mbuf *g_dst_mbufs[UT_MBUFS_PER_OP];
static struct rte_mbuf g_expected_src_mbufs[UT_MBUFS_PER_OP_BOUND_TEST];
static struct rte_mbuf g_expected_dst_mbufs[UT_MBUFS_PER_OP];
struct comp_bdev_io *g_io_ctx;
struct comp_io_channel *g_comp_ch;

/* These functions are defined as static inline in DPDK, so we can't
 * mock them straight away. We use defines to redirect them into
 * our custom functions.
 */

static void mock_rte_pktmbuf_attach_extbuf(struct rte_mbuf *m, void *buf_addr, rte_iova_t buf_iova,
		uint16_t buf_len, struct rte_mbuf_ext_shared_info *shinfo);
#define rte_pktmbuf_attach_extbuf mock_rte_pktmbuf_attach_extbuf
static void mock_rte_pktmbuf_attach_extbuf(struct rte_mbuf *m, void *buf_addr, rte_iova_t buf_iova,
		uint16_t buf_len, struct rte_mbuf_ext_shared_info *shinfo)
{
	assert(m != NULL);
	m->buf_addr = buf_addr;
	m->buf_iova = buf_iova;
	m->buf_len = buf_len;
	m->data_len = m->pkt_len = 0;
}

static char *mock_rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len);
#define rte_pktmbuf_append mock_rte_pktmbuf_append
static char *mock_rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
{
	m->pkt_len = m->pkt_len + len;
	return NULL;
}

static inline int mock_rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail);
#define rte_pktmbuf_chain mock_rte_pktmbuf_chain
static inline int mock_rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail)
{
	struct rte_mbuf *cur_tail;

	cur_tail = rte_pktmbuf_lastseg(head);
	cur_tail->next = tail;

	return 0;
}

uint16_t ut_max_nb_queue_pairs = 0;
void __rte_experimental mock_rte_compressdev_info_get(uint8_t dev_id,
		struct rte_compressdev_info *dev_info);
#define rte_compressdev_info_get mock_rte_compressdev_info_get
void __rte_experimental
mock_rte_compressdev_info_get(uint8_t dev_id, struct rte_compressdev_info *dev_info)
{
	dev_info->max_nb_queue_pairs = ut_max_nb_queue_pairs;
	dev_info->capabilities = &g_cdev_cap;
	dev_info->driver_name = "compress_isal";
}

int ut_rte_compressdev_configure = 0;
int __rte_experimental mock_rte_compressdev_configure(uint8_t dev_id,
		struct rte_compressdev_config *config);
#define rte_compressdev_configure mock_rte_compressdev_configure
int __rte_experimental
mock_rte_compressdev_configure(uint8_t dev_id, struct rte_compressdev_config *config)
{
	return ut_rte_compressdev_configure;
}

int ut_rte_compressdev_queue_pair_setup = 0;
int __rte_experimental mock_rte_compressdev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		uint32_t max_inflight_ops, int socket_id);
#define rte_compressdev_queue_pair_setup mock_rte_compressdev_queue_pair_setup
int __rte_experimental
mock_rte_compressdev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		uint32_t max_inflight_ops, int socket_id)
{
	return ut_rte_compressdev_queue_pair_setup;
}

int ut_rte_compressdev_start = 0;
int __rte_experimental mock_rte_compressdev_start(uint8_t dev_id);
#define rte_compressdev_start mock_rte_compressdev_start
int __rte_experimental
mock_rte_compressdev_start(uint8_t dev_id)
{
	return ut_rte_compressdev_start;
}

int ut_rte_compressdev_private_xform_create = 0;
int __rte_experimental mock_rte_compressdev_private_xform_create(uint8_t dev_id,
		const struct rte_comp_xform *xform, void **private_xform);
#define rte_compressdev_private_xform_create mock_rte_compressdev_private_xform_create
int __rte_experimental
mock_rte_compressdev_private_xform_create(uint8_t dev_id,
		const struct rte_comp_xform *xform, void **private_xform)
{
	return ut_rte_compressdev_private_xform_create;
}

uint8_t ut_rte_compressdev_count = 0;
uint8_t __rte_experimental mock_rte_compressdev_count(void);
#define rte_compressdev_count mock_rte_compressdev_count
uint8_t __rte_experimental
mock_rte_compressdev_count(void)
{
	return ut_rte_compressdev_count;
}

struct rte_mempool *ut_rte_comp_op_pool_create = NULL;
struct rte_mempool *__rte_experimental mock_rte_comp_op_pool_create(const char *name,
		unsigned int nb_elts, unsigned int cache_size, uint16_t user_size,
		int socket_id);
#define rte_comp_op_pool_create mock_rte_comp_op_pool_create
struct rte_mempool *__rte_experimental
mock_rte_comp_op_pool_create(const char *name, unsigned int nb_elts,
		unsigned int cache_size, uint16_t user_size, int socket_id)
{
	return ut_rte_comp_op_pool_create;
}

void mock_rte_pktmbuf_free(struct rte_mbuf *m);
#define rte_pktmbuf_free mock_rte_pktmbuf_free
void mock_rte_pktmbuf_free(struct rte_mbuf *m)
{
}

void mock_rte_pktmbuf_free_bulk(struct rte_mbuf **m, unsigned int cnt);
#define rte_pktmbuf_free_bulk mock_rte_pktmbuf_free_bulk
void mock_rte_pktmbuf_free_bulk(struct rte_mbuf **m, unsigned int cnt)
{
}

static bool ut_boundary_alloc = false;
static int ut_rte_pktmbuf_alloc_bulk = 0;
int mock_rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs,
		unsigned count);
#define rte_pktmbuf_alloc_bulk mock_rte_pktmbuf_alloc_bulk
int mock_rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs,
		unsigned count)
{
	int i;

	/* This mocked function only supports the alloc of up to 3 src and 3 dst. */
	ut_rte_pktmbuf_alloc_bulk += count;

	if (ut_rte_pktmbuf_alloc_bulk == 1) {
		/* allocation of an extra mbuf for boundary cross test */
		ut_boundary_alloc = true;
		g_src_mbufs[UT_MBUFS_PER_OP_BOUND_TEST - 1]->next = NULL;
		*mbufs = g_src_mbufs[UT_MBUFS_PER_OP_BOUND_TEST - 1];
		ut_rte_pktmbuf_alloc_bulk = 0;
	} else if (ut_rte_pktmbuf_alloc_bulk == UT_MBUFS_PER_OP) {
		/* first test allocation, src mbufs */
		for (i = 0; i < UT_MBUFS_PER_OP; i++) {
			g_src_mbufs[i]->next = NULL;
			*mbufs++ = g_src_mbufs[i];
		}
	} else if (ut_rte_pktmbuf_alloc_bulk == UT_MBUFS_PER_OP * 2) {
		/* second test allocation, dst mbufs */
		for (i = 0; i < UT_MBUFS_PER_OP; i++) {
			g_dst_mbufs[i]->next = NULL;
			*mbufs++ = g_dst_mbufs[i];
		}
		ut_rte_pktmbuf_alloc_bulk = 0;
	} else {
		return -1;
	}
	return 0;
}

struct rte_mempool *
rte_pktmbuf_pool_create(const char *name, unsigned n, unsigned cache_size,
		uint16_t priv_size, uint16_t data_room_size, int socket_id)
{
	struct spdk_mempool *tmp;

	tmp = spdk_mempool_create("mbuf_mp", 1024, sizeof(struct rte_mbuf),
			SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
			SPDK_ENV_SOCKET_ID_ANY);

	return (struct rte_mempool *)tmp;
}

void
rte_mempool_free(struct rte_mempool *mp)
{
	if (mp) {
		spdk_mempool_free((struct spdk_mempool *)mp);
	}
}

static int ut_spdk_reduce_vol_op_complete_err = 0;
void
spdk_reduce_vol_writev(struct spdk_reduce_vol *vol, struct iovec *iov, int iovcnt,
		uint64_t offset, uint64_t length, spdk_reduce_vol_op_complete cb_fn,
		void *cb_arg)
{
	cb_fn(cb_arg, ut_spdk_reduce_vol_op_complete_err);
}

void
spdk_reduce_vol_readv(struct spdk_reduce_vol *vol, struct iovec *iov, int iovcnt,
		uint64_t offset, uint64_t length, spdk_reduce_vol_op_complete cb_fn,
		void *cb_arg)
{
	cb_fn(cb_arg, ut_spdk_reduce_vol_op_complete_err);
}

#include "bdev/compress/vbdev_compress.c"

/* SPDK stubs */
DEFINE_STUB(spdk_bdev_get_aliases, const struct spdk_bdev_aliases_list *,
	    (const struct spdk_bdev *bdev), NULL);
DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *g_bdev_io));
DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev,
		enum spdk_bdev_io_type io_type), 0);
DEFINE_STUB_V(spdk_bdev_module_release_bdev, (struct spdk_bdev *bdev));
DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc), 0);
DEFINE_STUB_V(spdk_bdev_unregister, (struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn,
				     void *cb_arg));
DEFINE_STUB(spdk_bdev_open_ext, int, (const char *bdev_name, bool write,
				      spdk_bdev_event_cb_t event_cb,
				      void *event_ctx, struct spdk_bdev_desc **_desc), 0);
DEFINE_STUB(spdk_bdev_desc_get_bdev, struct spdk_bdev *, (struct spdk_bdev_desc *desc), NULL);
DEFINE_STUB(spdk_bdev_module_claim_bdev, int, (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		struct spdk_bdev_module *module), 0);
DEFINE_STUB_V(spdk_bdev_module_examine_done, (struct spdk_bdev_module *module));
DEFINE_STUB(spdk_bdev_register, int, (struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_get_by_name, struct spdk_bdev *, (const char *bdev_name), NULL);
DEFINE_STUB(spdk_bdev_io_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_io *bdev_io),
	    0);
DEFINE_STUB(spdk_bdev_queue_io_wait, int, (struct spdk_bdev *bdev, struct spdk_io_channel *ch,
		struct spdk_bdev_io_wait_entry *entry), 0);
DEFINE_STUB_V(spdk_reduce_vol_unload, (struct spdk_reduce_vol *vol,
				       spdk_reduce_vol_op_complete cb_fn, void *cb_arg));
DEFINE_STUB_V(spdk_reduce_vol_load, (struct spdk_reduce_backing_dev *backing_dev,
				     spdk_reduce_vol_op_with_handle_complete cb_fn, void *cb_arg));
DEFINE_STUB(spdk_reduce_vol_get_params, const struct spdk_reduce_vol_params *,
	    (struct spdk_reduce_vol *vol), NULL);
DEFINE_STUB_V(spdk_reduce_vol_init, (struct spdk_reduce_vol_params *params,
				     struct spdk_reduce_backing_dev *backing_dev,
				     const char *pm_file_dir,
				     spdk_reduce_vol_op_with_handle_complete cb_fn, void *cb_arg));
DEFINE_STUB_V(spdk_reduce_vol_destroy, (struct spdk_reduce_backing_dev *backing_dev,
					spdk_reduce_vol_op_complete cb_fn, void *cb_arg));

/* DPDK stubs */
#define DPDK_DYNFIELD_OFFSET offsetof(struct rte_mbuf, dynfield1[1])
DEFINE_STUB(rte_mbuf_dynfield_register, int, (const struct rte_mbuf_dynfield *params),
	    DPDK_DYNFIELD_OFFSET);
DEFINE_STUB(rte_socket_id, unsigned, (void), 0);
DEFINE_STUB(rte_vdev_init, int, (const char *name, const char *args), 0);
DEFINE_STUB_V(rte_comp_op_free, (struct rte_comp_op *op));
DEFINE_STUB(rte_comp_op_alloc, struct rte_comp_op *, (struct rte_mempool *mempool), NULL);

int g_small_size_counter = 0;
int g_small_size_modify = 0;
uint64_t g_small_size = 0;
uint64_t
spdk_vtophys(const void *buf, uint64_t *size)
{
	g_small_size_counter++;
	if (g_small_size_counter == g_small_size_modify) {
		*size = g_small_size;
		g_small_size_counter = 0;
		g_small_size_modify = 0;
	}
	return (uint64_t)buf;
}

void
spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
{
	cb(g_io_ch, g_bdev_io, true);
}

/* Mock these functions to call the callback and then return the value we require */
int ut_spdk_bdev_readv_blocks = 0;
int
spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		struct iovec *iov, int iovcnt,
		uint64_t offset_blocks, uint64_t num_blocks,
		spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_readv_blocks, cb_arg);
	return ut_spdk_bdev_readv_blocks;
}

int ut_spdk_bdev_writev_blocks = 0;
bool ut_spdk_bdev_writev_blocks_mocked = false;
int
spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		struct iovec *iov, int iovcnt,
		uint64_t offset_blocks, uint64_t num_blocks,
		spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_writev_blocks, cb_arg);
	return ut_spdk_bdev_writev_blocks;
}

int ut_spdk_bdev_unmap_blocks = 0;
bool ut_spdk_bdev_unmap_blocks_mocked = false;
int
spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		uint64_t offset_blocks, uint64_t num_blocks,
		spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_unmap_blocks, cb_arg);
	return ut_spdk_bdev_unmap_blocks;
}

int ut_spdk_bdev_flush_blocks = 0;
bool ut_spdk_bdev_flush_blocks_mocked = false;
int
spdk_bdev_flush_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
		void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_flush_blocks, cb_arg);
	return ut_spdk_bdev_flush_blocks;
}

int ut_spdk_bdev_reset = 0;
bool ut_spdk_bdev_reset_mocked = false;
int
spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_reset, cb_arg);
	return ut_spdk_bdev_reset;
}

bool g_completion_called = false;
void
spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
{
	bdev_io->internal.status = status;
	g_completion_called = true;
}

static uint16_t ut_rte_compressdev_dequeue_burst = 0;
uint16_t
rte_compressdev_dequeue_burst(uint8_t dev_id, uint16_t qp_id, struct rte_comp_op **ops,
		uint16_t nb_op)
{
	if (ut_rte_compressdev_dequeue_burst == 0) {
		return 0;
	}

	ops[0] = &g_comp_op[0];
	ops[1] = &g_comp_op[1];

	return ut_rte_compressdev_dequeue_burst;
}

static int ut_compress_done[2];
/* done_count and done_idx together control which expected assertion
 * value to use when dequeuing 2 operations.
 */
static uint16_t done_count = 1;
static uint16_t done_idx = 0;
static void
_compress_done(void *_req, int reduce_errno)
{
	if (done_count == 1) {
		CU_ASSERT(reduce_errno == ut_compress_done[0]);
	} else if (done_count == 2) {
		CU_ASSERT(reduce_errno == ut_compress_done[done_idx++]);
	}
}

static void
_get_mbuf_array(struct rte_mbuf **mbuf_array, struct rte_mbuf *mbuf_head,
		int mbuf_count, bool null_final)
{
	int i;

	for (i = 0; i < mbuf_count; i++) {
		mbuf_array[i] = mbuf_head;
		if (mbuf_head) {
			mbuf_head = mbuf_head->next;
		}
	}
	if (null_final) {
		mbuf_array[i - 1] = NULL;
	}
}

#define FAKE_ENQUEUE_SUCCESS 255
#define FAKE_ENQUEUE_ERROR 128
#define FAKE_ENQUEUE_BUSY 64
static uint16_t ut_enqueue_value = FAKE_ENQUEUE_SUCCESS;
static struct rte_comp_op ut_expected_op;
uint16_t
rte_compressdev_enqueue_burst(uint8_t dev_id, uint16_t qp_id, struct rte_comp_op **ops,
		uint16_t nb_ops)
{
	struct rte_comp_op *op = *ops;
	struct rte_mbuf *op_mbuf[UT_MBUFS_PER_OP_BOUND_TEST];
	struct rte_mbuf *exp_mbuf[UT_MBUFS_PER_OP_BOUND_TEST];
	int i, num_src_mbufs = UT_MBUFS_PER_OP;

	switch (ut_enqueue_value) {
	case FAKE_ENQUEUE_BUSY:
		op->status = RTE_COMP_OP_STATUS_NOT_PROCESSED;
		return 0;
	case FAKE_ENQUEUE_SUCCESS:
		op->status = RTE_COMP_OP_STATUS_SUCCESS;
		return 1;
	case FAKE_ENQUEUE_ERROR:
		op->status = RTE_COMP_OP_STATUS_ERROR;
		return 0;
	default:
		break;
	}

	/* by design the compress module will never send more than 1 op at a time */
	CU_ASSERT(op->private_xform == ut_expected_op.private_xform);

	/* setup our local pointers to the chained mbufs, those pointed to in the
	 * operation struct and the expected values.
	 */
	_get_mbuf_array(op_mbuf, op->m_src, SPDK_COUNTOF(op_mbuf), true);
	_get_mbuf_array(exp_mbuf, ut_expected_op.m_src, SPDK_COUNTOF(exp_mbuf), true);

	if (ut_boundary_alloc == true) {
		/* if we crossed a boundary, we need to check the 4th src mbuf and
		 * reset the global that is used to identify whether we crossed
		 * or not
		 */
		num_src_mbufs = UT_MBUFS_PER_OP_BOUND_TEST;
		exp_mbuf[UT_MBUFS_PER_OP_BOUND_TEST - 1] = ut_expected_op.m_src->next->next->next;
		op_mbuf[UT_MBUFS_PER_OP_BOUND_TEST - 1] = op->m_src->next->next->next;
		ut_boundary_alloc = false;
	}

	for (i = 0; i < num_src_mbufs; i++) {
		CU_ASSERT(op_mbuf[i]->buf_addr == exp_mbuf[i]->buf_addr);
		CU_ASSERT(op_mbuf[i]->buf_iova == exp_mbuf[i]->buf_iova);
		CU_ASSERT(op_mbuf[i]->buf_len == exp_mbuf[i]->buf_len);
		CU_ASSERT(op_mbuf[i]->pkt_len == exp_mbuf[i]->pkt_len);
	}

	/* if only 3 mbufs were used in the test, the 4th should be zeroed */
	if (num_src_mbufs == UT_MBUFS_PER_OP) {
		CU_ASSERT(op_mbuf[UT_MBUFS_PER_OP_BOUND_TEST - 1] == NULL);
		CU_ASSERT(exp_mbuf[UT_MBUFS_PER_OP_BOUND_TEST - 1] == NULL);
	}
	CU_ASSERT(*RTE_MBUF_DYNFIELD(op->m_src, g_mbuf_offset, uint64_t *) ==
		  *RTE_MBUF_DYNFIELD(ut_expected_op.m_src, g_mbuf_offset, uint64_t *));
	CU_ASSERT(op->src.offset == ut_expected_op.src.offset);
	CU_ASSERT(op->src.length == ut_expected_op.src.length);

	/* check dst mbuf values */
	_get_mbuf_array(op_mbuf, op->m_dst, SPDK_COUNTOF(op_mbuf), true);
	_get_mbuf_array(exp_mbuf, ut_expected_op.m_dst, SPDK_COUNTOF(exp_mbuf), true);

	for (i = 0; i < UT_MBUFS_PER_OP; i++) {
		CU_ASSERT(op_mbuf[i]->buf_addr == exp_mbuf[i]->buf_addr);
		CU_ASSERT(op_mbuf[i]->buf_iova == exp_mbuf[i]->buf_iova);
		CU_ASSERT(op_mbuf[i]->buf_len == exp_mbuf[i]->buf_len);
		CU_ASSERT(op_mbuf[i]->pkt_len == exp_mbuf[i]->pkt_len);
	}
	CU_ASSERT(op->dst.offset == ut_expected_op.dst.offset);

	return ut_enqueue_value;
}
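
/* Note: the rte_compressdev_enqueue_burst() mock above compares the op it receives against
 * ut_expected_op, so tests that exercise a real submission populate ut_expected_op (and the
 * g_expected_src/dst_mbufs chains) before calling _compress_operation().
 */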

/* Global setup for all tests that share a bunch of preparation... */
static int
test_setup(void)
{
	struct spdk_thread *thread;
	int i;

	spdk_thread_lib_init(NULL, 0);

	thread = spdk_thread_create(NULL, NULL);
	spdk_set_thread(thread);

	g_comp_bdev.drv_name = "test";
	g_comp_bdev.reduce_thread = thread;
	g_comp_bdev.backing_dev.unmap = _comp_reduce_unmap;
	g_comp_bdev.backing_dev.readv = _comp_reduce_readv;
	g_comp_bdev.backing_dev.writev = _comp_reduce_writev;
	g_comp_bdev.backing_dev.compress = _comp_reduce_compress;
	g_comp_bdev.backing_dev.decompress = _comp_reduce_decompress;
	g_comp_bdev.backing_dev.blocklen = 512;
	g_comp_bdev.backing_dev.blockcnt = 1024 * 16;
	g_comp_bdev.backing_dev.sgl_in = true;
	g_comp_bdev.backing_dev.sgl_out = true;

	g_comp_bdev.device_qp = &g_device_qp;
	g_comp_bdev.device_qp->device = &g_device;

	TAILQ_INIT(&g_comp_bdev.queued_comp_ops);

	g_comp_xform = (struct rte_comp_xform) {
		.type = RTE_COMP_COMPRESS,
		.compress = {
			.algo = RTE_COMP_ALGO_DEFLATE,
			.deflate.huffman = RTE_COMP_HUFFMAN_DEFAULT,
			.level = RTE_COMP_LEVEL_MAX,
			.window_size = DEFAULT_WINDOW_SIZE,
			.chksum = RTE_COMP_CHECKSUM_NONE,
			.hash_algo = RTE_COMP_HASH_ALGO_NONE
		}
	};

	g_decomp_xform = (struct rte_comp_xform) {
		.type = RTE_COMP_DECOMPRESS,
		.decompress = {
			.algo = RTE_COMP_ALGO_DEFLATE,
			.chksum = RTE_COMP_CHECKSUM_NONE,
			.window_size = DEFAULT_WINDOW_SIZE,
			.hash_algo = RTE_COMP_HASH_ALGO_NONE
		}
	};
	g_device.comp_xform = &g_comp_xform;
	g_device.decomp_xform = &g_decomp_xform;
	g_cdev_cap.comp_feature_flags = RTE_COMP_FF_SHAREABLE_PRIV_XFORM;
	g_device.cdev_info.driver_name = "compress_isal";
	g_device.cdev_info.capabilities = &g_cdev_cap;
	for (i = 0; i < UT_MBUFS_PER_OP_BOUND_TEST; i++) {
		g_src_mbufs[i] = calloc(1, sizeof(struct rte_mbuf));
	}
	for (i = 0; i < UT_MBUFS_PER_OP; i++) {
		g_dst_mbufs[i] = calloc(1, sizeof(struct rte_mbuf));
	}

	g_bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct comp_bdev_io));
	g_bdev_io->u.bdev.iovs = calloc(128, sizeof(struct iovec));
	g_bdev_io->bdev = &g_comp_bdev.comp_bdev;
	g_io_ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct comp_io_channel));
	g_io_ch->thread = thread;
	g_comp_ch = (struct comp_io_channel *)spdk_io_channel_get_ctx(g_io_ch);
	g_io_ctx = (struct comp_bdev_io *)g_bdev_io->driver_ctx;

	g_io_ctx->comp_ch = g_comp_ch;
	g_io_ctx->comp_bdev = &g_comp_bdev;
	g_comp_bdev.device_qp = &g_device_qp;

	for (i = 0; i < UT_MBUFS_PER_OP_BOUND_TEST - 1; i++) {
		g_expected_src_mbufs[i].next = &g_expected_src_mbufs[i + 1];
	}
	g_expected_src_mbufs[UT_MBUFS_PER_OP_BOUND_TEST - 1].next = NULL;

	/* we only test w/4 mbufs on src side */
	for (i = 0; i < UT_MBUFS_PER_OP - 1; i++) {
		g_expected_dst_mbufs[i].next = &g_expected_dst_mbufs[i + 1];
	}
	g_expected_dst_mbufs[UT_MBUFS_PER_OP - 1].next = NULL;
	g_mbuf_offset = DPDK_DYNFIELD_OFFSET;

	return 0;
}

/* Global teardown for all tests */
static int
test_cleanup(void)
{
	struct spdk_thread *thread;
	int i;

	for (i = 0; i < UT_MBUFS_PER_OP_BOUND_TEST; i++) {
		free(g_src_mbufs[i]);
	}
	for (i = 0; i < UT_MBUFS_PER_OP; i++) {
		free(g_dst_mbufs[i]);
	}
	free(g_bdev_io->u.bdev.iovs);
	free(g_bdev_io);
	free(g_io_ch);

	thread = spdk_get_thread();
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);

	spdk_thread_lib_fini();

	return 0;
}

static void
test_compress_operation(void)
{
	struct iovec src_iovs[3] = {};
	int src_iovcnt;
	struct iovec dst_iovs[3] = {};
	int dst_iovcnt;
	struct spdk_reduce_vol_cb_args cb_arg;
	int rc, i;
	struct vbdev_comp_op *op;
	struct rte_mbuf *exp_src_mbuf[UT_MBUFS_PER_OP];
	struct rte_mbuf *exp_dst_mbuf[UT_MBUFS_PER_OP];

	src_iovcnt = dst_iovcnt = 3;
	for (i = 0; i < dst_iovcnt; i++) {
		src_iovs[i].iov_len = 0x1000;
		dst_iovs[i].iov_len = 0x1000;
		src_iovs[i].iov_base = (void *)0x10000000 + 0x1000 * i;
		dst_iovs[i].iov_base = (void *)0x20000000 + 0x1000 * i;
	}

	/* test rte_comp_op_alloc failure */
	MOCK_SET(rte_comp_op_alloc, NULL);
	CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
	rc = _compress_operation(&g_comp_bdev.backing_dev, &src_iovs[0], src_iovcnt,
				 &dst_iovs[0], dst_iovcnt, true, &cb_arg);
	CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == false);
	while (!TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops)) {
		op = TAILQ_FIRST(&g_comp_bdev.queued_comp_ops);
		TAILQ_REMOVE(&g_comp_bdev.queued_comp_ops, op, link);
		free(op);
	}
	CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
	CU_ASSERT(rc == 0);
	MOCK_SET(rte_comp_op_alloc, &g_comp_op[0]);

	/* test mempool get failure */
	ut_rte_pktmbuf_alloc_bulk = -1;
	CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
	rc = _compress_operation(&g_comp_bdev.backing_dev, &src_iovs[0], src_iovcnt,
				 &dst_iovs[0], dst_iovcnt, true, &cb_arg);
	CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == false);
	while (!TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops)) {
		op = TAILQ_FIRST(&g_comp_bdev.queued_comp_ops);
		TAILQ_REMOVE(&g_comp_bdev.queued_comp_ops, op, link);
		free(op);
	}
	CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
	CU_ASSERT(rc == 0);
	ut_rte_pktmbuf_alloc_bulk = 0;

	/* test enqueue failure busy */
	ut_enqueue_value = FAKE_ENQUEUE_BUSY;
	CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
	rc = _compress_operation(&g_comp_bdev.backing_dev, &src_iovs[0], src_iovcnt,
				 &dst_iovs[0], dst_iovcnt, true, &cb_arg);
	CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == false);
	while (!TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops)) {
		op = TAILQ_FIRST(&g_comp_bdev.queued_comp_ops);
		TAILQ_REMOVE(&g_comp_bdev.queued_comp_ops, op, link);
		free(op);
	}
	CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
	CU_ASSERT(rc == 0);
	ut_enqueue_value = 1;

	/* test enqueue failure error */
	ut_enqueue_value = FAKE_ENQUEUE_ERROR;
	CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
	rc = _compress_operation(&g_comp_bdev.backing_dev, &src_iovs[0], src_iovcnt,
				 &dst_iovs[0], dst_iovcnt, true, &cb_arg);
	CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
	CU_ASSERT(rc == -EINVAL);
	ut_enqueue_value = FAKE_ENQUEUE_SUCCESS;

	/* test success with 3 vector iovec */
	ut_expected_op.private_xform = &g_decomp_xform;
	ut_expected_op.src.offset = 0;
	ut_expected_op.src.length = src_iovs[0].iov_len + src_iovs[1].iov_len + src_iovs[2].iov_len;

	/* setup the src expected values */
	_get_mbuf_array(exp_src_mbuf, &g_expected_src_mbufs[0], SPDK_COUNTOF(exp_src_mbuf), false);
	ut_expected_op.m_src = exp_src_mbuf[0];

	for (i = 0; i < UT_MBUFS_PER_OP; i++) {
		*RTE_MBUF_DYNFIELD(exp_src_mbuf[i], g_mbuf_offset, uint64_t *) = (uint64_t)&cb_arg;
		exp_src_mbuf[i]->buf_addr = src_iovs[i].iov_base;
		exp_src_mbuf[i]->buf_iova = spdk_vtophys(src_iovs[i].iov_base, &src_iovs[i].iov_len);
		exp_src_mbuf[i]->buf_len = src_iovs[i].iov_len;
		exp_src_mbuf[i]->pkt_len = src_iovs[i].iov_len;
	}

	/* setup the dst expected values */
	_get_mbuf_array(exp_dst_mbuf, &g_expected_dst_mbufs[0], SPDK_COUNTOF(exp_dst_mbuf), false);
	ut_expected_op.dst.offset = 0;
	ut_expected_op.m_dst = exp_dst_mbuf[0];

	for (i = 0; i < UT_MBUFS_PER_OP; i++) {
		exp_dst_mbuf[i]->buf_addr = dst_iovs[i].iov_base;
		exp_dst_mbuf[i]->buf_iova = spdk_vtophys(dst_iovs[i].iov_base, &dst_iovs[i].iov_len);
		exp_dst_mbuf[i]->buf_len = dst_iovs[i].iov_len;
		exp_dst_mbuf[i]->pkt_len = dst_iovs[i].iov_len;
	}

	rc = _compress_operation(&g_comp_bdev.backing_dev, &src_iovs[0], src_iovcnt,
				 &dst_iovs[0], dst_iovcnt, false, &cb_arg);
	CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
	CU_ASSERT(rc == 0);

	/* test sgl out failure */
	g_comp_bdev.backing_dev.sgl_out = false;
	CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
	rc = _compress_operation(&g_comp_bdev.backing_dev, &src_iovs[0], 1,
				 &dst_iovs[0], dst_iovcnt, true, &cb_arg);
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
	g_comp_bdev.backing_dev.sgl_out = true;

	/* test sgl in failure */
	g_comp_bdev.backing_dev.sgl_in = false;
	CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
	rc = _compress_operation(&g_comp_bdev.backing_dev, &src_iovs[0], src_iovcnt,
				 &dst_iovs[0], 1, true, &cb_arg);
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
	g_comp_bdev.backing_dev.sgl_in = true;
}
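
/* Note: the test below re-runs the 3-iov scenario while forcing the spdk_vtophys() mock
 * (defined earlier) to report a shortened mapping on a chosen call, via g_small_size_counter /
 * g_small_size_modify / g_small_size. That makes the split iov consume an extra src mbuf, as
 * if it crossed a huge page boundary during address translation.
 */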

static void
test_compress_operation_cross_boundary(void)
{
	struct iovec src_iovs[3] = {};
	int src_iovcnt;
	struct iovec dst_iovs[3] = {};
	int dst_iovcnt;
	struct spdk_reduce_vol_cb_args cb_arg;
	int rc, i;
	struct rte_mbuf *exp_src_mbuf[UT_MBUFS_PER_OP_BOUND_TEST];
	struct rte_mbuf *exp_dst_mbuf[UT_MBUFS_PER_OP_BOUND_TEST];

	/* Setup the same basic 3 IOV test as used in the simple success case
	 * but then we'll start testing a vtophys boundary crossing at each
	 * position.
	 */
	src_iovcnt = dst_iovcnt = 3;
	for (i = 0; i < dst_iovcnt; i++) {
		src_iovs[i].iov_len = 0x1000;
		dst_iovs[i].iov_len = 0x1000;
		src_iovs[i].iov_base = (void *)0x10000000 + 0x1000 * i;
		dst_iovs[i].iov_base = (void *)0x20000000 + 0x1000 * i;
	}

	ut_expected_op.private_xform = &g_decomp_xform;
	ut_expected_op.src.offset = 0;
	ut_expected_op.src.length = src_iovs[0].iov_len + src_iovs[1].iov_len + src_iovs[2].iov_len;

	/* setup the src expected values */
	_get_mbuf_array(exp_src_mbuf, &g_expected_src_mbufs[0], SPDK_COUNTOF(exp_src_mbuf), false);
	ut_expected_op.m_src = exp_src_mbuf[0];

	for (i = 0; i < UT_MBUFS_PER_OP; i++) {
		*RTE_MBUF_DYNFIELD(exp_src_mbuf[i], g_mbuf_offset, uint64_t *) = (uint64_t)&cb_arg;
		exp_src_mbuf[i]->buf_addr = src_iovs[i].iov_base;
		exp_src_mbuf[i]->buf_iova = spdk_vtophys(src_iovs[i].iov_base, &src_iovs[i].iov_len);
		exp_src_mbuf[i]->buf_len = src_iovs[i].iov_len;
		exp_src_mbuf[i]->pkt_len = src_iovs[i].iov_len;
	}

	/* setup the dst expected values, we don't test needing a 4th dst mbuf */
	_get_mbuf_array(exp_dst_mbuf, &g_expected_dst_mbufs[0], SPDK_COUNTOF(exp_dst_mbuf), false);
	ut_expected_op.dst.offset = 0;
	ut_expected_op.m_dst = exp_dst_mbuf[0];

	for (i = 0; i < UT_MBUFS_PER_OP; i++) {
		exp_dst_mbuf[i]->buf_addr = dst_iovs[i].iov_base;
		exp_dst_mbuf[i]->buf_iova = spdk_vtophys(dst_iovs[i].iov_base, &dst_iovs[i].iov_len);
		exp_dst_mbuf[i]->buf_len = dst_iovs[i].iov_len;
		exp_dst_mbuf[i]->pkt_len = dst_iovs[i].iov_len;
	}

	/* force the 1st IOV to get partial length from spdk_vtophys */
	g_small_size_counter = 0;
	g_small_size_modify = 1;
	g_small_size = 0x800;
	*RTE_MBUF_DYNFIELD(exp_src_mbuf[3], g_mbuf_offset, uint64_t *) = (uint64_t)&cb_arg;

	/* first only has shorter length */
	exp_src_mbuf[0]->pkt_len = exp_src_mbuf[0]->buf_len = 0x800;

	/* 2nd was inserted by the boundary crossing condition and finishes off
	 * the length from the first */
	exp_src_mbuf[1]->buf_addr = (void *)0x10000800;
	exp_src_mbuf[1]->buf_iova = 0x10000800;
	exp_src_mbuf[1]->pkt_len = exp_src_mbuf[1]->buf_len = 0x800;

	/* 3rd looks like what the 2nd would have */
	exp_src_mbuf[2]->buf_addr = (void *)0x10001000;
	exp_src_mbuf[2]->buf_iova = 0x10001000;
	exp_src_mbuf[2]->pkt_len = exp_src_mbuf[2]->buf_len = 0x1000;

	/* a new 4th looks like what the 3rd would have */
	exp_src_mbuf[3]->buf_addr = (void *)0x10002000;
	exp_src_mbuf[3]->buf_iova = 0x10002000;
	exp_src_mbuf[3]->pkt_len = exp_src_mbuf[3]->buf_len = 0x1000;

	rc = _compress_operation(&g_comp_bdev.backing_dev, &src_iovs[0], src_iovcnt,
				 &dst_iovs[0], dst_iovcnt, false, &cb_arg);
	CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
	CU_ASSERT(rc == 0);

	/* Now force the 2nd IOV to get partial length from spdk_vtophys */
	g_small_size_counter = 0;
	g_small_size_modify = 2;
	g_small_size = 0x800;

	/* first is normal */
	exp_src_mbuf[0]->buf_addr = (void *)0x10000000;
	exp_src_mbuf[0]->buf_iova = 0x10000000;
	exp_src_mbuf[0]->pkt_len = exp_src_mbuf[0]->buf_len = 0x1000;

	/* second only has shorter length */
	exp_src_mbuf[1]->buf_addr = (void *)0x10001000;
	exp_src_mbuf[1]->buf_iova = 0x10001000;
	exp_src_mbuf[1]->pkt_len = exp_src_mbuf[1]->buf_len = 0x800;

	/* 3rd was inserted by the boundary crossing condition and finishes off
	 * the length from the second */
	exp_src_mbuf[2]->buf_addr = (void *)0x10001800;
	exp_src_mbuf[2]->buf_iova = 0x10001800;
	exp_src_mbuf[2]->pkt_len = exp_src_mbuf[2]->buf_len = 0x800;

	/* a new 4th looks like what the 3rd would have */
	exp_src_mbuf[3]->buf_addr = (void *)0x10002000;
	exp_src_mbuf[3]->buf_iova = 0x10002000;
	exp_src_mbuf[3]->pkt_len = exp_src_mbuf[3]->buf_len = 0x1000;

	rc = _compress_operation(&g_comp_bdev.backing_dev, &src_iovs[0], src_iovcnt,
				 &dst_iovs[0], dst_iovcnt, false, &cb_arg);
	CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
	CU_ASSERT(rc == 0);

	/* Finally force the 3rd IOV to get partial length from spdk_vtophys */
	g_small_size_counter = 0;
	g_small_size_modify = 3;
	g_small_size = 0x800;

	/* first is normal */
	exp_src_mbuf[0]->buf_addr = (void *)0x10000000;
	exp_src_mbuf[0]->buf_iova = 0x10000000;
	exp_src_mbuf[0]->pkt_len = exp_src_mbuf[0]->buf_len = 0x1000;

	/* second is normal */
	exp_src_mbuf[1]->buf_addr = (void *)0x10001000;
	exp_src_mbuf[1]->buf_iova = 0x10001000;
	exp_src_mbuf[1]->pkt_len = exp_src_mbuf[1]->buf_len = 0x1000;

	/* 3rd has shorter length */
	exp_src_mbuf[2]->buf_addr = (void *)0x10002000;
	exp_src_mbuf[2]->buf_iova = 0x10002000;
	exp_src_mbuf[2]->pkt_len = exp_src_mbuf[2]->buf_len = 0x800;

	/* a new 4th handles the remainder from the 3rd */
	exp_src_mbuf[3]->buf_addr = (void *)0x10002800;
	exp_src_mbuf[3]->buf_iova = 0x10002800;
	exp_src_mbuf[3]->pkt_len = exp_src_mbuf[3]->buf_len = 0x800;

	rc = _compress_operation(&g_comp_bdev.backing_dev, &src_iovs[0], src_iovcnt,
				 &dst_iovs[0], dst_iovcnt, false, &cb_arg);
	CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
	CU_ASSERT(rc == 0);

	/* Single input iov is split on page boundary, sgl_in is not supported */
	g_comp_bdev.backing_dev.sgl_in = false;
	g_small_size_counter = 0;
	g_small_size_modify = 1;
	g_small_size = 0x800;
	rc = _compress_operation(&g_comp_bdev.backing_dev, src_iovs, 1,
				 dst_iovs, 1, false, &cb_arg);
	CU_ASSERT(rc == -EINVAL);
	g_comp_bdev.backing_dev.sgl_in = true;

	/* Single output iov is split on page boundary, sgl_out is not supported */
	g_comp_bdev.backing_dev.sgl_out = false;
	g_small_size_counter = 0;
	g_small_size_modify = 2;
	g_small_size = 0x800;
	rc = _compress_operation(&g_comp_bdev.backing_dev, src_iovs, 1,
				 dst_iovs, 1, false, &cb_arg);
	CU_ASSERT(rc == -EINVAL);
	g_comp_bdev.backing_dev.sgl_out = true;
}

static void
test_poller(void)
{
	int rc;
	struct spdk_reduce_vol_cb_args *cb_args;
	struct rte_mbuf mbuf[4]; /* one src, one dst, 2 ops */
	struct vbdev_comp_op *op_to_queue;
	struct iovec src_iovs[3] = {};
	struct iovec dst_iovs[3] = {};
	int i;

	cb_args = calloc(1, sizeof(*cb_args));
	SPDK_CU_ASSERT_FATAL(cb_args != NULL);
	cb_args->cb_fn = _compress_done;
	memset(&g_comp_op[0], 0, sizeof(struct rte_comp_op));
	g_comp_op[0].m_src = &mbuf[0];
	g_comp_op[1].m_src = &mbuf[1];
	g_comp_op[0].m_dst = &mbuf[2];
	g_comp_op[1].m_dst = &mbuf[3];
	for (i = 0; i < 3; i++) {
		src_iovs[i].iov_len = 0x1000;
		dst_iovs[i].iov_len = 0x1000;
		src_iovs[i].iov_base = (void *)0x10000000 + 0x1000 * i;
		dst_iovs[i].iov_base = (void *)0x20000000 + 0x1000 * i;
	}

	/* Error from dequeue, nothing needs to be resubmitted.
	 */
	ut_rte_compressdev_dequeue_burst = 1;
	/* setup what we want dequeue to return for the op */
	*RTE_MBUF_DYNFIELD(g_comp_op[0].m_src, g_mbuf_offset, uint64_t *) = (uint64_t)cb_args;
	g_comp_op[0].produced = 1;
	g_comp_op[0].status = 1;
	/* value asserted in the reduce callback */
	ut_compress_done[0] = -EINVAL;
	CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
	rc = comp_dev_poller((void *)&g_comp_bdev);
	CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
	CU_ASSERT(rc == SPDK_POLLER_BUSY);

	/* Success from dequeue, 2 ops, nothing needs to be resubmitted.
	 */
	ut_rte_compressdev_dequeue_burst = 2;
	/* setup what we want dequeue to return for the op */
	*RTE_MBUF_DYNFIELD(g_comp_op[0].m_src, g_mbuf_offset, uint64_t *) = (uint64_t)cb_args;
	g_comp_op[0].produced = 16;
	g_comp_op[0].status = 0;
	*RTE_MBUF_DYNFIELD(g_comp_op[1].m_src, g_mbuf_offset, uint64_t *) = (uint64_t)cb_args;
	g_comp_op[1].produced = 32;
	g_comp_op[1].status = 0;
	/* value asserted in the reduce callback */
	ut_compress_done[0] = 16;
	ut_compress_done[1] = 32;
	done_count = 2;
	CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
	rc = comp_dev_poller((void *)&g_comp_bdev);
	CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
	CU_ASSERT(rc == SPDK_POLLER_BUSY);

	/* Success from dequeue, one op to be resubmitted.
	 */
	ut_rte_compressdev_dequeue_burst = 1;
	/* setup what we want dequeue to return for the op */
	*RTE_MBUF_DYNFIELD(g_comp_op[0].m_src, g_mbuf_offset, uint64_t *) = (uint64_t)cb_args;
	g_comp_op[0].produced = 16;
	g_comp_op[0].status = 0;
	/* value asserted in the reduce callback */
	ut_compress_done[0] = 16;
	done_count = 1;
	op_to_queue = calloc(1, sizeof(struct vbdev_comp_op));
	SPDK_CU_ASSERT_FATAL(op_to_queue != NULL);
	op_to_queue->backing_dev = &g_comp_bdev.backing_dev;
	op_to_queue->src_iovs = &src_iovs[0];
	op_to_queue->src_iovcnt = 3;
	op_to_queue->dst_iovs = &dst_iovs[0];
	op_to_queue->dst_iovcnt = 3;
	op_to_queue->compress = true;
	op_to_queue->cb_arg = cb_args;
	ut_enqueue_value = FAKE_ENQUEUE_SUCCESS;
	TAILQ_INSERT_TAIL(&g_comp_bdev.queued_comp_ops,
			  op_to_queue,
			  link);
	CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == false);
	rc = comp_dev_poller((void *)&g_comp_bdev);
	CU_ASSERT(TAILQ_EMPTY(&g_comp_bdev.queued_comp_ops) == true);
	CU_ASSERT(rc == SPDK_POLLER_BUSY);

	/* op_to_queue is freed in code under test */
	free(cb_args);
}

static void
test_vbdev_compress_submit_request(void)
{
	/* Single element block size write */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_completion_called = false;
	vbdev_compress_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_completion_called == true);
	CU_ASSERT(g_io_ctx->orig_io == g_bdev_io);
	CU_ASSERT(g_io_ctx->comp_bdev == &g_comp_bdev);
	CU_ASSERT(g_io_ctx->comp_ch == g_comp_ch);

	/* same write but now fail it */
	ut_spdk_reduce_vol_op_complete_err = 1;
	g_completion_called = false;
	vbdev_compress_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_completion_called == true);

	/* test a read success */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	ut_spdk_reduce_vol_op_complete_err = 0;
	g_completion_called = false;
	vbdev_compress_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_completion_called == true);

	/* test a read failure */
	ut_spdk_reduce_vol_op_complete_err = 1;
	g_completion_called = false;
	vbdev_compress_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_completion_called == true);
}

static void
test_passthru(void)
{

}

static void
test_reset(void)
{
	/* TODO: There are a few different ways to do this given that
	 * the code uses spdk_for_each_channel() to implement reset
	 * handling. Submitting w/o UT for this function for now and
	 * will follow up with something shortly.
	 */
}

static void
test_initdrivers(void)
{
	int rc;

	/* test return values from rte_vdev_init() */
	MOCK_SET(rte_vdev_init, -EEXIST);
	rc = vbdev_init_compress_drivers();
	/* This is not an error condition, we already have one */
	CU_ASSERT(rc == 0);

	/* error */
	MOCK_SET(rte_vdev_init, -2);
	rc = vbdev_init_compress_drivers();
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(g_mbuf_mp == NULL);
	CU_ASSERT(g_comp_op_mp == NULL);

	/* compressdev count 0 */
	ut_rte_compressdev_count = 0;
	MOCK_SET(rte_vdev_init, 0);
	rc = vbdev_init_compress_drivers();
	CU_ASSERT(rc == 0);

	/* bogus count */
	ut_rte_compressdev_count = RTE_COMPRESS_MAX_DEVS + 1;
	rc = vbdev_init_compress_drivers();
	CU_ASSERT(rc == -EINVAL);

	/* can't get mbuf pool */
	ut_rte_compressdev_count = 1;
	MOCK_SET(spdk_mempool_create, NULL);
	rc = vbdev_init_compress_drivers();
	CU_ASSERT(rc == -ENOMEM);
	MOCK_CLEAR(spdk_mempool_create);

	/* can't get comp op pool */
	ut_rte_comp_op_pool_create = NULL;
	rc = vbdev_init_compress_drivers();
	CU_ASSERT(rc == -ENOMEM);

	/* error on create_compress_dev() */
	ut_rte_comp_op_pool_create = (struct rte_mempool *)&test_initdrivers;
	ut_rte_compressdev_configure = -1;
	rc = vbdev_init_compress_drivers();
	CU_ASSERT(rc == -1);

	/* error on create_compress_dev() but coverage for large num queues */
	ut_max_nb_queue_pairs = 99;
	rc = vbdev_init_compress_drivers();
	CU_ASSERT(rc == -1);

	/* qpair setup fails */
	ut_rte_compressdev_configure = 0;
	ut_max_nb_queue_pairs = 0;
	ut_rte_compressdev_queue_pair_setup = -1;
	rc = vbdev_init_compress_drivers();
	CU_ASSERT(rc == -EINVAL);

	/* rte_compressdev_start fails */
	ut_rte_compressdev_queue_pair_setup = 0;
	ut_rte_compressdev_start = -1;
	rc = vbdev_init_compress_drivers();
	CU_ASSERT(rc == -1);

	/* rte_compressdev_private_xform_create() fails */
	ut_rte_compressdev_start = 0;
	ut_rte_compressdev_private_xform_create = -2;
	rc = vbdev_init_compress_drivers();
	CU_ASSERT(rc == -2);

	/* success */
	ut_rte_compressdev_private_xform_create = 0;
	rc = vbdev_init_compress_drivers();
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_mbuf_offset == DPDK_DYNFIELD_OFFSET);
	spdk_mempool_free((struct spdk_mempool *)g_mbuf_mp);
}

static void
test_supported_io(void)
{

}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("compress", test_setup, test_cleanup);
	CU_ADD_TEST(suite, test_compress_operation);
	CU_ADD_TEST(suite, test_compress_operation_cross_boundary);
	CU_ADD_TEST(suite, test_vbdev_compress_submit_request);
	CU_ADD_TEST(suite, test_passthru);
	CU_ADD_TEST(suite, test_initdrivers);
	CU_ADD_TEST(suite, test_supported_io);
	CU_ADD_TEST(suite, test_poller);
	CU_ADD_TEST(suite, test_reset);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}