1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2018 - 2019 Intel Corporation 3 */ 4 #include <string.h> 5 #include <zlib.h> 6 #include <math.h> 7 #include <stdlib.h> 8 #include <unistd.h> 9 #include <stdio.h> 10 11 #include <rte_cycles.h> 12 #include <rte_malloc.h> 13 #include <rte_mempool.h> 14 #include <rte_mbuf.h> 15 #include <rte_compressdev.h> 16 #include <rte_string_fns.h> 17 18 #include "test_compressdev_test_buffer.h" 19 #include "test.h" 20 21 #define DIV_CEIL(a, b) ((a) / (b) + ((a) % (b) != 0)) 22 23 #define DEFAULT_WINDOW_SIZE 15 24 #define DEFAULT_MEM_LEVEL 8 25 #define MAX_DEQD_RETRIES 10 26 #define DEQUEUE_WAIT_TIME 10000 27 28 /* 29 * 30% extra size for compressed data compared to original data, 30 * in case data size cannot be reduced and it is actually bigger 31 * due to the compress block headers 32 */ 33 #define COMPRESS_BUF_SIZE_RATIO 1.3 34 #define COMPRESS_BUF_SIZE_RATIO_DISABLED 1.0 35 #define COMPRESS_BUF_SIZE_RATIO_OVERFLOW 0.2 36 #define NUM_LARGE_MBUFS 16 37 #define SMALL_SEG_SIZE 256 38 #define MAX_SEGS 16 39 #define NUM_OPS 16 40 #define NUM_MAX_XFORMS 16 41 #define NUM_MAX_INFLIGHT_OPS 128 42 #define CACHE_SIZE 0 43 44 #define ZLIB_CRC_CHECKSUM_WINDOW_BITS 31 45 #define ZLIB_HEADER_SIZE 2 46 #define ZLIB_TRAILER_SIZE 4 47 #define GZIP_HEADER_SIZE 10 48 #define GZIP_TRAILER_SIZE 8 49 50 #define OUT_OF_SPACE_BUF 1 51 52 #define MAX_MBUF_SEGMENT_SIZE 65535 53 #define MAX_DATA_MBUF_SIZE (MAX_MBUF_SEGMENT_SIZE - RTE_PKTMBUF_HEADROOM) 54 #define NUM_BIG_MBUFS (512 + 1) 55 #define BIG_DATA_TEST_SIZE (MAX_DATA_MBUF_SIZE * 2) 56 57 /* constants for "im buffer" tests start here */ 58 59 /* number of mbufs lower than number of inflight ops */ 60 #define IM_BUF_NUM_MBUFS 3 61 /* above threshold (QAT_FALLBACK_THLD) and below max mbuf size */ 62 #define IM_BUF_DATA_TEST_SIZE_LB 59600 63 /* data size smaller than the queue capacity */ 64 #define IM_BUF_DATA_TEST_SIZE_SGL (MAX_DATA_MBUF_SIZE * IM_BUF_NUM_MBUFS) 65 /* number of mbufs bigger than number of inflight ops */ 66 #define IM_BUF_NUM_MBUFS_OVER (NUM_MAX_INFLIGHT_OPS + 1) 67 /* data size bigger than the queue capacity */ 68 #define IM_BUF_DATA_TEST_SIZE_OVER (MAX_DATA_MBUF_SIZE * IM_BUF_NUM_MBUFS_OVER) 69 /* number of mid-size mbufs */ 70 #define IM_BUF_NUM_MBUFS_MID ((NUM_MAX_INFLIGHT_OPS / 3) + 1) 71 /* capacity of mid-size mbufs */ 72 #define IM_BUF_DATA_TEST_SIZE_MID (MAX_DATA_MBUF_SIZE * IM_BUF_NUM_MBUFS_MID) 73 74 75 const char * 76 huffman_type_strings[] = { 77 [RTE_COMP_HUFFMAN_DEFAULT] = "PMD default", 78 [RTE_COMP_HUFFMAN_FIXED] = "Fixed", 79 [RTE_COMP_HUFFMAN_DYNAMIC] = "Dynamic" 80 }; 81 82 enum zlib_direction { 83 ZLIB_NONE, 84 ZLIB_COMPRESS, 85 ZLIB_DECOMPRESS, 86 ZLIB_ALL 87 }; 88 89 enum varied_buff { 90 LB_BOTH = 0, /* both input and output are linear*/ 91 SGL_BOTH, /* both input and output are chained */ 92 SGL_TO_LB, /* input buffer is chained */ 93 LB_TO_SGL /* output buffer is chained */ 94 }; 95 96 enum overflow_test { 97 OVERFLOW_DISABLED, 98 OVERFLOW_ENABLED 99 }; 100 101 enum ratio_switch { 102 RATIO_DISABLED, 103 RATIO_ENABLED 104 }; 105 106 enum operation_type { 107 OPERATION_COMPRESSION, 108 OPERATION_DECOMPRESSION 109 }; 110 111 struct priv_op_data { 112 uint16_t orig_idx; 113 }; 114 115 struct comp_testsuite_params { 116 struct rte_mempool *large_mbuf_pool; 117 struct rte_mempool *small_mbuf_pool; 118 struct rte_mempool *big_mbuf_pool; 119 struct rte_mempool *op_pool; 120 struct rte_comp_xform *def_comp_xform; 121 struct rte_comp_xform *def_decomp_xform; 122 }; 123 124 struct interim_data_params { 125 const char * const *test_bufs; 126 unsigned int num_bufs; 127 uint16_t *buf_idx; 128 struct rte_comp_xform **compress_xforms; 129 struct rte_comp_xform **decompress_xforms; 130 unsigned int num_xforms; 131 }; 132 133 struct test_data_params { 134 enum rte_comp_op_type compress_state; 135 enum rte_comp_op_type decompress_state; 136 enum varied_buff buff_type; 137 enum zlib_direction zlib_dir; 138 unsigned int out_of_space; 139 unsigned int big_data; 140 /* stateful decompression specific parameters */ 141 unsigned int decompress_output_block_size; 142 unsigned int decompress_steps_max; 143 /* external mbufs specific parameters */ 144 unsigned int use_external_mbufs; 145 unsigned int inbuf_data_size; 146 const struct rte_memzone *inbuf_memzone; 147 const struct rte_memzone *compbuf_memzone; 148 const struct rte_memzone *uncompbuf_memzone; 149 /* overflow test activation */ 150 enum overflow_test overflow; 151 enum ratio_switch ratio; 152 }; 153 154 struct test_private_arrays { 155 struct rte_mbuf **uncomp_bufs; 156 struct rte_mbuf **comp_bufs; 157 struct rte_comp_op **ops; 158 struct rte_comp_op **ops_processed; 159 void **priv_xforms; 160 uint64_t *compress_checksum; 161 uint32_t *compressed_data_size; 162 void **stream; 163 char **all_decomp_data; 164 unsigned int *decomp_produced_data_size; 165 uint16_t num_priv_xforms; 166 }; 167 168 static struct comp_testsuite_params testsuite_params = { 0 }; 169 170 171 static void 172 testsuite_teardown(void) 173 { 174 struct comp_testsuite_params *ts_params = &testsuite_params; 175 176 if (rte_mempool_in_use_count(ts_params->large_mbuf_pool)) 177 RTE_LOG(ERR, USER1, "Large mbuf pool still has unfreed bufs\n"); 178 if (rte_mempool_in_use_count(ts_params->small_mbuf_pool)) 179 RTE_LOG(ERR, USER1, "Small mbuf pool still has unfreed bufs\n"); 180 if (rte_mempool_in_use_count(ts_params->big_mbuf_pool)) 181 RTE_LOG(ERR, USER1, "Big mbuf pool still has unfreed bufs\n"); 182 if (rte_mempool_in_use_count(ts_params->op_pool)) 183 RTE_LOG(ERR, USER1, "op pool still has unfreed ops\n"); 184 185 rte_mempool_free(ts_params->large_mbuf_pool); 186 rte_mempool_free(ts_params->small_mbuf_pool); 187 rte_mempool_free(ts_params->big_mbuf_pool); 188 rte_mempool_free(ts_params->op_pool); 189 rte_free(ts_params->def_comp_xform); 190 rte_free(ts_params->def_decomp_xform); 191 } 192 193 static int 194 testsuite_setup(void) 195 { 196 struct comp_testsuite_params *ts_params = &testsuite_params; 197 uint32_t max_buf_size = 0; 198 unsigned int i; 199 200 if (rte_compressdev_count() == 0) { 201 RTE_LOG(WARNING, USER1, "Need at least one compress device\n"); 202 return TEST_SKIPPED; 203 } 204 205 RTE_LOG(NOTICE, USER1, "Running tests on device %s\n", 206 rte_compressdev_name_get(0)); 207 208 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) 209 max_buf_size = RTE_MAX(max_buf_size, 210 strlen(compress_test_bufs[i]) + 1); 211 212 /* 213 * Buffers to be used in compression and decompression. 214 * Since decompressed data might be larger than 215 * compressed data (due to block header), 216 * buffers should be big enough for both cases. 217 */ 218 max_buf_size *= COMPRESS_BUF_SIZE_RATIO; 219 ts_params->large_mbuf_pool = rte_pktmbuf_pool_create("large_mbuf_pool", 220 NUM_LARGE_MBUFS, 221 CACHE_SIZE, 0, 222 max_buf_size + RTE_PKTMBUF_HEADROOM, 223 rte_socket_id()); 224 if (ts_params->large_mbuf_pool == NULL) { 225 RTE_LOG(ERR, USER1, "Large mbuf pool could not be created\n"); 226 return TEST_FAILED; 227 } 228 229 /* Create mempool with smaller buffers for SGL testing */ 230 ts_params->small_mbuf_pool = rte_pktmbuf_pool_create("small_mbuf_pool", 231 NUM_LARGE_MBUFS * MAX_SEGS, 232 CACHE_SIZE, 0, 233 SMALL_SEG_SIZE + RTE_PKTMBUF_HEADROOM, 234 rte_socket_id()); 235 if (ts_params->small_mbuf_pool == NULL) { 236 RTE_LOG(ERR, USER1, "Small mbuf pool could not be created\n"); 237 goto exit; 238 } 239 240 /* Create mempool with big buffers for SGL testing */ 241 ts_params->big_mbuf_pool = rte_pktmbuf_pool_create("big_mbuf_pool", 242 NUM_BIG_MBUFS + 1, 243 CACHE_SIZE, 0, 244 MAX_MBUF_SEGMENT_SIZE, 245 rte_socket_id()); 246 if (ts_params->big_mbuf_pool == NULL) { 247 RTE_LOG(ERR, USER1, "Big mbuf pool could not be created\n"); 248 goto exit; 249 } 250 251 ts_params->op_pool = rte_comp_op_pool_create("op_pool", NUM_OPS, 252 0, sizeof(struct priv_op_data), 253 rte_socket_id()); 254 if (ts_params->op_pool == NULL) { 255 RTE_LOG(ERR, USER1, "Operation pool could not be created\n"); 256 goto exit; 257 } 258 259 ts_params->def_comp_xform = 260 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0); 261 if (ts_params->def_comp_xform == NULL) { 262 RTE_LOG(ERR, USER1, 263 "Default compress xform could not be created\n"); 264 goto exit; 265 } 266 ts_params->def_decomp_xform = 267 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0); 268 if (ts_params->def_decomp_xform == NULL) { 269 RTE_LOG(ERR, USER1, 270 "Default decompress xform could not be created\n"); 271 goto exit; 272 } 273 274 /* Initializes default values for compress/decompress xforms */ 275 ts_params->def_comp_xform->type = RTE_COMP_COMPRESS; 276 ts_params->def_comp_xform->compress.algo = RTE_COMP_ALGO_DEFLATE, 277 ts_params->def_comp_xform->compress.deflate.huffman = 278 RTE_COMP_HUFFMAN_DEFAULT; 279 ts_params->def_comp_xform->compress.level = RTE_COMP_LEVEL_PMD_DEFAULT; 280 ts_params->def_comp_xform->compress.chksum = RTE_COMP_CHECKSUM_NONE; 281 ts_params->def_comp_xform->compress.window_size = DEFAULT_WINDOW_SIZE; 282 283 ts_params->def_decomp_xform->type = RTE_COMP_DECOMPRESS; 284 ts_params->def_decomp_xform->decompress.algo = RTE_COMP_ALGO_DEFLATE, 285 ts_params->def_decomp_xform->decompress.chksum = RTE_COMP_CHECKSUM_NONE; 286 ts_params->def_decomp_xform->decompress.window_size = DEFAULT_WINDOW_SIZE; 287 288 return TEST_SUCCESS; 289 290 exit: 291 testsuite_teardown(); 292 293 return TEST_FAILED; 294 } 295 296 static int 297 generic_ut_setup(void) 298 { 299 /* Configure compressdev (one device, one queue pair) */ 300 struct rte_compressdev_config config = { 301 .socket_id = rte_socket_id(), 302 .nb_queue_pairs = 1, 303 .max_nb_priv_xforms = NUM_MAX_XFORMS, 304 .max_nb_streams = 1 305 }; 306 307 if (rte_compressdev_configure(0, &config) < 0) { 308 RTE_LOG(ERR, USER1, "Device configuration failed\n"); 309 return -1; 310 } 311 312 if (rte_compressdev_queue_pair_setup(0, 0, NUM_MAX_INFLIGHT_OPS, 313 rte_socket_id()) < 0) { 314 RTE_LOG(ERR, USER1, "Queue pair setup failed\n"); 315 return -1; 316 } 317 318 if (rte_compressdev_start(0) < 0) { 319 RTE_LOG(ERR, USER1, "Device could not be started\n"); 320 return -1; 321 } 322 323 return 0; 324 } 325 326 static void 327 generic_ut_teardown(void) 328 { 329 rte_compressdev_stop(0); 330 if (rte_compressdev_close(0) < 0) 331 RTE_LOG(ERR, USER1, "Device could not be closed\n"); 332 } 333 334 static int 335 test_compressdev_invalid_configuration(void) 336 { 337 struct rte_compressdev_config invalid_config; 338 struct rte_compressdev_config valid_config = { 339 .socket_id = rte_socket_id(), 340 .nb_queue_pairs = 1, 341 .max_nb_priv_xforms = NUM_MAX_XFORMS, 342 .max_nb_streams = 1 343 }; 344 struct rte_compressdev_info dev_info; 345 346 RTE_LOG(INFO, USER1, "This is a negative test, errors are expected\n"); 347 348 /* Invalid configuration with 0 queue pairs */ 349 memcpy(&invalid_config, &valid_config, 350 sizeof(struct rte_compressdev_config)); 351 invalid_config.nb_queue_pairs = 0; 352 353 TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config), 354 "Device configuration was successful " 355 "with no queue pairs (invalid)\n"); 356 357 /* 358 * Invalid configuration with too many queue pairs 359 * (if there is an actual maximum number of queue pairs) 360 */ 361 rte_compressdev_info_get(0, &dev_info); 362 if (dev_info.max_nb_queue_pairs != 0) { 363 memcpy(&invalid_config, &valid_config, 364 sizeof(struct rte_compressdev_config)); 365 invalid_config.nb_queue_pairs = dev_info.max_nb_queue_pairs + 1; 366 367 TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config), 368 "Device configuration was successful " 369 "with too many queue pairs (invalid)\n"); 370 } 371 372 /* Invalid queue pair setup, with no number of queue pairs set */ 373 TEST_ASSERT_FAIL(rte_compressdev_queue_pair_setup(0, 0, 374 NUM_MAX_INFLIGHT_OPS, rte_socket_id()), 375 "Queue pair setup was successful " 376 "with no queue pairs set (invalid)\n"); 377 378 return TEST_SUCCESS; 379 } 380 381 static int 382 compare_buffers(const char *buffer1, uint32_t buffer1_len, 383 const char *buffer2, uint32_t buffer2_len) 384 { 385 if (buffer1_len != buffer2_len) { 386 RTE_LOG(ERR, USER1, "Buffer lengths are different\n"); 387 return -1; 388 } 389 390 if (memcmp(buffer1, buffer2, buffer1_len) != 0) { 391 RTE_LOG(ERR, USER1, "Buffers are different\n"); 392 return -1; 393 } 394 395 return 0; 396 } 397 398 /* 399 * Maps compressdev and Zlib flush flags 400 */ 401 static int 402 map_zlib_flush_flag(enum rte_comp_flush_flag flag) 403 { 404 switch (flag) { 405 case RTE_COMP_FLUSH_NONE: 406 return Z_NO_FLUSH; 407 case RTE_COMP_FLUSH_SYNC: 408 return Z_SYNC_FLUSH; 409 case RTE_COMP_FLUSH_FULL: 410 return Z_FULL_FLUSH; 411 case RTE_COMP_FLUSH_FINAL: 412 return Z_FINISH; 413 /* 414 * There should be only the values above, 415 * so this should never happen 416 */ 417 default: 418 return -1; 419 } 420 } 421 422 static int 423 compress_zlib(struct rte_comp_op *op, 424 const struct rte_comp_xform *xform, int mem_level) 425 { 426 z_stream stream; 427 int zlib_flush; 428 int strategy, window_bits, comp_level; 429 int ret = TEST_FAILED; 430 uint8_t *single_src_buf = NULL; 431 uint8_t *single_dst_buf = NULL; 432 433 /* initialize zlib stream */ 434 stream.zalloc = Z_NULL; 435 stream.zfree = Z_NULL; 436 stream.opaque = Z_NULL; 437 438 if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED) 439 strategy = Z_FIXED; 440 else 441 strategy = Z_DEFAULT_STRATEGY; 442 443 /* 444 * Window bits is the base two logarithm of the window size (in bytes). 445 * When doing raw DEFLATE, this number will be negative. 446 */ 447 window_bits = -(xform->compress.window_size); 448 if (xform->compress.chksum == RTE_COMP_CHECKSUM_ADLER32) 449 window_bits *= -1; 450 else if (xform->compress.chksum == RTE_COMP_CHECKSUM_CRC32) 451 window_bits = ZLIB_CRC_CHECKSUM_WINDOW_BITS; 452 453 comp_level = xform->compress.level; 454 455 if (comp_level != RTE_COMP_LEVEL_NONE) 456 ret = deflateInit2(&stream, comp_level, Z_DEFLATED, 457 window_bits, mem_level, strategy); 458 else 459 ret = deflateInit(&stream, Z_NO_COMPRESSION); 460 461 if (ret != Z_OK) { 462 printf("Zlib deflate could not be initialized\n"); 463 goto exit; 464 } 465 466 /* Assuming stateless operation */ 467 /* SGL Input */ 468 if (op->m_src->nb_segs > 1) { 469 single_src_buf = rte_malloc(NULL, 470 rte_pktmbuf_pkt_len(op->m_src), 0); 471 if (single_src_buf == NULL) { 472 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n"); 473 goto exit; 474 } 475 476 if (rte_pktmbuf_read(op->m_src, op->src.offset, 477 rte_pktmbuf_pkt_len(op->m_src) - 478 op->src.offset, 479 single_src_buf) == NULL) { 480 RTE_LOG(ERR, USER1, 481 "Buffer could not be read entirely\n"); 482 goto exit; 483 } 484 485 stream.avail_in = op->src.length; 486 stream.next_in = single_src_buf; 487 488 } else { 489 stream.avail_in = op->src.length; 490 stream.next_in = rte_pktmbuf_mtod_offset(op->m_src, uint8_t *, 491 op->src.offset); 492 } 493 /* SGL output */ 494 if (op->m_dst->nb_segs > 1) { 495 496 single_dst_buf = rte_malloc(NULL, 497 rte_pktmbuf_pkt_len(op->m_dst), 0); 498 if (single_dst_buf == NULL) { 499 RTE_LOG(ERR, USER1, 500 "Buffer could not be allocated\n"); 501 goto exit; 502 } 503 504 stream.avail_out = op->m_dst->pkt_len; 505 stream.next_out = single_dst_buf; 506 507 } else {/* linear output */ 508 stream.avail_out = op->m_dst->data_len; 509 stream.next_out = rte_pktmbuf_mtod_offset(op->m_dst, uint8_t *, 510 op->dst.offset); 511 } 512 513 /* Stateless operation, all buffer will be compressed in one go */ 514 zlib_flush = map_zlib_flush_flag(op->flush_flag); 515 ret = deflate(&stream, zlib_flush); 516 517 if (stream.avail_in != 0) { 518 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n"); 519 goto exit; 520 } 521 522 if (ret != Z_STREAM_END) 523 goto exit; 524 525 /* Copy data to destination SGL */ 526 if (op->m_dst->nb_segs > 1) { 527 uint32_t remaining_data = stream.total_out; 528 uint8_t *src_data = single_dst_buf; 529 struct rte_mbuf *dst_buf = op->m_dst; 530 531 while (remaining_data > 0) { 532 uint8_t *dst_data = rte_pktmbuf_mtod_offset(dst_buf, 533 uint8_t *, op->dst.offset); 534 /* Last segment */ 535 if (remaining_data < dst_buf->data_len) { 536 memcpy(dst_data, src_data, remaining_data); 537 remaining_data = 0; 538 } else { 539 memcpy(dst_data, src_data, dst_buf->data_len); 540 remaining_data -= dst_buf->data_len; 541 src_data += dst_buf->data_len; 542 dst_buf = dst_buf->next; 543 } 544 } 545 } 546 547 op->consumed = stream.total_in; 548 if (xform->compress.chksum == RTE_COMP_CHECKSUM_ADLER32) { 549 rte_pktmbuf_adj(op->m_dst, ZLIB_HEADER_SIZE); 550 rte_pktmbuf_trim(op->m_dst, ZLIB_TRAILER_SIZE); 551 op->produced = stream.total_out - (ZLIB_HEADER_SIZE + 552 ZLIB_TRAILER_SIZE); 553 } else if (xform->compress.chksum == RTE_COMP_CHECKSUM_CRC32) { 554 rte_pktmbuf_adj(op->m_dst, GZIP_HEADER_SIZE); 555 rte_pktmbuf_trim(op->m_dst, GZIP_TRAILER_SIZE); 556 op->produced = stream.total_out - (GZIP_HEADER_SIZE + 557 GZIP_TRAILER_SIZE); 558 } else 559 op->produced = stream.total_out; 560 561 op->status = RTE_COMP_OP_STATUS_SUCCESS; 562 op->output_chksum = stream.adler; 563 564 deflateReset(&stream); 565 566 ret = 0; 567 exit: 568 deflateEnd(&stream); 569 rte_free(single_src_buf); 570 rte_free(single_dst_buf); 571 572 return ret; 573 } 574 575 static int 576 decompress_zlib(struct rte_comp_op *op, 577 const struct rte_comp_xform *xform) 578 { 579 z_stream stream; 580 int window_bits; 581 int zlib_flush; 582 int ret = TEST_FAILED; 583 uint8_t *single_src_buf = NULL; 584 uint8_t *single_dst_buf = NULL; 585 586 /* initialize zlib stream */ 587 stream.zalloc = Z_NULL; 588 stream.zfree = Z_NULL; 589 stream.opaque = Z_NULL; 590 591 /* 592 * Window bits is the base two logarithm of the window size (in bytes). 593 * When doing raw DEFLATE, this number will be negative. 594 */ 595 window_bits = -(xform->decompress.window_size); 596 ret = inflateInit2(&stream, window_bits); 597 598 if (ret != Z_OK) { 599 printf("Zlib deflate could not be initialized\n"); 600 goto exit; 601 } 602 603 /* Assuming stateless operation */ 604 /* SGL */ 605 if (op->m_src->nb_segs > 1) { 606 single_src_buf = rte_malloc(NULL, 607 rte_pktmbuf_pkt_len(op->m_src), 0); 608 if (single_src_buf == NULL) { 609 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n"); 610 goto exit; 611 } 612 single_dst_buf = rte_malloc(NULL, 613 rte_pktmbuf_pkt_len(op->m_dst), 0); 614 if (single_dst_buf == NULL) { 615 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n"); 616 goto exit; 617 } 618 if (rte_pktmbuf_read(op->m_src, 0, 619 rte_pktmbuf_pkt_len(op->m_src), 620 single_src_buf) == NULL) { 621 RTE_LOG(ERR, USER1, 622 "Buffer could not be read entirely\n"); 623 goto exit; 624 } 625 626 stream.avail_in = op->src.length; 627 stream.next_in = single_src_buf; 628 stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst); 629 stream.next_out = single_dst_buf; 630 631 } else { 632 stream.avail_in = op->src.length; 633 stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *); 634 stream.avail_out = op->m_dst->data_len; 635 stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *); 636 } 637 638 /* Stateless operation, all buffer will be compressed in one go */ 639 zlib_flush = map_zlib_flush_flag(op->flush_flag); 640 ret = inflate(&stream, zlib_flush); 641 642 if (stream.avail_in != 0) { 643 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n"); 644 goto exit; 645 } 646 647 if (ret != Z_STREAM_END) 648 goto exit; 649 650 if (op->m_src->nb_segs > 1) { 651 uint32_t remaining_data = stream.total_out; 652 uint8_t *src_data = single_dst_buf; 653 struct rte_mbuf *dst_buf = op->m_dst; 654 655 while (remaining_data > 0) { 656 uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf, 657 uint8_t *); 658 /* Last segment */ 659 if (remaining_data < dst_buf->data_len) { 660 memcpy(dst_data, src_data, remaining_data); 661 remaining_data = 0; 662 } else { 663 memcpy(dst_data, src_data, dst_buf->data_len); 664 remaining_data -= dst_buf->data_len; 665 src_data += dst_buf->data_len; 666 dst_buf = dst_buf->next; 667 } 668 } 669 } 670 671 op->consumed = stream.total_in; 672 op->produced = stream.total_out; 673 op->status = RTE_COMP_OP_STATUS_SUCCESS; 674 675 inflateReset(&stream); 676 677 ret = 0; 678 exit: 679 inflateEnd(&stream); 680 681 return ret; 682 } 683 684 static int 685 prepare_sgl_bufs(const char *test_buf, struct rte_mbuf *head_buf, 686 uint32_t total_data_size, 687 struct rte_mempool *small_mbuf_pool, 688 struct rte_mempool *large_mbuf_pool, 689 uint8_t limit_segs_in_sgl, 690 uint16_t seg_size) 691 { 692 uint32_t remaining_data = total_data_size; 693 uint16_t num_remaining_segs = DIV_CEIL(remaining_data, seg_size); 694 struct rte_mempool *pool; 695 struct rte_mbuf *next_seg; 696 uint32_t data_size; 697 char *buf_ptr; 698 const char *data_ptr = test_buf; 699 uint16_t i; 700 int ret; 701 702 if (limit_segs_in_sgl != 0 && num_remaining_segs > limit_segs_in_sgl) 703 num_remaining_segs = limit_segs_in_sgl - 1; 704 705 /* 706 * Allocate data in the first segment (header) and 707 * copy data if test buffer is provided 708 */ 709 if (remaining_data < seg_size) 710 data_size = remaining_data; 711 else 712 data_size = seg_size; 713 714 buf_ptr = rte_pktmbuf_append(head_buf, data_size); 715 if (buf_ptr == NULL) { 716 RTE_LOG(ERR, USER1, 717 "Not enough space in the 1st buffer\n"); 718 return -1; 719 } 720 721 if (data_ptr != NULL) { 722 /* Copy characters without NULL terminator */ 723 memcpy(buf_ptr, data_ptr, data_size); 724 data_ptr += data_size; 725 } 726 remaining_data -= data_size; 727 num_remaining_segs--; 728 729 /* 730 * Allocate the rest of the segments, 731 * copy the rest of the data and chain the segments. 732 */ 733 for (i = 0; i < num_remaining_segs; i++) { 734 735 if (i == (num_remaining_segs - 1)) { 736 /* last segment */ 737 if (remaining_data > seg_size) 738 pool = large_mbuf_pool; 739 else 740 pool = small_mbuf_pool; 741 data_size = remaining_data; 742 } else { 743 data_size = seg_size; 744 pool = small_mbuf_pool; 745 } 746 747 next_seg = rte_pktmbuf_alloc(pool); 748 if (next_seg == NULL) { 749 RTE_LOG(ERR, USER1, 750 "New segment could not be allocated " 751 "from the mempool\n"); 752 return -1; 753 } 754 buf_ptr = rte_pktmbuf_append(next_seg, data_size); 755 if (buf_ptr == NULL) { 756 RTE_LOG(ERR, USER1, 757 "Not enough space in the buffer\n"); 758 rte_pktmbuf_free(next_seg); 759 return -1; 760 } 761 if (data_ptr != NULL) { 762 /* Copy characters without NULL terminator */ 763 memcpy(buf_ptr, data_ptr, data_size); 764 data_ptr += data_size; 765 } 766 remaining_data -= data_size; 767 768 ret = rte_pktmbuf_chain(head_buf, next_seg); 769 if (ret != 0) { 770 rte_pktmbuf_free(next_seg); 771 RTE_LOG(ERR, USER1, 772 "Segment could not chained\n"); 773 return -1; 774 } 775 } 776 777 return 0; 778 } 779 780 static void 781 extbuf_free_callback(void *addr __rte_unused, void *opaque __rte_unused) 782 { 783 } 784 785 static int 786 test_run_enqueue_dequeue(struct rte_comp_op **ops, 787 struct rte_comp_op **ops_processed, 788 unsigned int num_bufs) 789 { 790 uint16_t num_enqd, num_deqd, num_total_deqd; 791 unsigned int deqd_retries = 0; 792 int res = 0; 793 794 /* Enqueue and dequeue all operations */ 795 num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs); 796 if (num_enqd < num_bufs) { 797 RTE_LOG(ERR, USER1, 798 "Some operations could not be enqueued\n"); 799 res = -1; 800 } 801 802 /* dequeue ops even on error (same number of ops as was enqueued) */ 803 804 num_total_deqd = 0; 805 while (num_total_deqd < num_enqd) { 806 /* 807 * If retrying a dequeue call, wait for 10 ms to allow 808 * enough time to the driver to process the operations 809 */ 810 if (deqd_retries != 0) { 811 /* 812 * Avoid infinite loop if not all the 813 * operations get out of the device 814 */ 815 if (deqd_retries == MAX_DEQD_RETRIES) { 816 RTE_LOG(ERR, USER1, 817 "Not all operations could be dequeued\n"); 818 res = -1; 819 break; 820 } 821 usleep(DEQUEUE_WAIT_TIME); 822 } 823 num_deqd = rte_compressdev_dequeue_burst(0, 0, 824 &ops_processed[num_total_deqd], num_bufs); 825 num_total_deqd += num_deqd; 826 deqd_retries++; 827 828 } 829 830 return res; 831 } 832 833 /** 834 * Arrays initialization. Input buffers preparation for compression. 835 * 836 * API that initializes all the private arrays to NULL 837 * and allocates input buffers to perform compression operations. 838 * 839 * @param int_data 840 * Interim data containing session/transformation objects. 841 * @param test_data 842 * The test parameters set by users (command line parameters). 843 * @param test_priv_data 844 * A container used for aggregation all the private test arrays. 845 * @return 846 * - 0: On success. 847 * - -1: On error. 848 */ 849 static int 850 test_setup_com_bufs(const struct interim_data_params *int_data, 851 const struct test_data_params *test_data, 852 const struct test_private_arrays *test_priv_data) 853 { 854 /* local variables: */ 855 unsigned int i; 856 uint32_t data_size; 857 char *buf_ptr; 858 int ret; 859 char **all_decomp_data = test_priv_data->all_decomp_data; 860 861 struct comp_testsuite_params *ts_params = &testsuite_params; 862 863 /* from int_data: */ 864 const char * const *test_bufs = int_data->test_bufs; 865 unsigned int num_bufs = int_data->num_bufs; 866 867 /* from test_data: */ 868 unsigned int buff_type = test_data->buff_type; 869 unsigned int big_data = test_data->big_data; 870 871 /* from test_priv_data: */ 872 struct rte_mbuf **uncomp_bufs = test_priv_data->uncomp_bufs; 873 struct rte_mempool *buf_pool; 874 875 static struct rte_mbuf_ext_shared_info inbuf_info; 876 877 size_t array_size = sizeof(void *) * num_bufs; 878 879 /* Initialize all arrays to NULL */ 880 memset(test_priv_data->uncomp_bufs, 0, array_size); 881 memset(test_priv_data->comp_bufs, 0, array_size); 882 memset(test_priv_data->ops, 0, array_size); 883 memset(test_priv_data->ops_processed, 0, array_size); 884 memset(test_priv_data->priv_xforms, 0, array_size); 885 memset(test_priv_data->compressed_data_size, 886 0, sizeof(uint32_t) * num_bufs); 887 888 if (test_data->decompress_state == RTE_COMP_OP_STATEFUL) { 889 data_size = strlen(test_bufs[0]) + 1; 890 *all_decomp_data = rte_malloc(NULL, data_size, 891 RTE_CACHE_LINE_SIZE); 892 } 893 894 if (big_data) 895 buf_pool = ts_params->big_mbuf_pool; 896 else if (buff_type == SGL_BOTH) 897 buf_pool = ts_params->small_mbuf_pool; 898 else 899 buf_pool = ts_params->large_mbuf_pool; 900 901 /* for compression uncomp_bufs is used as a source buffer */ 902 /* allocation from buf_pool (mempool type) */ 903 ret = rte_pktmbuf_alloc_bulk(buf_pool, 904 uncomp_bufs, num_bufs); 905 if (ret < 0) { 906 RTE_LOG(ERR, USER1, 907 "Source mbufs could not be allocated " 908 "from the mempool\n"); 909 return -1; 910 } 911 912 if (test_data->use_external_mbufs) { 913 inbuf_info.free_cb = extbuf_free_callback; 914 inbuf_info.fcb_opaque = NULL; 915 rte_mbuf_ext_refcnt_set(&inbuf_info, 1); 916 for (i = 0; i < num_bufs; i++) { 917 rte_pktmbuf_attach_extbuf(uncomp_bufs[i], 918 test_data->inbuf_memzone->addr, 919 test_data->inbuf_memzone->iova, 920 test_data->inbuf_data_size, 921 &inbuf_info); 922 buf_ptr = rte_pktmbuf_append(uncomp_bufs[i], 923 test_data->inbuf_data_size); 924 if (buf_ptr == NULL) { 925 RTE_LOG(ERR, USER1, 926 "Append extra bytes to the source mbuf failed\n"); 927 return -1; 928 } 929 } 930 } else if (buff_type == SGL_BOTH || buff_type == SGL_TO_LB) { 931 for (i = 0; i < num_bufs; i++) { 932 data_size = strlen(test_bufs[i]) + 1; 933 if (prepare_sgl_bufs(test_bufs[i], uncomp_bufs[i], 934 data_size, 935 big_data ? buf_pool : ts_params->small_mbuf_pool, 936 big_data ? buf_pool : ts_params->large_mbuf_pool, 937 big_data ? 0 : MAX_SEGS, 938 big_data ? MAX_DATA_MBUF_SIZE : SMALL_SEG_SIZE) < 0) 939 return -1; 940 } 941 } else { 942 for (i = 0; i < num_bufs; i++) { 943 data_size = strlen(test_bufs[i]) + 1; 944 945 buf_ptr = rte_pktmbuf_append(uncomp_bufs[i], data_size); 946 if (buf_ptr == NULL) { 947 RTE_LOG(ERR, USER1, 948 "Append extra bytes to the source mbuf failed\n"); 949 return -1; 950 } 951 strlcpy(buf_ptr, test_bufs[i], data_size); 952 } 953 } 954 955 return 0; 956 } 957 958 /** 959 * Data size calculation (for both compression and decompression). 960 * 961 * Calculate size of anticipated output buffer required for both 962 * compression and decompression operations based on input int_data. 963 * 964 * @param op_type 965 * Operation type: compress or decompress 966 * @param out_of_space_and_zlib 967 * Boolean value to switch into "out of space" buffer if set. 968 * To test "out-of-space" data size, zlib_decompress must be set as well. 969 * @param test_priv_data 970 * A container used for aggregation all the private test arrays. 971 * @param int_data 972 * Interim data containing session/transformation objects. 973 * @param test_data 974 * The test parameters set by users (command line parameters). 975 * @param i 976 * current buffer index 977 * @return 978 * data size 979 */ 980 static inline uint32_t 981 test_mbufs_calculate_data_size( 982 enum operation_type op_type, 983 unsigned int out_of_space_and_zlib, 984 const struct test_private_arrays *test_priv_data, 985 const struct interim_data_params *int_data, 986 const struct test_data_params *test_data, 987 unsigned int i) 988 { 989 /* local variables: */ 990 uint32_t data_size; 991 struct priv_op_data *priv_data; 992 float ratio_val; 993 enum ratio_switch ratio = test_data->ratio; 994 995 uint8_t not_zlib_compr; /* true if zlib isn't current compression dev */ 996 enum overflow_test overflow = test_data->overflow; 997 998 /* from test_priv_data: */ 999 struct rte_comp_op **ops_processed = test_priv_data->ops_processed; 1000 1001 /* from int_data: */ 1002 const char * const *test_bufs = int_data->test_bufs; 1003 1004 if (out_of_space_and_zlib) 1005 data_size = OUT_OF_SPACE_BUF; 1006 else { 1007 if (op_type == OPERATION_COMPRESSION) { 1008 not_zlib_compr = (test_data->zlib_dir == ZLIB_DECOMPRESS 1009 || test_data->zlib_dir == ZLIB_NONE); 1010 1011 ratio_val = (ratio == RATIO_ENABLED) ? 1012 COMPRESS_BUF_SIZE_RATIO : 1013 COMPRESS_BUF_SIZE_RATIO_DISABLED; 1014 1015 ratio_val = (not_zlib_compr && 1016 (overflow == OVERFLOW_ENABLED)) ? 1017 COMPRESS_BUF_SIZE_RATIO_OVERFLOW : 1018 ratio_val; 1019 1020 data_size = strlen(test_bufs[i]) * ratio_val; 1021 } else { 1022 priv_data = (struct priv_op_data *) 1023 (ops_processed[i] + 1); 1024 data_size = strlen(test_bufs[priv_data->orig_idx]) + 1; 1025 } 1026 } 1027 1028 return data_size; 1029 } 1030 1031 1032 /** 1033 * Memory buffers preparation (for both compression and decompression). 1034 * 1035 * Function allocates output buffers to perform compression 1036 * or decompression operations depending on value of op_type. 1037 * 1038 * @param op_type 1039 * Operation type: compress or decompress 1040 * @param out_of_space_and_zlib 1041 * Boolean value to switch into "out of space" buffer if set. 1042 * To test "out-of-space" data size, zlib_decompress must be set as well. 1043 * @param test_priv_data 1044 * A container used for aggregation all the private test arrays. 1045 * @param int_data 1046 * Interim data containing session/transformation objects. 1047 * @param test_data 1048 * The test parameters set by users (command line parameters). 1049 * @param current_extbuf_info, 1050 * The structure containing all the information related to external mbufs 1051 * @return 1052 * - 0: On success. 1053 * - -1: On error. 1054 */ 1055 static int 1056 test_setup_output_bufs( 1057 enum operation_type op_type, 1058 unsigned int out_of_space_and_zlib, 1059 const struct test_private_arrays *test_priv_data, 1060 const struct interim_data_params *int_data, 1061 const struct test_data_params *test_data, 1062 struct rte_mbuf_ext_shared_info *current_extbuf_info) 1063 { 1064 /* local variables: */ 1065 unsigned int i; 1066 uint32_t data_size; 1067 int ret; 1068 char *buf_ptr; 1069 1070 /* from test_priv_data: */ 1071 struct rte_mbuf **current_bufs; 1072 1073 /* from int_data: */ 1074 unsigned int num_bufs = int_data->num_bufs; 1075 1076 /* from test_data: */ 1077 unsigned int buff_type = test_data->buff_type; 1078 unsigned int big_data = test_data->big_data; 1079 const struct rte_memzone *current_memzone; 1080 1081 struct comp_testsuite_params *ts_params = &testsuite_params; 1082 struct rte_mempool *buf_pool; 1083 1084 if (big_data) 1085 buf_pool = ts_params->big_mbuf_pool; 1086 else if (buff_type == SGL_BOTH) 1087 buf_pool = ts_params->small_mbuf_pool; 1088 else 1089 buf_pool = ts_params->large_mbuf_pool; 1090 1091 if (op_type == OPERATION_COMPRESSION) 1092 current_bufs = test_priv_data->comp_bufs; 1093 else 1094 current_bufs = test_priv_data->uncomp_bufs; 1095 1096 /* the mbufs allocation*/ 1097 ret = rte_pktmbuf_alloc_bulk(buf_pool, current_bufs, num_bufs); 1098 if (ret < 0) { 1099 RTE_LOG(ERR, USER1, 1100 "Destination mbufs could not be allocated " 1101 "from the mempool\n"); 1102 return -1; 1103 } 1104 1105 if (test_data->use_external_mbufs) { 1106 current_extbuf_info->free_cb = extbuf_free_callback; 1107 current_extbuf_info->fcb_opaque = NULL; 1108 rte_mbuf_ext_refcnt_set(current_extbuf_info, 1); 1109 if (op_type == OPERATION_COMPRESSION) 1110 current_memzone = test_data->compbuf_memzone; 1111 else 1112 current_memzone = test_data->uncompbuf_memzone; 1113 1114 for (i = 0; i < num_bufs; i++) { 1115 rte_pktmbuf_attach_extbuf(current_bufs[i], 1116 current_memzone->addr, 1117 current_memzone->iova, 1118 current_memzone->len, 1119 current_extbuf_info); 1120 rte_pktmbuf_append(current_bufs[i], 1121 current_memzone->len); 1122 } 1123 } else { 1124 for (i = 0; i < num_bufs; i++) { 1125 1126 enum rte_comp_huffman comp_huffman = 1127 ts_params->def_comp_xform->compress.deflate.huffman; 1128 1129 /* data size calculation */ 1130 data_size = test_mbufs_calculate_data_size( 1131 op_type, 1132 out_of_space_and_zlib, 1133 test_priv_data, 1134 int_data, 1135 test_data, 1136 i); 1137 1138 if (comp_huffman != RTE_COMP_HUFFMAN_DYNAMIC) { 1139 if (op_type == OPERATION_DECOMPRESSION) 1140 data_size *= COMPRESS_BUF_SIZE_RATIO; 1141 } 1142 1143 /* data allocation */ 1144 if (buff_type == SGL_BOTH || buff_type == LB_TO_SGL) { 1145 ret = prepare_sgl_bufs(NULL, current_bufs[i], 1146 data_size, 1147 big_data ? buf_pool : 1148 ts_params->small_mbuf_pool, 1149 big_data ? buf_pool : 1150 ts_params->large_mbuf_pool, 1151 big_data ? 0 : MAX_SEGS, 1152 big_data ? MAX_DATA_MBUF_SIZE : 1153 SMALL_SEG_SIZE); 1154 if (ret < 0) 1155 return -1; 1156 } else { 1157 buf_ptr = rte_pktmbuf_append(current_bufs[i], 1158 data_size); 1159 if (buf_ptr == NULL) { 1160 RTE_LOG(ERR, USER1, 1161 "Append extra bytes to the destination mbuf failed\n"); 1162 return -1; 1163 } 1164 } 1165 } 1166 } 1167 1168 return 0; 1169 } 1170 1171 /** 1172 * The main compression function. 1173 * 1174 * Function performs compression operation. 1175 * Operation(s) configuration, depending on CLI parameters. 1176 * Operation(s) processing. 1177 * 1178 * @param int_data 1179 * Interim data containing session/transformation objects. 1180 * @param test_data 1181 * The test parameters set by users (command line parameters). 1182 * @param test_priv_data 1183 * A container used for aggregation all the private test arrays. 1184 * @return 1185 * - 0: On success. 1186 * - -1: On error. 1187 */ 1188 static int 1189 test_deflate_comp_run(const struct interim_data_params *int_data, 1190 const struct test_data_params *test_data, 1191 const struct test_private_arrays *test_priv_data) 1192 { 1193 /* local variables: */ 1194 struct priv_op_data *priv_data; 1195 unsigned int i; 1196 uint16_t num_priv_xforms = 0; 1197 int ret; 1198 int ret_status = 0; 1199 char *buf_ptr; 1200 1201 struct comp_testsuite_params *ts_params = &testsuite_params; 1202 1203 /* from test_data: */ 1204 enum rte_comp_op_type operation_type = test_data->compress_state; 1205 unsigned int zlib_compress = 1206 (test_data->zlib_dir == ZLIB_ALL || 1207 test_data->zlib_dir == ZLIB_COMPRESS); 1208 1209 /* from int_data: */ 1210 struct rte_comp_xform **compress_xforms = int_data->compress_xforms; 1211 unsigned int num_xforms = int_data->num_xforms; 1212 unsigned int num_bufs = int_data->num_bufs; 1213 1214 /* from test_priv_data: */ 1215 struct rte_mbuf **comp_bufs = test_priv_data->comp_bufs; 1216 struct rte_mbuf **uncomp_bufs = test_priv_data->uncomp_bufs; 1217 struct rte_comp_op **ops = test_priv_data->ops; 1218 struct rte_comp_op **ops_processed = test_priv_data->ops_processed; 1219 void **priv_xforms = test_priv_data->priv_xforms; 1220 1221 const struct rte_compressdev_capabilities *capa = 1222 rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); 1223 1224 /* Build the compression operations */ 1225 ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs); 1226 if (ret < 0) { 1227 RTE_LOG(ERR, USER1, 1228 "Compress operations could not be allocated " 1229 "from the mempool\n"); 1230 ret_status = -1; 1231 goto exit; 1232 } 1233 1234 for (i = 0; i < num_bufs; i++) { 1235 ops[i]->m_src = uncomp_bufs[i]; 1236 ops[i]->m_dst = comp_bufs[i]; 1237 ops[i]->src.offset = 0; 1238 ops[i]->src.length = rte_pktmbuf_pkt_len(uncomp_bufs[i]); 1239 ops[i]->dst.offset = 0; 1240 1241 RTE_LOG(DEBUG, USER1, 1242 "Uncompressed buffer length = %u compressed buffer length = %u", 1243 rte_pktmbuf_pkt_len(uncomp_bufs[i]), 1244 rte_pktmbuf_pkt_len(comp_bufs[i])); 1245 1246 if (operation_type == RTE_COMP_OP_STATELESS) { 1247 ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL; 1248 } else { 1249 RTE_LOG(ERR, USER1, 1250 "Compression: stateful operations are not " 1251 "supported in these tests yet\n"); 1252 ret_status = -1; 1253 goto exit; 1254 } 1255 ops[i]->input_chksum = 0; 1256 /* 1257 * Store original operation index in private data, 1258 * since ordering does not have to be maintained, 1259 * when dequeueing from compressdev, so a comparison 1260 * at the end of the test can be done. 1261 */ 1262 priv_data = (struct priv_op_data *) (ops[i] + 1); 1263 priv_data->orig_idx = i; 1264 } 1265 1266 /* Compress data (either with Zlib API or compressdev API */ 1267 if (zlib_compress) { 1268 for (i = 0; i < num_bufs; i++) { 1269 const struct rte_comp_xform *compress_xform = 1270 compress_xforms[i % num_xforms]; 1271 ret = compress_zlib(ops[i], compress_xform, 1272 DEFAULT_MEM_LEVEL); 1273 if (ret < 0) { 1274 ret_status = -1; 1275 goto exit; 1276 } 1277 1278 ops_processed[i] = ops[i]; 1279 } 1280 } else { 1281 /* Create compress private xform data */ 1282 for (i = 0; i < num_xforms; i++) { 1283 ret = rte_compressdev_private_xform_create(0, 1284 (const struct rte_comp_xform *) 1285 compress_xforms[i], 1286 &priv_xforms[i]); 1287 if (ret < 0) { 1288 RTE_LOG(ERR, USER1, 1289 "Compression private xform " 1290 "could not be created\n"); 1291 ret_status = -1; 1292 goto exit; 1293 } 1294 num_priv_xforms++; 1295 } 1296 if (capa->comp_feature_flags & 1297 RTE_COMP_FF_SHAREABLE_PRIV_XFORM) { 1298 /* Attach shareable private xform data to ops */ 1299 for (i = 0; i < num_bufs; i++) 1300 ops[i]->private_xform = 1301 priv_xforms[i % num_xforms]; 1302 } else { 1303 /* Create rest of the private xforms for the other ops */ 1304 for (i = num_xforms; i < num_bufs; i++) { 1305 ret = rte_compressdev_private_xform_create(0, 1306 compress_xforms[i % num_xforms], 1307 &priv_xforms[i]); 1308 if (ret < 0) { 1309 RTE_LOG(ERR, USER1, 1310 "Compression private xform " 1311 "could not be created\n"); 1312 ret_status = -1; 1313 goto exit; 1314 } 1315 num_priv_xforms++; 1316 } 1317 /* Attach non shareable private xform data to ops */ 1318 for (i = 0; i < num_bufs; i++) 1319 ops[i]->private_xform = priv_xforms[i]; 1320 } 1321 1322 recovery_lb: 1323 ret = test_run_enqueue_dequeue(ops, ops_processed, num_bufs); 1324 if (ret < 0) { 1325 RTE_LOG(ERR, USER1, 1326 "Compression: enqueue/dequeue operation failed\n"); 1327 ret_status = -1; 1328 goto exit; 1329 } 1330 1331 for (i = 0; i < num_bufs; i++) { 1332 test_priv_data->compressed_data_size[i] += 1333 ops_processed[i]->produced; 1334 1335 if (ops_processed[i]->status == 1336 RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE) { 1337 1338 ops[i]->status = 1339 RTE_COMP_OP_STATUS_NOT_PROCESSED; 1340 ops[i]->src.offset += 1341 ops_processed[i]->consumed; 1342 ops[i]->src.length -= 1343 ops_processed[i]->consumed; 1344 ops[i]->dst.offset += 1345 ops_processed[i]->produced; 1346 1347 buf_ptr = rte_pktmbuf_append( 1348 ops[i]->m_dst, 1349 ops_processed[i]->produced); 1350 1351 if (buf_ptr == NULL) { 1352 RTE_LOG(ERR, USER1, 1353 "Data recovery: append extra bytes to the current mbuf failed\n"); 1354 ret_status = -1; 1355 goto exit; 1356 } 1357 goto recovery_lb; 1358 } 1359 } 1360 } 1361 1362 exit: 1363 /* Free resources */ 1364 if (ret_status < 0) 1365 for (i = 0; i < num_bufs; i++) { 1366 rte_comp_op_free(ops[i]); 1367 ops[i] = NULL; 1368 ops_processed[i] = NULL; 1369 } 1370 1371 /* Free compress private xforms */ 1372 for (i = 0; i < num_priv_xforms; i++) { 1373 if (priv_xforms[i] != NULL) { 1374 rte_compressdev_private_xform_free(0, priv_xforms[i]); 1375 priv_xforms[i] = NULL; 1376 } 1377 } 1378 1379 return ret_status; 1380 } 1381 1382 /** 1383 * Prints out the test report. Memory freeing. 1384 * 1385 * Called after successful compression. 1386 * Operation(s) status validation and decompression buffers freeing. 1387 1388 * -1 returned if function fail. 1389 * 1390 * @param int_data 1391 * Interim data containing session/transformation objects. 1392 * @param test_data 1393 * The test parameters set by users (command line parameters). 1394 * @param test_priv_data 1395 * A container used for aggregation all the private test arrays. 1396 * @return 1397 * - 2: Some operation is not supported 1398 * - 1: Decompression should be skipped 1399 * - 0: On success. 1400 * - -1: On error. 1401 */ 1402 static int 1403 test_deflate_comp_finalize(const struct interim_data_params *int_data, 1404 const struct test_data_params *test_data, 1405 const struct test_private_arrays *test_priv_data) 1406 { 1407 /* local variables: */ 1408 unsigned int i; 1409 struct priv_op_data *priv_data; 1410 1411 /* from int_data: */ 1412 unsigned int num_xforms = int_data->num_xforms; 1413 struct rte_comp_xform **compress_xforms = int_data->compress_xforms; 1414 unsigned int num_bufs = int_data->num_bufs; 1415 1416 /* from test_priv_data: */ 1417 struct rte_comp_op **ops_processed = test_priv_data->ops_processed; 1418 uint64_t *compress_checksum = test_priv_data->compress_checksum; 1419 struct rte_mbuf **uncomp_bufs = test_priv_data->uncomp_bufs; 1420 struct rte_comp_op **ops = test_priv_data->ops; 1421 1422 /* from test_data: */ 1423 unsigned int out_of_space = test_data->out_of_space; 1424 unsigned int zlib_compress = 1425 (test_data->zlib_dir == ZLIB_ALL || 1426 test_data->zlib_dir == ZLIB_COMPRESS); 1427 unsigned int zlib_decompress = 1428 (test_data->zlib_dir == ZLIB_ALL || 1429 test_data->zlib_dir == ZLIB_DECOMPRESS); 1430 1431 for (i = 0; i < num_bufs; i++) { 1432 priv_data = (struct priv_op_data *)(ops_processed[i] + 1); 1433 uint16_t xform_idx = priv_data->orig_idx % num_xforms; 1434 const struct rte_comp_compress_xform *compress_xform = 1435 &compress_xforms[xform_idx]->compress; 1436 enum rte_comp_huffman huffman_type = 1437 compress_xform->deflate.huffman; 1438 char engine[] = "zlib (directly, not PMD)"; 1439 if (zlib_decompress) 1440 strlcpy(engine, "PMD", sizeof(engine)); 1441 1442 RTE_LOG(DEBUG, USER1, "Buffer %u compressed by %s from %u to" 1443 " %u bytes (level = %d, huffman = %s)\n", 1444 i, engine, 1445 ops_processed[i]->consumed, ops_processed[i]->produced, 1446 compress_xform->level, 1447 huffman_type_strings[huffman_type]); 1448 RTE_LOG(DEBUG, USER1, "Compression ratio = %.2f\n", 1449 ops_processed[i]->consumed == 0 ? 0 : 1450 (float)ops_processed[i]->produced / 1451 ops_processed[i]->consumed * 100); 1452 if (compress_xform->chksum != RTE_COMP_CHECKSUM_NONE) 1453 compress_checksum[i] = ops_processed[i]->output_chksum; 1454 ops[i] = NULL; 1455 } 1456 1457 /* 1458 * Check operation status and free source mbufs (destination mbuf and 1459 * compress operation information is needed for the decompression stage) 1460 */ 1461 for (i = 0; i < num_bufs; i++) { 1462 if (out_of_space && !zlib_compress) { 1463 if (ops_processed[i]->status != 1464 RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) { 1465 RTE_LOG(ERR, USER1, 1466 "Operation without expected out of " 1467 "space status error\n"); 1468 return -1; 1469 } else 1470 continue; 1471 } 1472 1473 if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) { 1474 if (test_data->overflow == OVERFLOW_ENABLED) { 1475 if (ops_processed[i]->status == 1476 RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) { 1477 RTE_LOG(INFO, USER1, 1478 "Out-of-space-recoverable functionality" 1479 " is not supported on this device\n"); 1480 return 2; 1481 } 1482 } 1483 1484 RTE_LOG(ERR, USER1, 1485 "Comp: Some operations were not successful\n"); 1486 return -1; 1487 } 1488 priv_data = (struct priv_op_data *)(ops_processed[i] + 1); 1489 rte_pktmbuf_free(uncomp_bufs[priv_data->orig_idx]); 1490 uncomp_bufs[priv_data->orig_idx] = NULL; 1491 } 1492 1493 if (out_of_space && !zlib_compress) 1494 return 1; 1495 1496 return 0; 1497 } 1498 1499 /** 1500 * The main decompression function. 1501 * 1502 * Function performs decompression operation. 1503 * Operation(s) configuration, depending on CLI parameters. 1504 * Operation(s) processing. 1505 * 1506 * @param int_data 1507 * Interim data containing session/transformation objects. 1508 * @param test_data 1509 * The test parameters set by users (command line parameters). 1510 * @param test_priv_data 1511 * A container used for aggregation all the private test arrays. 1512 * @return 1513 * - 0: On success. 1514 * - -1: On error. 1515 */ 1516 static int 1517 test_deflate_decomp_run(const struct interim_data_params *int_data, 1518 const struct test_data_params *test_data, 1519 struct test_private_arrays *test_priv_data) 1520 { 1521 1522 /* local variables: */ 1523 struct priv_op_data *priv_data; 1524 unsigned int i; 1525 uint16_t num_priv_xforms = 0; 1526 int ret; 1527 int ret_status = 0; 1528 1529 struct comp_testsuite_params *ts_params = &testsuite_params; 1530 1531 /* from test_data: */ 1532 enum rte_comp_op_type operation_type = test_data->decompress_state; 1533 unsigned int zlib_decompress = 1534 (test_data->zlib_dir == ZLIB_ALL || 1535 test_data->zlib_dir == ZLIB_DECOMPRESS); 1536 1537 /* from int_data: */ 1538 struct rte_comp_xform **decompress_xforms = int_data->decompress_xforms; 1539 unsigned int num_xforms = int_data->num_xforms; 1540 unsigned int num_bufs = int_data->num_bufs; 1541 1542 /* from test_priv_data: */ 1543 struct rte_mbuf **uncomp_bufs = test_priv_data->uncomp_bufs; 1544 struct rte_mbuf **comp_bufs = test_priv_data->comp_bufs; 1545 struct rte_comp_op **ops = test_priv_data->ops; 1546 struct rte_comp_op **ops_processed = test_priv_data->ops_processed; 1547 void **priv_xforms = test_priv_data->priv_xforms; 1548 uint32_t *compressed_data_size = test_priv_data->compressed_data_size; 1549 void **stream = test_priv_data->stream; 1550 1551 const struct rte_compressdev_capabilities *capa = 1552 rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); 1553 1554 ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs); 1555 if (ret < 0) { 1556 RTE_LOG(ERR, USER1, 1557 "Decompress operations could not be allocated " 1558 "from the mempool\n"); 1559 ret_status = -1; 1560 goto exit; 1561 } 1562 1563 /* Source buffer is the compressed data from the previous operations */ 1564 for (i = 0; i < num_bufs; i++) { 1565 ops[i]->m_src = comp_bufs[i]; 1566 ops[i]->m_dst = uncomp_bufs[i]; 1567 ops[i]->src.offset = 0; 1568 /* 1569 * Set the length of the compressed data to the 1570 * number of bytes that were produced in the previous stage 1571 */ 1572 1573 if (compressed_data_size[i]) 1574 ops[i]->src.length = compressed_data_size[i]; 1575 else 1576 ops[i]->src.length = ops_processed[i]->produced; 1577 1578 ops[i]->dst.offset = 0; 1579 1580 if (operation_type == RTE_COMP_OP_STATELESS) { 1581 ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL; 1582 ops[i]->op_type = RTE_COMP_OP_STATELESS; 1583 } else if (!zlib_decompress) { 1584 ops[i]->flush_flag = RTE_COMP_FLUSH_SYNC; 1585 ops[i]->op_type = RTE_COMP_OP_STATEFUL; 1586 } else { 1587 RTE_LOG(ERR, USER1, 1588 "Decompression: stateful operations are" 1589 " not supported in these tests yet\n"); 1590 ret_status = -1; 1591 goto exit; 1592 } 1593 ops[i]->input_chksum = 0; 1594 /* 1595 * Copy private data from previous operations, 1596 * to keep the pointer to the original buffer 1597 */ 1598 memcpy(ops[i] + 1, ops_processed[i] + 1, 1599 sizeof(struct priv_op_data)); 1600 } 1601 1602 /* 1603 * Free the previous compress operations, 1604 * as they are not needed anymore 1605 */ 1606 rte_comp_op_bulk_free(ops_processed, num_bufs); 1607 1608 /* Decompress data (either with Zlib API or compressdev API */ 1609 if (zlib_decompress) { 1610 for (i = 0; i < num_bufs; i++) { 1611 priv_data = (struct priv_op_data *)(ops[i] + 1); 1612 uint16_t xform_idx = priv_data->orig_idx % num_xforms; 1613 const struct rte_comp_xform *decompress_xform = 1614 decompress_xforms[xform_idx]; 1615 1616 ret = decompress_zlib(ops[i], decompress_xform); 1617 if (ret < 0) { 1618 ret_status = -1; 1619 goto exit; 1620 } 1621 1622 ops_processed[i] = ops[i]; 1623 } 1624 } else { 1625 if (operation_type == RTE_COMP_OP_STATELESS) { 1626 /* Create decompress private xform data */ 1627 for (i = 0; i < num_xforms; i++) { 1628 ret = rte_compressdev_private_xform_create(0, 1629 (const struct rte_comp_xform *) 1630 decompress_xforms[i], 1631 &priv_xforms[i]); 1632 if (ret < 0) { 1633 RTE_LOG(ERR, USER1, 1634 "Decompression private xform " 1635 "could not be created\n"); 1636 ret_status = -1; 1637 goto exit; 1638 } 1639 num_priv_xforms++; 1640 } 1641 1642 if (capa->comp_feature_flags & 1643 RTE_COMP_FF_SHAREABLE_PRIV_XFORM) { 1644 /* Attach shareable private xform data to ops */ 1645 for (i = 0; i < num_bufs; i++) { 1646 priv_data = (struct priv_op_data *) 1647 (ops[i] + 1); 1648 uint16_t xform_idx = 1649 priv_data->orig_idx % num_xforms; 1650 ops[i]->private_xform = 1651 priv_xforms[xform_idx]; 1652 } 1653 } else { 1654 /* Create rest of the private xforms */ 1655 /* for the other ops */ 1656 for (i = num_xforms; i < num_bufs; i++) { 1657 ret = 1658 rte_compressdev_private_xform_create(0, 1659 decompress_xforms[i % num_xforms], 1660 &priv_xforms[i]); 1661 if (ret < 0) { 1662 RTE_LOG(ERR, USER1, 1663 "Decompression private xform" 1664 " could not be created\n"); 1665 ret_status = -1; 1666 goto exit; 1667 } 1668 num_priv_xforms++; 1669 } 1670 1671 /* Attach non shareable private xform data */ 1672 /* to ops */ 1673 for (i = 0; i < num_bufs; i++) { 1674 priv_data = (struct priv_op_data *) 1675 (ops[i] + 1); 1676 uint16_t xform_idx = 1677 priv_data->orig_idx; 1678 ops[i]->private_xform = 1679 priv_xforms[xform_idx]; 1680 } 1681 } 1682 } else { 1683 /* Create a stream object for stateful decompression */ 1684 ret = rte_compressdev_stream_create(0, 1685 decompress_xforms[0], stream); 1686 if (ret < 0) { 1687 RTE_LOG(ERR, USER1, 1688 "Decompression stream could not be created, error %d\n", 1689 ret); 1690 ret_status = -1; 1691 goto exit; 1692 } 1693 /* Attach stream to ops */ 1694 for (i = 0; i < num_bufs; i++) 1695 ops[i]->stream = *stream; 1696 } 1697 1698 test_priv_data->num_priv_xforms = num_priv_xforms; 1699 } 1700 1701 exit: 1702 return ret_status; 1703 } 1704 1705 /** 1706 * Prints out the test report. Memory freeing. 1707 * 1708 * Called after successful decompression. 1709 * Operation(s) status validation and compression buffers freeing. 1710 1711 * -1 returned if function fail. 1712 * 1713 * @param int_data 1714 * Interim data containing session/transformation objects. 1715 * @param test_data 1716 * The test parameters set by users (command line parameters). 1717 * @param test_priv_data 1718 * A container used for aggregation all the private test arrays. 1719 * @return 1720 * - 2: Next step must be executed by the caller (stateful decompression only) 1721 * - 1: On success (caller should stop and exit) 1722 * - 0: On success. 1723 * - -1: On error. 1724 */ 1725 static int 1726 test_deflate_decomp_finalize(const struct interim_data_params *int_data, 1727 const struct test_data_params *test_data, 1728 const struct test_private_arrays *test_priv_data) 1729 { 1730 /* local variables: */ 1731 unsigned int i; 1732 struct priv_op_data *priv_data; 1733 static unsigned int step; 1734 1735 /* from int_data: */ 1736 unsigned int num_bufs = int_data->num_bufs; 1737 const char * const *test_bufs = int_data->test_bufs; 1738 struct rte_comp_xform **compress_xforms = int_data->compress_xforms; 1739 1740 /* from test_priv_data: */ 1741 struct rte_comp_op **ops_processed = test_priv_data->ops_processed; 1742 struct rte_mbuf **comp_bufs = test_priv_data->comp_bufs; 1743 struct rte_comp_op **ops = test_priv_data->ops; 1744 uint64_t *compress_checksum = test_priv_data->compress_checksum; 1745 unsigned int *decomp_produced_data_size = 1746 test_priv_data->decomp_produced_data_size; 1747 char **all_decomp_data = test_priv_data->all_decomp_data; 1748 1749 /* from test_data: */ 1750 unsigned int out_of_space = test_data->out_of_space; 1751 enum rte_comp_op_type operation_type = test_data->decompress_state; 1752 1753 unsigned int zlib_compress = 1754 (test_data->zlib_dir == ZLIB_ALL || 1755 test_data->zlib_dir == ZLIB_COMPRESS); 1756 unsigned int zlib_decompress = 1757 (test_data->zlib_dir == ZLIB_ALL || 1758 test_data->zlib_dir == ZLIB_DECOMPRESS); 1759 1760 for (i = 0; i < num_bufs; i++) { 1761 priv_data = (struct priv_op_data *)(ops_processed[i] + 1); 1762 char engine[] = "zlib, (directly, no PMD)"; 1763 if (zlib_compress) 1764 strlcpy(engine, "pmd", sizeof(engine)); 1765 RTE_LOG(DEBUG, USER1, 1766 "Buffer %u decompressed by %s from %u to %u bytes\n", 1767 i, engine, 1768 ops_processed[i]->consumed, ops_processed[i]->produced); 1769 ops[i] = NULL; 1770 } 1771 1772 /* 1773 * Check operation status and free source mbuf (destination mbuf and 1774 * compress operation information is still needed) 1775 */ 1776 for (i = 0; i < num_bufs; i++) { 1777 if (out_of_space && !zlib_decompress) { 1778 if (ops_processed[i]->status != 1779 RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) { 1780 1781 RTE_LOG(ERR, USER1, 1782 "Operation without expected out of " 1783 "space status error\n"); 1784 return -1; 1785 } else 1786 continue; 1787 } 1788 1789 if (operation_type == RTE_COMP_OP_STATEFUL 1790 && (ops_processed[i]->status == 1791 RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE 1792 || ops_processed[i]->status == 1793 RTE_COMP_OP_STATUS_SUCCESS)) { 1794 1795 RTE_LOG(DEBUG, USER1, 1796 ".............RECOVERABLE\n"); 1797 1798 /* collect the output into all_decomp_data */ 1799 const void *ptr = rte_pktmbuf_read( 1800 ops_processed[i]->m_dst, 1801 ops_processed[i]->dst.offset, 1802 ops_processed[i]->produced, 1803 *all_decomp_data + 1804 *decomp_produced_data_size); 1805 if (ptr != *all_decomp_data + 1806 *decomp_produced_data_size) 1807 rte_memcpy(*all_decomp_data + 1808 *decomp_produced_data_size, 1809 ptr, ops_processed[i]->produced); 1810 1811 *decomp_produced_data_size += 1812 ops_processed[i]->produced; 1813 if (ops_processed[i]->src.length > 1814 ops_processed[i]->consumed) { 1815 if (ops_processed[i]->status == 1816 RTE_COMP_OP_STATUS_SUCCESS) { 1817 RTE_LOG(ERR, USER1, 1818 "Operation finished too early\n"); 1819 return -1; 1820 } 1821 step++; 1822 if (step >= test_data->decompress_steps_max) { 1823 RTE_LOG(ERR, USER1, 1824 "Operation exceeded maximum steps\n"); 1825 return -1; 1826 } 1827 ops[i] = ops_processed[i]; 1828 ops[i]->status = 1829 RTE_COMP_OP_STATUS_NOT_PROCESSED; 1830 ops[i]->src.offset += 1831 ops_processed[i]->consumed; 1832 ops[i]->src.length -= 1833 ops_processed[i]->consumed; 1834 /* repeat the operation */ 1835 return 2; 1836 } else { 1837 /* Compare the original stream with the */ 1838 /* decompressed stream (in size and the data) */ 1839 priv_data = (struct priv_op_data *) 1840 (ops_processed[i] + 1); 1841 const char *buf1 = 1842 test_bufs[priv_data->orig_idx]; 1843 const char *buf2 = *all_decomp_data; 1844 1845 if (compare_buffers(buf1, strlen(buf1) + 1, 1846 buf2, *decomp_produced_data_size) < 0) 1847 return -1; 1848 /* Test checksums */ 1849 if (compress_xforms[0]->compress.chksum 1850 != RTE_COMP_CHECKSUM_NONE) { 1851 if (ops_processed[i]->output_chksum 1852 != compress_checksum[i]) { 1853 RTE_LOG(ERR, USER1, 1854 "The checksums differ\n" 1855 "Compression Checksum: %" PRIu64 "\tDecompression " 1856 "Checksum: %" PRIu64 "\n", compress_checksum[i], 1857 ops_processed[i]->output_chksum); 1858 return -1; 1859 } 1860 } 1861 } 1862 } else if (ops_processed[i]->status != 1863 RTE_COMP_OP_STATUS_SUCCESS) { 1864 RTE_LOG(ERR, USER1, 1865 "Decomp: Some operations were not successful, status = %u\n", 1866 ops_processed[i]->status); 1867 return -1; 1868 } 1869 priv_data = (struct priv_op_data *)(ops_processed[i] + 1); 1870 rte_pktmbuf_free(comp_bufs[priv_data->orig_idx]); 1871 comp_bufs[priv_data->orig_idx] = NULL; 1872 } 1873 1874 if (out_of_space && !zlib_decompress) 1875 return 1; 1876 1877 return 0; 1878 } 1879 1880 /** 1881 * Validation of the output (compression/decompression) data. 1882 * 1883 * The function compares the source stream with the output stream, 1884 * after decompression, to check if compression/decompression 1885 * was correct. 1886 * -1 returned if function fail. 1887 * 1888 * @param int_data 1889 * Interim data containing session/transformation objects. 1890 * @param test_data 1891 * The test parameters set by users (command line parameters). 1892 * @param test_priv_data 1893 * A container used for aggregation all the private test arrays. 1894 * @return 1895 * - 0: On success. 1896 * - -1: On error. 1897 */ 1898 static int 1899 test_results_validation(const struct interim_data_params *int_data, 1900 const struct test_data_params *test_data, 1901 const struct test_private_arrays *test_priv_data) 1902 { 1903 /* local variables: */ 1904 unsigned int i; 1905 struct priv_op_data *priv_data; 1906 const char *buf1; 1907 const char *buf2; 1908 char *contig_buf = NULL; 1909 uint32_t data_size; 1910 1911 /* from int_data: */ 1912 struct rte_comp_xform **compress_xforms = int_data->compress_xforms; 1913 unsigned int num_bufs = int_data->num_bufs; 1914 const char * const *test_bufs = int_data->test_bufs; 1915 1916 /* from test_priv_data: */ 1917 uint64_t *compress_checksum = test_priv_data->compress_checksum; 1918 struct rte_comp_op **ops_processed = test_priv_data->ops_processed; 1919 1920 /* 1921 * Compare the original stream with the decompressed stream 1922 * (in size and the data) 1923 */ 1924 for (i = 0; i < num_bufs; i++) { 1925 priv_data = (struct priv_op_data *)(ops_processed[i] + 1); 1926 buf1 = test_data->use_external_mbufs ? 1927 test_data->inbuf_memzone->addr : 1928 test_bufs[priv_data->orig_idx]; 1929 data_size = test_data->use_external_mbufs ? 1930 test_data->inbuf_data_size : 1931 strlen(buf1) + 1; 1932 1933 contig_buf = rte_malloc(NULL, ops_processed[i]->produced, 0); 1934 if (contig_buf == NULL) { 1935 RTE_LOG(ERR, USER1, "Contiguous buffer could not " 1936 "be allocated\n"); 1937 goto exit; 1938 } 1939 1940 buf2 = rte_pktmbuf_read(ops_processed[i]->m_dst, 0, 1941 ops_processed[i]->produced, contig_buf); 1942 if (compare_buffers(buf1, data_size, 1943 buf2, ops_processed[i]->produced) < 0) 1944 goto exit; 1945 1946 /* Test checksums */ 1947 if (compress_xforms[0]->compress.chksum != 1948 RTE_COMP_CHECKSUM_NONE) { 1949 if (ops_processed[i]->output_chksum != 1950 compress_checksum[i]) { 1951 RTE_LOG(ERR, USER1, "The checksums differ\n" 1952 "Compression Checksum: %" PRIu64 "\tDecompression " 1953 "Checksum: %" PRIu64 "\n", compress_checksum[i], 1954 ops_processed[i]->output_chksum); 1955 goto exit; 1956 } 1957 } 1958 1959 rte_free(contig_buf); 1960 contig_buf = NULL; 1961 } 1962 return 0; 1963 1964 exit: 1965 rte_free(contig_buf); 1966 return -1; 1967 } 1968 1969 /** 1970 * Compresses and decompresses input stream with compressdev API and Zlib API 1971 * 1972 * Basic test function. Common for all the functional tests. 1973 * -1 returned if function fail. 1974 * 1975 * @param int_data 1976 * Interim data containing session/transformation objects. 1977 * @param test_data 1978 * The test parameters set by users (command line parameters). 1979 * @return 1980 * - 1: Some operation not supported 1981 * - 0: On success. 1982 * - -1: On error. 1983 */ 1984 1985 static int 1986 test_deflate_comp_decomp(const struct interim_data_params *int_data, 1987 const struct test_data_params *test_data) 1988 { 1989 unsigned int num_bufs = int_data->num_bufs; 1990 unsigned int out_of_space = test_data->out_of_space; 1991 1992 void *stream = NULL; 1993 char *all_decomp_data = NULL; 1994 unsigned int decomp_produced_data_size = 0; 1995 1996 int ret_status = -1; 1997 int ret; 1998 struct rte_mbuf *uncomp_bufs[num_bufs]; 1999 struct rte_mbuf *comp_bufs[num_bufs]; 2000 struct rte_comp_op *ops[num_bufs]; 2001 struct rte_comp_op *ops_processed[num_bufs]; 2002 void *priv_xforms[num_bufs]; 2003 unsigned int i; 2004 2005 uint64_t compress_checksum[num_bufs]; 2006 uint32_t compressed_data_size[num_bufs]; 2007 char *contig_buf = NULL; 2008 2009 struct rte_mbuf_ext_shared_info compbuf_info; 2010 struct rte_mbuf_ext_shared_info decompbuf_info; 2011 2012 const struct rte_compressdev_capabilities *capa; 2013 2014 /* Compressing with CompressDev */ 2015 unsigned int zlib_compress = 2016 (test_data->zlib_dir == ZLIB_ALL || 2017 test_data->zlib_dir == ZLIB_COMPRESS); 2018 unsigned int zlib_decompress = 2019 (test_data->zlib_dir == ZLIB_ALL || 2020 test_data->zlib_dir == ZLIB_DECOMPRESS); 2021 2022 struct test_private_arrays test_priv_data; 2023 2024 test_priv_data.uncomp_bufs = uncomp_bufs; 2025 test_priv_data.comp_bufs = comp_bufs; 2026 test_priv_data.ops = ops; 2027 test_priv_data.ops_processed = ops_processed; 2028 test_priv_data.priv_xforms = priv_xforms; 2029 test_priv_data.compress_checksum = compress_checksum; 2030 test_priv_data.compressed_data_size = compressed_data_size; 2031 2032 test_priv_data.stream = &stream; 2033 test_priv_data.all_decomp_data = &all_decomp_data; 2034 test_priv_data.decomp_produced_data_size = &decomp_produced_data_size; 2035 2036 test_priv_data.num_priv_xforms = 0; /* it's used for decompression only */ 2037 2038 capa = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); 2039 if (capa == NULL) { 2040 RTE_LOG(ERR, USER1, 2041 "Compress device does not support DEFLATE\n"); 2042 return -1; 2043 } 2044 2045 /* Prepare the source mbufs with the data */ 2046 ret = test_setup_com_bufs(int_data, test_data, &test_priv_data); 2047 if (ret < 0) { 2048 ret_status = -1; 2049 goto exit; 2050 } 2051 2052 RTE_LOG(DEBUG, USER1, "<<< COMPRESSION >>>\n"); 2053 2054 /* COMPRESSION */ 2055 2056 /* Prepare output (destination) mbufs for compressed data */ 2057 ret = test_setup_output_bufs( 2058 OPERATION_COMPRESSION, 2059 out_of_space == 1 && !zlib_compress, 2060 &test_priv_data, 2061 int_data, 2062 test_data, 2063 &compbuf_info); 2064 if (ret < 0) { 2065 ret_status = -1; 2066 goto exit; 2067 } 2068 2069 /* Run compression */ 2070 ret = test_deflate_comp_run(int_data, test_data, &test_priv_data); 2071 if (ret < 0) { 2072 ret_status = -1; 2073 goto exit; 2074 } 2075 2076 ret = test_deflate_comp_finalize(int_data, test_data, &test_priv_data); 2077 if (ret < 0) { 2078 ret_status = -1; 2079 goto exit; 2080 } else if (ret == 1) { 2081 ret_status = 0; 2082 goto exit; 2083 } else if (ret == 2) { 2084 ret_status = 1; /* some operation not supported */ 2085 goto exit; 2086 } 2087 2088 /* DECOMPRESSION */ 2089 2090 RTE_LOG(DEBUG, USER1, "<<< DECOMPRESSION >>>\n"); 2091 2092 /* Prepare output (destination) mbufs for decompressed data */ 2093 ret = test_setup_output_bufs( 2094 OPERATION_DECOMPRESSION, 2095 out_of_space == 1 && !zlib_decompress, 2096 &test_priv_data, 2097 int_data, 2098 test_data, 2099 &decompbuf_info); 2100 if (ret < 0) { 2101 ret_status = -1; 2102 goto exit; 2103 } 2104 2105 /* Run decompression */ 2106 ret = test_deflate_decomp_run(int_data, test_data, &test_priv_data); 2107 if (ret < 0) { 2108 ret_status = -1; 2109 goto exit; 2110 } 2111 2112 if (!zlib_decompress) { 2113 next_step: /* next step for stateful decompression only */ 2114 ret = test_run_enqueue_dequeue(ops, ops_processed, num_bufs); 2115 if (ret < 0) { 2116 ret_status = -1; 2117 RTE_LOG(ERR, USER1, 2118 "Decompression: enqueue/dequeue operation failed\n"); 2119 } 2120 } 2121 2122 ret = test_deflate_decomp_finalize(int_data, test_data, &test_priv_data); 2123 if (ret < 0) { 2124 ret_status = -1; 2125 goto exit; 2126 } else if (ret == 1) { 2127 ret_status = 0; 2128 goto exit; 2129 } else if (ret == 2) { 2130 goto next_step; 2131 } 2132 2133 /* FINAL PROCESSING */ 2134 2135 ret = test_results_validation(int_data, test_data, &test_priv_data); 2136 if (ret < 0) { 2137 ret_status = -1; 2138 goto exit; 2139 } 2140 ret_status = 0; 2141 2142 exit: 2143 /* Free resources */ 2144 2145 if (stream != NULL) 2146 rte_compressdev_stream_free(0, stream); 2147 if (all_decomp_data != NULL) 2148 rte_free(all_decomp_data); 2149 2150 /* Free compress private xforms */ 2151 for (i = 0; i < test_priv_data.num_priv_xforms; i++) { 2152 if (priv_xforms[i] != NULL) { 2153 rte_compressdev_private_xform_free(0, priv_xforms[i]); 2154 priv_xforms[i] = NULL; 2155 } 2156 } 2157 for (i = 0; i < num_bufs; i++) { 2158 rte_pktmbuf_free(uncomp_bufs[i]); 2159 rte_pktmbuf_free(comp_bufs[i]); 2160 rte_comp_op_free(ops[i]); 2161 rte_comp_op_free(ops_processed[i]); 2162 } 2163 rte_free(contig_buf); 2164 2165 return ret_status; 2166 } 2167 2168 static int 2169 test_compressdev_deflate_stateless_fixed(void) 2170 { 2171 struct comp_testsuite_params *ts_params = &testsuite_params; 2172 uint16_t i; 2173 int ret; 2174 const struct rte_compressdev_capabilities *capab; 2175 2176 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); 2177 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities"); 2178 2179 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0) 2180 return -ENOTSUP; 2181 2182 struct rte_comp_xform *compress_xform = 2183 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0); 2184 2185 if (compress_xform == NULL) { 2186 RTE_LOG(ERR, USER1, 2187 "Compress xform could not be created\n"); 2188 ret = TEST_FAILED; 2189 goto exit; 2190 } 2191 2192 memcpy(compress_xform, ts_params->def_comp_xform, 2193 sizeof(struct rte_comp_xform)); 2194 compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_FIXED; 2195 2196 struct interim_data_params int_data = { 2197 NULL, 2198 1, 2199 NULL, 2200 &compress_xform, 2201 &ts_params->def_decomp_xform, 2202 1 2203 }; 2204 2205 struct test_data_params test_data = { 2206 .compress_state = RTE_COMP_OP_STATELESS, 2207 .decompress_state = RTE_COMP_OP_STATELESS, 2208 .buff_type = LB_BOTH, 2209 .zlib_dir = ZLIB_DECOMPRESS, 2210 .out_of_space = 0, 2211 .big_data = 0, 2212 .overflow = OVERFLOW_DISABLED, 2213 .ratio = RATIO_ENABLED 2214 }; 2215 2216 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) { 2217 int_data.test_bufs = &compress_test_bufs[i]; 2218 int_data.buf_idx = &i; 2219 2220 /* Compress with compressdev, decompress with Zlib */ 2221 test_data.zlib_dir = ZLIB_DECOMPRESS; 2222 ret = test_deflate_comp_decomp(&int_data, &test_data); 2223 if (ret < 0) 2224 goto exit; 2225 2226 /* Compress with Zlib, decompress with compressdev */ 2227 test_data.zlib_dir = ZLIB_COMPRESS; 2228 ret = test_deflate_comp_decomp(&int_data, &test_data); 2229 if (ret < 0) 2230 goto exit; 2231 } 2232 2233 ret = TEST_SUCCESS; 2234 2235 exit: 2236 rte_free(compress_xform); 2237 return ret; 2238 } 2239 2240 static int 2241 test_compressdev_deflate_stateless_dynamic(void) 2242 { 2243 struct comp_testsuite_params *ts_params = &testsuite_params; 2244 uint16_t i; 2245 int ret; 2246 struct rte_comp_xform *compress_xform = 2247 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0); 2248 2249 const struct rte_compressdev_capabilities *capab; 2250 2251 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); 2252 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities"); 2253 2254 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0) 2255 return -ENOTSUP; 2256 2257 if (compress_xform == NULL) { 2258 RTE_LOG(ERR, USER1, 2259 "Compress xform could not be created\n"); 2260 ret = TEST_FAILED; 2261 goto exit; 2262 } 2263 2264 memcpy(compress_xform, ts_params->def_comp_xform, 2265 sizeof(struct rte_comp_xform)); 2266 compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC; 2267 2268 struct interim_data_params int_data = { 2269 NULL, 2270 1, 2271 NULL, 2272 &compress_xform, 2273 &ts_params->def_decomp_xform, 2274 1 2275 }; 2276 2277 struct test_data_params test_data = { 2278 .compress_state = RTE_COMP_OP_STATELESS, 2279 .decompress_state = RTE_COMP_OP_STATELESS, 2280 .buff_type = LB_BOTH, 2281 .zlib_dir = ZLIB_DECOMPRESS, 2282 .out_of_space = 0, 2283 .big_data = 0, 2284 .overflow = OVERFLOW_DISABLED, 2285 .ratio = RATIO_ENABLED 2286 }; 2287 2288 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) { 2289 int_data.test_bufs = &compress_test_bufs[i]; 2290 int_data.buf_idx = &i; 2291 2292 /* Compress with compressdev, decompress with Zlib */ 2293 test_data.zlib_dir = ZLIB_DECOMPRESS; 2294 ret = test_deflate_comp_decomp(&int_data, &test_data); 2295 if (ret < 0) 2296 goto exit; 2297 2298 /* Compress with Zlib, decompress with compressdev */ 2299 test_data.zlib_dir = ZLIB_COMPRESS; 2300 ret = test_deflate_comp_decomp(&int_data, &test_data); 2301 if (ret < 0) 2302 goto exit; 2303 } 2304 2305 ret = TEST_SUCCESS; 2306 2307 exit: 2308 rte_free(compress_xform); 2309 return ret; 2310 } 2311 2312 static int 2313 test_compressdev_deflate_stateless_multi_op(void) 2314 { 2315 struct comp_testsuite_params *ts_params = &testsuite_params; 2316 uint16_t num_bufs = RTE_DIM(compress_test_bufs); 2317 uint16_t buf_idx[num_bufs]; 2318 uint16_t i; 2319 int ret; 2320 2321 for (i = 0; i < num_bufs; i++) 2322 buf_idx[i] = i; 2323 2324 struct interim_data_params int_data = { 2325 compress_test_bufs, 2326 num_bufs, 2327 buf_idx, 2328 &ts_params->def_comp_xform, 2329 &ts_params->def_decomp_xform, 2330 1 2331 }; 2332 2333 struct test_data_params test_data = { 2334 .compress_state = RTE_COMP_OP_STATELESS, 2335 .decompress_state = RTE_COMP_OP_STATELESS, 2336 .buff_type = LB_BOTH, 2337 .zlib_dir = ZLIB_DECOMPRESS, 2338 .out_of_space = 0, 2339 .big_data = 0, 2340 .overflow = OVERFLOW_DISABLED, 2341 .ratio = RATIO_ENABLED 2342 }; 2343 2344 /* Compress with compressdev, decompress with Zlib */ 2345 test_data.zlib_dir = ZLIB_DECOMPRESS; 2346 ret = test_deflate_comp_decomp(&int_data, &test_data); 2347 if (ret < 0) 2348 return ret; 2349 2350 /* Compress with Zlib, decompress with compressdev */ 2351 test_data.zlib_dir = ZLIB_COMPRESS; 2352 ret = test_deflate_comp_decomp(&int_data, &test_data); 2353 if (ret < 0) 2354 return ret; 2355 2356 return TEST_SUCCESS; 2357 } 2358 2359 static int 2360 test_compressdev_deflate_stateless_multi_level(void) 2361 { 2362 struct comp_testsuite_params *ts_params = &testsuite_params; 2363 unsigned int level; 2364 uint16_t i; 2365 int ret; 2366 struct rte_comp_xform *compress_xform = 2367 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0); 2368 2369 if (compress_xform == NULL) { 2370 RTE_LOG(ERR, USER1, 2371 "Compress xform could not be created\n"); 2372 ret = TEST_FAILED; 2373 goto exit; 2374 } 2375 2376 memcpy(compress_xform, ts_params->def_comp_xform, 2377 sizeof(struct rte_comp_xform)); 2378 2379 struct interim_data_params int_data = { 2380 NULL, 2381 1, 2382 NULL, 2383 &compress_xform, 2384 &ts_params->def_decomp_xform, 2385 1 2386 }; 2387 2388 struct test_data_params test_data = { 2389 .compress_state = RTE_COMP_OP_STATELESS, 2390 .decompress_state = RTE_COMP_OP_STATELESS, 2391 .buff_type = LB_BOTH, 2392 .zlib_dir = ZLIB_DECOMPRESS, 2393 .out_of_space = 0, 2394 .big_data = 0, 2395 .overflow = OVERFLOW_DISABLED, 2396 .ratio = RATIO_ENABLED 2397 }; 2398 2399 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) { 2400 int_data.test_bufs = &compress_test_bufs[i]; 2401 int_data.buf_idx = &i; 2402 2403 for (level = RTE_COMP_LEVEL_MIN; level <= RTE_COMP_LEVEL_MAX; 2404 level++) { 2405 compress_xform->compress.level = level; 2406 /* Compress with compressdev, decompress with Zlib */ 2407 test_data.zlib_dir = ZLIB_DECOMPRESS; 2408 ret = test_deflate_comp_decomp(&int_data, &test_data); 2409 if (ret < 0) 2410 goto exit; 2411 } 2412 } 2413 2414 ret = TEST_SUCCESS; 2415 2416 exit: 2417 rte_free(compress_xform); 2418 return ret; 2419 } 2420 2421 #define NUM_XFORMS 3 2422 static int 2423 test_compressdev_deflate_stateless_multi_xform(void) 2424 { 2425 struct comp_testsuite_params *ts_params = &testsuite_params; 2426 uint16_t num_bufs = NUM_XFORMS; 2427 struct rte_comp_xform *compress_xforms[NUM_XFORMS] = {NULL}; 2428 struct rte_comp_xform *decompress_xforms[NUM_XFORMS] = {NULL}; 2429 const char *test_buffers[NUM_XFORMS]; 2430 uint16_t i; 2431 unsigned int level = RTE_COMP_LEVEL_MIN; 2432 uint16_t buf_idx[num_bufs]; 2433 int ret; 2434 2435 /* Create multiple xforms with various levels */ 2436 for (i = 0; i < NUM_XFORMS; i++) { 2437 compress_xforms[i] = rte_malloc(NULL, 2438 sizeof(struct rte_comp_xform), 0); 2439 if (compress_xforms[i] == NULL) { 2440 RTE_LOG(ERR, USER1, 2441 "Compress xform could not be created\n"); 2442 ret = TEST_FAILED; 2443 goto exit; 2444 } 2445 2446 memcpy(compress_xforms[i], ts_params->def_comp_xform, 2447 sizeof(struct rte_comp_xform)); 2448 compress_xforms[i]->compress.level = level; 2449 level++; 2450 2451 decompress_xforms[i] = rte_malloc(NULL, 2452 sizeof(struct rte_comp_xform), 0); 2453 if (decompress_xforms[i] == NULL) { 2454 RTE_LOG(ERR, USER1, 2455 "Decompress xform could not be created\n"); 2456 ret = TEST_FAILED; 2457 goto exit; 2458 } 2459 2460 memcpy(decompress_xforms[i], ts_params->def_decomp_xform, 2461 sizeof(struct rte_comp_xform)); 2462 } 2463 2464 for (i = 0; i < NUM_XFORMS; i++) { 2465 buf_idx[i] = 0; 2466 /* Use the same buffer in all sessions */ 2467 test_buffers[i] = compress_test_bufs[0]; 2468 } 2469 2470 struct interim_data_params int_data = { 2471 test_buffers, 2472 num_bufs, 2473 buf_idx, 2474 compress_xforms, 2475 decompress_xforms, 2476 NUM_XFORMS 2477 }; 2478 2479 struct test_data_params test_data = { 2480 .compress_state = RTE_COMP_OP_STATELESS, 2481 .decompress_state = RTE_COMP_OP_STATELESS, 2482 .buff_type = LB_BOTH, 2483 .zlib_dir = ZLIB_DECOMPRESS, 2484 .out_of_space = 0, 2485 .big_data = 0, 2486 .overflow = OVERFLOW_DISABLED, 2487 .ratio = RATIO_ENABLED 2488 }; 2489 2490 /* Compress with compressdev, decompress with Zlib */ 2491 ret = test_deflate_comp_decomp(&int_data, &test_data); 2492 if (ret < 0) 2493 goto exit; 2494 2495 ret = TEST_SUCCESS; 2496 2497 exit: 2498 for (i = 0; i < NUM_XFORMS; i++) { 2499 rte_free(compress_xforms[i]); 2500 rte_free(decompress_xforms[i]); 2501 } 2502 2503 return ret; 2504 } 2505 2506 static int 2507 test_compressdev_deflate_stateless_sgl(void) 2508 { 2509 struct comp_testsuite_params *ts_params = &testsuite_params; 2510 uint16_t i; 2511 int ret; 2512 const struct rte_compressdev_capabilities *capab; 2513 2514 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); 2515 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities"); 2516 2517 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0) 2518 return -ENOTSUP; 2519 2520 struct interim_data_params int_data = { 2521 NULL, 2522 1, 2523 NULL, 2524 &ts_params->def_comp_xform, 2525 &ts_params->def_decomp_xform, 2526 1 2527 }; 2528 2529 struct test_data_params test_data = { 2530 .compress_state = RTE_COMP_OP_STATELESS, 2531 .decompress_state = RTE_COMP_OP_STATELESS, 2532 .buff_type = SGL_BOTH, 2533 .zlib_dir = ZLIB_DECOMPRESS, 2534 .out_of_space = 0, 2535 .big_data = 0, 2536 .overflow = OVERFLOW_DISABLED, 2537 .ratio = RATIO_ENABLED 2538 }; 2539 2540 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) { 2541 int_data.test_bufs = &compress_test_bufs[i]; 2542 int_data.buf_idx = &i; 2543 2544 /* Compress with compressdev, decompress with Zlib */ 2545 test_data.zlib_dir = ZLIB_DECOMPRESS; 2546 ret = test_deflate_comp_decomp(&int_data, &test_data); 2547 if (ret < 0) 2548 return ret; 2549 2550 /* Compress with Zlib, decompress with compressdev */ 2551 test_data.zlib_dir = ZLIB_COMPRESS; 2552 ret = test_deflate_comp_decomp(&int_data, &test_data); 2553 if (ret < 0) 2554 return ret; 2555 2556 if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_LB_OUT) { 2557 /* Compress with compressdev, decompress with Zlib */ 2558 test_data.zlib_dir = ZLIB_DECOMPRESS; 2559 test_data.buff_type = SGL_TO_LB; 2560 ret = test_deflate_comp_decomp(&int_data, &test_data); 2561 if (ret < 0) 2562 return ret; 2563 2564 /* Compress with Zlib, decompress with compressdev */ 2565 test_data.zlib_dir = ZLIB_COMPRESS; 2566 test_data.buff_type = SGL_TO_LB; 2567 ret = test_deflate_comp_decomp(&int_data, &test_data); 2568 if (ret < 0) 2569 return ret; 2570 } 2571 2572 if (capab->comp_feature_flags & RTE_COMP_FF_OOP_LB_IN_SGL_OUT) { 2573 /* Compress with compressdev, decompress with Zlib */ 2574 test_data.zlib_dir = ZLIB_DECOMPRESS; 2575 test_data.buff_type = LB_TO_SGL; 2576 ret = test_deflate_comp_decomp(&int_data, &test_data); 2577 if (ret < 0) 2578 return ret; 2579 2580 /* Compress with Zlib, decompress with compressdev */ 2581 test_data.zlib_dir = ZLIB_COMPRESS; 2582 test_data.buff_type = LB_TO_SGL; 2583 ret = test_deflate_comp_decomp(&int_data, &test_data); 2584 if (ret < 0) 2585 return ret; 2586 } 2587 } 2588 2589 return TEST_SUCCESS; 2590 } 2591 2592 static int 2593 test_compressdev_deflate_stateless_checksum(void) 2594 { 2595 struct comp_testsuite_params *ts_params = &testsuite_params; 2596 uint16_t i; 2597 int ret; 2598 const struct rte_compressdev_capabilities *capab; 2599 2600 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); 2601 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities"); 2602 2603 /* Check if driver supports any checksum */ 2604 if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM) == 0 && 2605 (capab->comp_feature_flags & 2606 RTE_COMP_FF_ADLER32_CHECKSUM) == 0 && 2607 (capab->comp_feature_flags & 2608 RTE_COMP_FF_CRC32_ADLER32_CHECKSUM) == 0) 2609 return -ENOTSUP; 2610 2611 struct rte_comp_xform *compress_xform = 2612 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0); 2613 if (compress_xform == NULL) { 2614 RTE_LOG(ERR, USER1, "Compress xform could not be created\n"); 2615 return TEST_FAILED; 2616 } 2617 2618 memcpy(compress_xform, ts_params->def_comp_xform, 2619 sizeof(struct rte_comp_xform)); 2620 2621 struct rte_comp_xform *decompress_xform = 2622 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0); 2623 if (decompress_xform == NULL) { 2624 RTE_LOG(ERR, USER1, "Decompress xform could not be created\n"); 2625 rte_free(compress_xform); 2626 return TEST_FAILED; 2627 } 2628 2629 memcpy(decompress_xform, ts_params->def_decomp_xform, 2630 sizeof(struct rte_comp_xform)); 2631 2632 struct interim_data_params int_data = { 2633 NULL, 2634 1, 2635 NULL, 2636 &compress_xform, 2637 &decompress_xform, 2638 1 2639 }; 2640 2641 struct test_data_params test_data = { 2642 .compress_state = RTE_COMP_OP_STATELESS, 2643 .decompress_state = RTE_COMP_OP_STATELESS, 2644 .buff_type = LB_BOTH, 2645 .zlib_dir = ZLIB_DECOMPRESS, 2646 .out_of_space = 0, 2647 .big_data = 0, 2648 .overflow = OVERFLOW_DISABLED, 2649 .ratio = RATIO_ENABLED 2650 }; 2651 2652 /* Check if driver supports crc32 checksum and test */ 2653 if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM)) { 2654 compress_xform->compress.chksum = RTE_COMP_CHECKSUM_CRC32; 2655 decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_CRC32; 2656 2657 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) { 2658 /* Compress with compressdev, decompress with Zlib */ 2659 int_data.test_bufs = &compress_test_bufs[i]; 2660 int_data.buf_idx = &i; 2661 2662 /* Generate zlib checksum and test against selected 2663 * drivers decompression checksum 2664 */ 2665 test_data.zlib_dir = ZLIB_COMPRESS; 2666 ret = test_deflate_comp_decomp(&int_data, &test_data); 2667 if (ret < 0) 2668 goto exit; 2669 2670 /* Generate compression and decompression 2671 * checksum of selected driver 2672 */ 2673 test_data.zlib_dir = ZLIB_NONE; 2674 ret = test_deflate_comp_decomp(&int_data, &test_data); 2675 if (ret < 0) 2676 goto exit; 2677 } 2678 } 2679 2680 /* Check if driver supports adler32 checksum and test */ 2681 if ((capab->comp_feature_flags & RTE_COMP_FF_ADLER32_CHECKSUM)) { 2682 compress_xform->compress.chksum = RTE_COMP_CHECKSUM_ADLER32; 2683 decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_ADLER32; 2684 2685 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) { 2686 int_data.test_bufs = &compress_test_bufs[i]; 2687 int_data.buf_idx = &i; 2688 2689 /* Generate zlib checksum and test against selected 2690 * drivers decompression checksum 2691 */ 2692 test_data.zlib_dir = ZLIB_COMPRESS; 2693 ret = test_deflate_comp_decomp(&int_data, &test_data); 2694 if (ret < 0) 2695 goto exit; 2696 /* Generate compression and decompression 2697 * checksum of selected driver 2698 */ 2699 test_data.zlib_dir = ZLIB_NONE; 2700 ret = test_deflate_comp_decomp(&int_data, &test_data); 2701 if (ret < 0) 2702 goto exit; 2703 } 2704 } 2705 2706 /* Check if driver supports combined crc and adler checksum and test */ 2707 if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_ADLER32_CHECKSUM)) { 2708 compress_xform->compress.chksum = 2709 RTE_COMP_CHECKSUM_CRC32_ADLER32; 2710 decompress_xform->decompress.chksum = 2711 RTE_COMP_CHECKSUM_CRC32_ADLER32; 2712 2713 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) { 2714 int_data.test_bufs = &compress_test_bufs[i]; 2715 int_data.buf_idx = &i; 2716 2717 /* Generate compression and decompression 2718 * checksum of selected driver 2719 */ 2720 test_data.zlib_dir = ZLIB_NONE; 2721 ret = test_deflate_comp_decomp(&int_data, &test_data); 2722 if (ret < 0) 2723 goto exit; 2724 } 2725 } 2726 2727 ret = TEST_SUCCESS; 2728 2729 exit: 2730 rte_free(compress_xform); 2731 rte_free(decompress_xform); 2732 return ret; 2733 } 2734 2735 static int 2736 test_compressdev_out_of_space_buffer(void) 2737 { 2738 struct comp_testsuite_params *ts_params = &testsuite_params; 2739 int ret; 2740 uint16_t i; 2741 const struct rte_compressdev_capabilities *capab; 2742 2743 RTE_LOG(INFO, USER1, "This is a negative test, errors are expected\n"); 2744 2745 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); 2746 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities"); 2747 2748 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0) 2749 return -ENOTSUP; 2750 2751 struct interim_data_params int_data = { 2752 &compress_test_bufs[0], 2753 1, 2754 &i, 2755 &ts_params->def_comp_xform, 2756 &ts_params->def_decomp_xform, 2757 1 2758 }; 2759 2760 struct test_data_params test_data = { 2761 .compress_state = RTE_COMP_OP_STATELESS, 2762 .decompress_state = RTE_COMP_OP_STATELESS, 2763 .buff_type = LB_BOTH, 2764 .zlib_dir = ZLIB_DECOMPRESS, 2765 .out_of_space = 1, /* run out-of-space test */ 2766 .big_data = 0, 2767 .overflow = OVERFLOW_DISABLED, 2768 .ratio = RATIO_ENABLED 2769 }; 2770 /* Compress with compressdev, decompress with Zlib */ 2771 test_data.zlib_dir = ZLIB_DECOMPRESS; 2772 ret = test_deflate_comp_decomp(&int_data, &test_data); 2773 if (ret < 0) 2774 goto exit; 2775 2776 /* Compress with Zlib, decompress with compressdev */ 2777 test_data.zlib_dir = ZLIB_COMPRESS; 2778 ret = test_deflate_comp_decomp(&int_data, &test_data); 2779 if (ret < 0) 2780 goto exit; 2781 2782 if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) { 2783 /* Compress with compressdev, decompress with Zlib */ 2784 test_data.zlib_dir = ZLIB_DECOMPRESS; 2785 test_data.buff_type = SGL_BOTH; 2786 ret = test_deflate_comp_decomp(&int_data, &test_data); 2787 if (ret < 0) 2788 goto exit; 2789 2790 /* Compress with Zlib, decompress with compressdev */ 2791 test_data.zlib_dir = ZLIB_COMPRESS; 2792 test_data.buff_type = SGL_BOTH; 2793 ret = test_deflate_comp_decomp(&int_data, &test_data); 2794 if (ret < 0) 2795 goto exit; 2796 } 2797 2798 ret = TEST_SUCCESS; 2799 2800 exit: 2801 return ret; 2802 } 2803 2804 static int 2805 test_compressdev_deflate_stateless_dynamic_big(void) 2806 { 2807 struct comp_testsuite_params *ts_params = &testsuite_params; 2808 uint16_t i = 0; 2809 int ret; 2810 unsigned int j; 2811 const struct rte_compressdev_capabilities *capab; 2812 char *test_buffer = NULL; 2813 2814 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); 2815 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities"); 2816 2817 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0) 2818 return -ENOTSUP; 2819 2820 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0) 2821 return -ENOTSUP; 2822 2823 test_buffer = rte_malloc(NULL, BIG_DATA_TEST_SIZE, 0); 2824 if (test_buffer == NULL) { 2825 RTE_LOG(ERR, USER1, 2826 "Can't allocate buffer for big-data\n"); 2827 return TEST_FAILED; 2828 } 2829 2830 struct interim_data_params int_data = { 2831 (const char * const *)&test_buffer, 2832 1, 2833 &i, 2834 &ts_params->def_comp_xform, 2835 &ts_params->def_decomp_xform, 2836 1 2837 }; 2838 2839 struct test_data_params test_data = { 2840 .compress_state = RTE_COMP_OP_STATELESS, 2841 .decompress_state = RTE_COMP_OP_STATELESS, 2842 .buff_type = SGL_BOTH, 2843 .zlib_dir = ZLIB_DECOMPRESS, 2844 .out_of_space = 0, 2845 .big_data = 1, 2846 .overflow = OVERFLOW_DISABLED, 2847 .ratio = RATIO_DISABLED 2848 }; 2849 2850 ts_params->def_comp_xform->compress.deflate.huffman = 2851 RTE_COMP_HUFFMAN_DYNAMIC; 2852 2853 /* fill the buffer with data based on rand. data */ 2854 srand(BIG_DATA_TEST_SIZE); 2855 for (j = 0; j < BIG_DATA_TEST_SIZE - 1; ++j) 2856 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1; 2857 test_buffer[BIG_DATA_TEST_SIZE - 1] = 0; 2858 2859 /* Compress with compressdev, decompress with Zlib */ 2860 test_data.zlib_dir = ZLIB_DECOMPRESS; 2861 ret = test_deflate_comp_decomp(&int_data, &test_data); 2862 if (ret < 0) 2863 goto exit; 2864 2865 /* Compress with Zlib, decompress with compressdev */ 2866 test_data.zlib_dir = ZLIB_COMPRESS; 2867 ret = test_deflate_comp_decomp(&int_data, &test_data); 2868 if (ret < 0) 2869 goto exit; 2870 2871 ret = TEST_SUCCESS; 2872 2873 exit: 2874 ts_params->def_comp_xform->compress.deflate.huffman = 2875 RTE_COMP_HUFFMAN_DEFAULT; 2876 rte_free(test_buffer); 2877 return ret; 2878 } 2879 2880 static int 2881 test_compressdev_deflate_stateful_decomp(void) 2882 { 2883 struct comp_testsuite_params *ts_params = &testsuite_params; 2884 int ret; 2885 uint16_t i; 2886 const struct rte_compressdev_capabilities *capab; 2887 2888 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); 2889 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities"); 2890 2891 if (!(capab->comp_feature_flags & RTE_COMP_FF_STATEFUL_DECOMPRESSION)) 2892 return -ENOTSUP; 2893 2894 struct interim_data_params int_data = { 2895 &compress_test_bufs[0], 2896 1, 2897 &i, 2898 &ts_params->def_comp_xform, 2899 &ts_params->def_decomp_xform, 2900 1 2901 }; 2902 2903 struct test_data_params test_data = { 2904 .compress_state = RTE_COMP_OP_STATELESS, 2905 .decompress_state = RTE_COMP_OP_STATEFUL, 2906 .buff_type = LB_BOTH, 2907 .zlib_dir = ZLIB_COMPRESS, 2908 .out_of_space = 0, 2909 .big_data = 0, 2910 .decompress_output_block_size = 2000, 2911 .decompress_steps_max = 4, 2912 .overflow = OVERFLOW_DISABLED, 2913 .ratio = RATIO_ENABLED 2914 }; 2915 2916 /* Compress with Zlib, decompress with compressdev */ 2917 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) { 2918 ret = TEST_FAILED; 2919 goto exit; 2920 } 2921 2922 if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) { 2923 /* Now test with SGL buffers */ 2924 test_data.buff_type = SGL_BOTH; 2925 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) { 2926 ret = TEST_FAILED; 2927 goto exit; 2928 } 2929 } 2930 2931 ret = TEST_SUCCESS; 2932 2933 exit: 2934 return ret; 2935 } 2936 2937 static int 2938 test_compressdev_deflate_stateful_decomp_checksum(void) 2939 { 2940 struct comp_testsuite_params *ts_params = &testsuite_params; 2941 int ret; 2942 uint16_t i; 2943 const struct rte_compressdev_capabilities *capab; 2944 2945 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); 2946 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities"); 2947 2948 if (!(capab->comp_feature_flags & RTE_COMP_FF_STATEFUL_DECOMPRESSION)) 2949 return -ENOTSUP; 2950 2951 /* Check if driver supports any checksum */ 2952 if (!(capab->comp_feature_flags & 2953 (RTE_COMP_FF_CRC32_CHECKSUM | RTE_COMP_FF_ADLER32_CHECKSUM | 2954 RTE_COMP_FF_CRC32_ADLER32_CHECKSUM))) 2955 return -ENOTSUP; 2956 2957 struct rte_comp_xform *compress_xform = 2958 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0); 2959 if (compress_xform == NULL) { 2960 RTE_LOG(ERR, USER1, "Compress xform could not be created\n"); 2961 return TEST_FAILED; 2962 } 2963 2964 memcpy(compress_xform, ts_params->def_comp_xform, 2965 sizeof(struct rte_comp_xform)); 2966 2967 struct rte_comp_xform *decompress_xform = 2968 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0); 2969 if (decompress_xform == NULL) { 2970 RTE_LOG(ERR, USER1, "Decompress xform could not be created\n"); 2971 rte_free(compress_xform); 2972 return TEST_FAILED; 2973 } 2974 2975 memcpy(decompress_xform, ts_params->def_decomp_xform, 2976 sizeof(struct rte_comp_xform)); 2977 2978 struct interim_data_params int_data = { 2979 &compress_test_bufs[0], 2980 1, 2981 &i, 2982 &compress_xform, 2983 &decompress_xform, 2984 1 2985 }; 2986 2987 struct test_data_params test_data = { 2988 .compress_state = RTE_COMP_OP_STATELESS, 2989 .decompress_state = RTE_COMP_OP_STATEFUL, 2990 .buff_type = LB_BOTH, 2991 .zlib_dir = ZLIB_COMPRESS, 2992 .out_of_space = 0, 2993 .big_data = 0, 2994 .decompress_output_block_size = 2000, 2995 .decompress_steps_max = 4, 2996 .overflow = OVERFLOW_DISABLED, 2997 .ratio = RATIO_ENABLED 2998 }; 2999 3000 /* Check if driver supports crc32 checksum and test */ 3001 if (capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM) { 3002 compress_xform->compress.chksum = RTE_COMP_CHECKSUM_CRC32; 3003 decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_CRC32; 3004 /* Compress with Zlib, decompress with compressdev */ 3005 test_data.buff_type = LB_BOTH; 3006 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) { 3007 ret = TEST_FAILED; 3008 goto exit; 3009 } 3010 if (capab->comp_feature_flags & 3011 RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) { 3012 /* Now test with SGL buffers */ 3013 test_data.buff_type = SGL_BOTH; 3014 if (test_deflate_comp_decomp(&int_data, 3015 &test_data) < 0) { 3016 ret = TEST_FAILED; 3017 goto exit; 3018 } 3019 } 3020 } 3021 3022 /* Check if driver supports adler32 checksum and test */ 3023 if (capab->comp_feature_flags & RTE_COMP_FF_ADLER32_CHECKSUM) { 3024 compress_xform->compress.chksum = RTE_COMP_CHECKSUM_ADLER32; 3025 decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_ADLER32; 3026 /* Compress with Zlib, decompress with compressdev */ 3027 test_data.buff_type = LB_BOTH; 3028 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) { 3029 ret = TEST_FAILED; 3030 goto exit; 3031 } 3032 if (capab->comp_feature_flags & 3033 RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) { 3034 /* Now test with SGL buffers */ 3035 test_data.buff_type = SGL_BOTH; 3036 if (test_deflate_comp_decomp(&int_data, 3037 &test_data) < 0) { 3038 ret = TEST_FAILED; 3039 goto exit; 3040 } 3041 } 3042 } 3043 3044 /* Check if driver supports combined crc and adler checksum and test */ 3045 if (capab->comp_feature_flags & RTE_COMP_FF_CRC32_ADLER32_CHECKSUM) { 3046 compress_xform->compress.chksum = 3047 RTE_COMP_CHECKSUM_CRC32_ADLER32; 3048 decompress_xform->decompress.chksum = 3049 RTE_COMP_CHECKSUM_CRC32_ADLER32; 3050 /* Zlib doesn't support combined checksum */ 3051 test_data.zlib_dir = ZLIB_NONE; 3052 /* Compress stateless, decompress stateful with compressdev */ 3053 test_data.buff_type = LB_BOTH; 3054 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) { 3055 ret = TEST_FAILED; 3056 goto exit; 3057 } 3058 if (capab->comp_feature_flags & 3059 RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) { 3060 /* Now test with SGL buffers */ 3061 test_data.buff_type = SGL_BOTH; 3062 if (test_deflate_comp_decomp(&int_data, 3063 &test_data) < 0) { 3064 ret = TEST_FAILED; 3065 goto exit; 3066 } 3067 } 3068 } 3069 3070 ret = TEST_SUCCESS; 3071 3072 exit: 3073 rte_free(compress_xform); 3074 rte_free(decompress_xform); 3075 return ret; 3076 } 3077 3078 static const struct rte_memzone * 3079 make_memzone(const char *name, size_t size) 3080 { 3081 unsigned int socket_id = rte_socket_id(); 3082 char mz_name[RTE_MEMZONE_NAMESIZE]; 3083 const struct rte_memzone *memzone; 3084 3085 snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "%s_%u", name, socket_id); 3086 memzone = rte_memzone_lookup(mz_name); 3087 if (memzone != NULL && memzone->len != size) { 3088 rte_memzone_free(memzone); 3089 memzone = NULL; 3090 } 3091 if (memzone == NULL) { 3092 memzone = rte_memzone_reserve_aligned(mz_name, size, socket_id, 3093 RTE_MEMZONE_IOVA_CONTIG, RTE_CACHE_LINE_SIZE); 3094 if (memzone == NULL) 3095 RTE_LOG(ERR, USER1, "Can't allocate memory zone %s", 3096 mz_name); 3097 } 3098 return memzone; 3099 } 3100 3101 static int 3102 test_compressdev_external_mbufs(void) 3103 { 3104 struct comp_testsuite_params *ts_params = &testsuite_params; 3105 size_t data_len = 0; 3106 uint16_t i; 3107 int ret = TEST_FAILED; 3108 3109 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) 3110 data_len = RTE_MAX(data_len, strlen(compress_test_bufs[i]) + 1); 3111 3112 struct interim_data_params int_data = { 3113 NULL, 3114 1, 3115 NULL, 3116 &ts_params->def_comp_xform, 3117 &ts_params->def_decomp_xform, 3118 1 3119 }; 3120 3121 struct test_data_params test_data = { 3122 .compress_state = RTE_COMP_OP_STATELESS, 3123 .decompress_state = RTE_COMP_OP_STATELESS, 3124 .buff_type = LB_BOTH, 3125 .zlib_dir = ZLIB_DECOMPRESS, 3126 .out_of_space = 0, 3127 .big_data = 0, 3128 .use_external_mbufs = 1, 3129 .inbuf_data_size = data_len, 3130 .inbuf_memzone = make_memzone("inbuf", data_len), 3131 .compbuf_memzone = make_memzone("compbuf", data_len * 3132 COMPRESS_BUF_SIZE_RATIO), 3133 .uncompbuf_memzone = make_memzone("decompbuf", data_len), 3134 .overflow = OVERFLOW_DISABLED 3135 }; 3136 3137 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) { 3138 /* prepare input data */ 3139 data_len = strlen(compress_test_bufs[i]) + 1; 3140 rte_memcpy(test_data.inbuf_memzone->addr, compress_test_bufs[i], 3141 data_len); 3142 test_data.inbuf_data_size = data_len; 3143 int_data.buf_idx = &i; 3144 3145 /* Compress with compressdev, decompress with Zlib */ 3146 test_data.zlib_dir = ZLIB_DECOMPRESS; 3147 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) 3148 goto exit; 3149 3150 /* Compress with Zlib, decompress with compressdev */ 3151 test_data.zlib_dir = ZLIB_COMPRESS; 3152 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) 3153 goto exit; 3154 } 3155 3156 ret = TEST_SUCCESS; 3157 3158 exit: 3159 rte_memzone_free(test_data.inbuf_memzone); 3160 rte_memzone_free(test_data.compbuf_memzone); 3161 rte_memzone_free(test_data.uncompbuf_memzone); 3162 return ret; 3163 } 3164 3165 static int 3166 test_compressdev_deflate_stateless_fixed_oos_recoverable(void) 3167 { 3168 struct comp_testsuite_params *ts_params = &testsuite_params; 3169 uint16_t i; 3170 int ret; 3171 int comp_result; 3172 const struct rte_compressdev_capabilities *capab; 3173 3174 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); 3175 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities"); 3176 3177 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0) 3178 return -ENOTSUP; 3179 3180 struct rte_comp_xform *compress_xform = 3181 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0); 3182 3183 if (compress_xform == NULL) { 3184 RTE_LOG(ERR, USER1, 3185 "Compress xform could not be created\n"); 3186 ret = TEST_FAILED; 3187 goto exit; 3188 } 3189 3190 memcpy(compress_xform, ts_params->def_comp_xform, 3191 sizeof(struct rte_comp_xform)); 3192 compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_FIXED; 3193 3194 struct interim_data_params int_data = { 3195 NULL, 3196 1, 3197 NULL, 3198 &compress_xform, 3199 &ts_params->def_decomp_xform, 3200 1 3201 }; 3202 3203 struct test_data_params test_data = { 3204 .compress_state = RTE_COMP_OP_STATELESS, 3205 .decompress_state = RTE_COMP_OP_STATELESS, 3206 .buff_type = LB_BOTH, 3207 .zlib_dir = ZLIB_DECOMPRESS, 3208 .out_of_space = 0, 3209 .big_data = 0, 3210 .overflow = OVERFLOW_ENABLED, 3211 .ratio = RATIO_ENABLED 3212 }; 3213 3214 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) { 3215 int_data.test_bufs = &compress_test_bufs[i]; 3216 int_data.buf_idx = &i; 3217 3218 /* Compress with compressdev, decompress with Zlib */ 3219 test_data.zlib_dir = ZLIB_DECOMPRESS; 3220 comp_result = test_deflate_comp_decomp(&int_data, &test_data); 3221 if (comp_result < 0) { 3222 ret = TEST_FAILED; 3223 goto exit; 3224 } else if (comp_result > 0) { 3225 ret = -ENOTSUP; 3226 goto exit; 3227 } 3228 3229 /* Compress with Zlib, decompress with compressdev */ 3230 test_data.zlib_dir = ZLIB_COMPRESS; 3231 comp_result = test_deflate_comp_decomp(&int_data, &test_data); 3232 if (comp_result < 0) { 3233 ret = TEST_FAILED; 3234 goto exit; 3235 } else if (comp_result > 0) { 3236 ret = -ENOTSUP; 3237 goto exit; 3238 } 3239 } 3240 3241 ret = TEST_SUCCESS; 3242 3243 exit: 3244 rte_free(compress_xform); 3245 return ret; 3246 } 3247 3248 static int 3249 test_compressdev_deflate_im_buffers_LB_1op(void) 3250 { 3251 struct comp_testsuite_params *ts_params = &testsuite_params; 3252 uint16_t i = 0; 3253 int ret = TEST_SUCCESS; 3254 int j; 3255 const struct rte_compressdev_capabilities *capab; 3256 char *test_buffer = NULL; 3257 3258 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); 3259 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities"); 3260 3261 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0) 3262 return -ENOTSUP; 3263 3264 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0) 3265 return -ENOTSUP; 3266 3267 test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_LB, 0); 3268 if (test_buffer == NULL) { 3269 RTE_LOG(ERR, USER1, 3270 "Can't allocate buffer for 'im buffer' test\n"); 3271 return TEST_FAILED; 3272 } 3273 3274 struct interim_data_params int_data = { 3275 (const char * const *)&test_buffer, 3276 1, 3277 &i, 3278 &ts_params->def_comp_xform, 3279 &ts_params->def_decomp_xform, 3280 1 3281 }; 3282 3283 struct test_data_params test_data = { 3284 .compress_state = RTE_COMP_OP_STATELESS, 3285 .decompress_state = RTE_COMP_OP_STATELESS, 3286 /* must be LB to SGL, 3287 * input LB buffer reaches its maximum, 3288 * if ratio 1.3 than another mbuf must be 3289 * created and attached 3290 */ 3291 .buff_type = LB_BOTH, 3292 .zlib_dir = ZLIB_NONE, 3293 .out_of_space = 0, 3294 .big_data = 1, 3295 .overflow = OVERFLOW_DISABLED, 3296 .ratio = RATIO_DISABLED 3297 }; 3298 3299 ts_params->def_comp_xform->compress.deflate.huffman = 3300 RTE_COMP_HUFFMAN_DYNAMIC; 3301 3302 /* fill the buffer with data based on rand. data */ 3303 srand(IM_BUF_DATA_TEST_SIZE_LB); 3304 for (j = 0; j < IM_BUF_DATA_TEST_SIZE_LB - 1; ++j) 3305 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1; 3306 3307 /* Compress with compressdev, decompress with compressdev */ 3308 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) { 3309 ret = TEST_FAILED; 3310 goto end; 3311 } 3312 3313 end: 3314 ts_params->def_comp_xform->compress.deflate.huffman = 3315 RTE_COMP_HUFFMAN_DEFAULT; 3316 rte_free(test_buffer); 3317 return ret; 3318 } 3319 3320 static int 3321 test_compressdev_deflate_im_buffers_LB_2ops_first(void) 3322 { 3323 struct comp_testsuite_params *ts_params = &testsuite_params; 3324 uint16_t i = 0; 3325 int ret = TEST_SUCCESS; 3326 int j; 3327 const struct rte_compressdev_capabilities *capab; 3328 char *test_buffer = NULL; 3329 const char *test_buffers[2]; 3330 3331 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); 3332 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities"); 3333 3334 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0) 3335 return -ENOTSUP; 3336 3337 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0) 3338 return -ENOTSUP; 3339 3340 test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_LB, 0); 3341 if (test_buffer == NULL) { 3342 RTE_LOG(ERR, USER1, 3343 "Can't allocate buffer for 'im buffer' test\n"); 3344 return TEST_FAILED; 3345 } 3346 3347 test_buffers[0] = test_buffer; 3348 test_buffers[1] = compress_test_bufs[0]; 3349 3350 struct interim_data_params int_data = { 3351 (const char * const *)test_buffers, 3352 2, 3353 &i, 3354 &ts_params->def_comp_xform, 3355 &ts_params->def_decomp_xform, 3356 1 3357 }; 3358 3359 struct test_data_params test_data = { 3360 .compress_state = RTE_COMP_OP_STATELESS, 3361 .decompress_state = RTE_COMP_OP_STATELESS, 3362 .buff_type = LB_BOTH, 3363 .zlib_dir = ZLIB_NONE, 3364 .out_of_space = 0, 3365 .big_data = 1, 3366 .overflow = OVERFLOW_DISABLED, 3367 .ratio = RATIO_DISABLED 3368 }; 3369 3370 ts_params->def_comp_xform->compress.deflate.huffman = 3371 RTE_COMP_HUFFMAN_DYNAMIC; 3372 3373 /* fill the buffer with data based on rand. data */ 3374 srand(IM_BUF_DATA_TEST_SIZE_LB); 3375 for (j = 0; j < IM_BUF_DATA_TEST_SIZE_LB - 1; ++j) 3376 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1; 3377 3378 /* Compress with compressdev, decompress with compressdev */ 3379 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) { 3380 ret = TEST_FAILED; 3381 goto end; 3382 } 3383 3384 end: 3385 ts_params->def_comp_xform->compress.deflate.huffman = 3386 RTE_COMP_HUFFMAN_DEFAULT; 3387 rte_free(test_buffer); 3388 return ret; 3389 } 3390 3391 static int 3392 test_compressdev_deflate_im_buffers_LB_2ops_second(void) 3393 { 3394 struct comp_testsuite_params *ts_params = &testsuite_params; 3395 uint16_t i = 0; 3396 int ret = TEST_SUCCESS; 3397 int j; 3398 const struct rte_compressdev_capabilities *capab; 3399 char *test_buffer = NULL; 3400 const char *test_buffers[2]; 3401 3402 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); 3403 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities"); 3404 3405 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0) 3406 return -ENOTSUP; 3407 3408 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0) 3409 return -ENOTSUP; 3410 3411 test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_LB, 0); 3412 if (test_buffer == NULL) { 3413 RTE_LOG(ERR, USER1, 3414 "Can't allocate buffer for 'im buffer' test\n"); 3415 return TEST_FAILED; 3416 } 3417 3418 test_buffers[0] = compress_test_bufs[0]; 3419 test_buffers[1] = test_buffer; 3420 3421 struct interim_data_params int_data = { 3422 (const char * const *)test_buffers, 3423 2, 3424 &i, 3425 &ts_params->def_comp_xform, 3426 &ts_params->def_decomp_xform, 3427 1 3428 }; 3429 3430 struct test_data_params test_data = { 3431 .compress_state = RTE_COMP_OP_STATELESS, 3432 .decompress_state = RTE_COMP_OP_STATELESS, 3433 .buff_type = LB_BOTH, 3434 .zlib_dir = ZLIB_NONE, 3435 .out_of_space = 0, 3436 .big_data = 1, 3437 .overflow = OVERFLOW_DISABLED, 3438 .ratio = RATIO_DISABLED 3439 }; 3440 3441 ts_params->def_comp_xform->compress.deflate.huffman = 3442 RTE_COMP_HUFFMAN_DYNAMIC; 3443 3444 /* fill the buffer with data based on rand. data */ 3445 srand(IM_BUF_DATA_TEST_SIZE_LB); 3446 for (j = 0; j < IM_BUF_DATA_TEST_SIZE_LB - 1; ++j) 3447 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1; 3448 3449 /* Compress with compressdev, decompress with compressdev */ 3450 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) { 3451 ret = TEST_FAILED; 3452 goto end; 3453 } 3454 3455 end: 3456 ts_params->def_comp_xform->compress.deflate.huffman = 3457 RTE_COMP_HUFFMAN_DEFAULT; 3458 rte_free(test_buffer); 3459 return ret; 3460 } 3461 3462 static int 3463 test_compressdev_deflate_im_buffers_LB_3ops(void) 3464 { 3465 struct comp_testsuite_params *ts_params = &testsuite_params; 3466 uint16_t i = 0; 3467 int ret = TEST_SUCCESS; 3468 int j; 3469 const struct rte_compressdev_capabilities *capab; 3470 char *test_buffer = NULL; 3471 const char *test_buffers[3]; 3472 3473 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); 3474 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities"); 3475 3476 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0) 3477 return -ENOTSUP; 3478 3479 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0) 3480 return -ENOTSUP; 3481 3482 test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_LB, 0); 3483 if (test_buffer == NULL) { 3484 RTE_LOG(ERR, USER1, 3485 "Can't allocate buffer for 'im buffer' test\n"); 3486 return TEST_FAILED; 3487 } 3488 3489 test_buffers[0] = compress_test_bufs[0]; 3490 test_buffers[1] = test_buffer; 3491 test_buffers[2] = compress_test_bufs[1]; 3492 3493 struct interim_data_params int_data = { 3494 (const char * const *)test_buffers, 3495 3, 3496 &i, 3497 &ts_params->def_comp_xform, 3498 &ts_params->def_decomp_xform, 3499 1 3500 }; 3501 3502 struct test_data_params test_data = { 3503 .compress_state = RTE_COMP_OP_STATELESS, 3504 .decompress_state = RTE_COMP_OP_STATELESS, 3505 .buff_type = LB_BOTH, 3506 .zlib_dir = ZLIB_NONE, 3507 .out_of_space = 0, 3508 .big_data = 1, 3509 .overflow = OVERFLOW_DISABLED, 3510 .ratio = RATIO_DISABLED 3511 }; 3512 3513 ts_params->def_comp_xform->compress.deflate.huffman = 3514 RTE_COMP_HUFFMAN_DYNAMIC; 3515 3516 /* fill the buffer with data based on rand. data */ 3517 srand(IM_BUF_DATA_TEST_SIZE_LB); 3518 for (j = 0; j < IM_BUF_DATA_TEST_SIZE_LB - 1; ++j) 3519 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1; 3520 3521 /* Compress with compressdev, decompress with compressdev */ 3522 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) { 3523 ret = TEST_FAILED; 3524 goto end; 3525 } 3526 3527 end: 3528 ts_params->def_comp_xform->compress.deflate.huffman = 3529 RTE_COMP_HUFFMAN_DEFAULT; 3530 rte_free(test_buffer); 3531 return ret; 3532 } 3533 3534 static int 3535 test_compressdev_deflate_im_buffers_LB_4ops(void) 3536 { 3537 struct comp_testsuite_params *ts_params = &testsuite_params; 3538 uint16_t i = 0; 3539 int ret = TEST_SUCCESS; 3540 int j; 3541 const struct rte_compressdev_capabilities *capab; 3542 char *test_buffer = NULL; 3543 const char *test_buffers[4]; 3544 3545 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); 3546 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities"); 3547 3548 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0) 3549 return -ENOTSUP; 3550 3551 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0) 3552 return -ENOTSUP; 3553 3554 test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_LB, 0); 3555 if (test_buffer == NULL) { 3556 RTE_LOG(ERR, USER1, 3557 "Can't allocate buffer for 'im buffer' test\n"); 3558 return TEST_FAILED; 3559 } 3560 3561 test_buffers[0] = compress_test_bufs[0]; 3562 test_buffers[1] = test_buffer; 3563 test_buffers[2] = compress_test_bufs[1]; 3564 test_buffers[3] = test_buffer; 3565 3566 struct interim_data_params int_data = { 3567 (const char * const *)test_buffers, 3568 4, 3569 &i, 3570 &ts_params->def_comp_xform, 3571 &ts_params->def_decomp_xform, 3572 1 3573 }; 3574 3575 struct test_data_params test_data = { 3576 .compress_state = RTE_COMP_OP_STATELESS, 3577 .decompress_state = RTE_COMP_OP_STATELESS, 3578 .buff_type = LB_BOTH, 3579 .zlib_dir = ZLIB_NONE, 3580 .out_of_space = 0, 3581 .big_data = 1, 3582 .overflow = OVERFLOW_DISABLED, 3583 .ratio = RATIO_DISABLED 3584 }; 3585 3586 ts_params->def_comp_xform->compress.deflate.huffman = 3587 RTE_COMP_HUFFMAN_DYNAMIC; 3588 3589 /* fill the buffer with data based on rand. data */ 3590 srand(IM_BUF_DATA_TEST_SIZE_LB); 3591 for (j = 0; j < IM_BUF_DATA_TEST_SIZE_LB - 1; ++j) 3592 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1; 3593 3594 /* Compress with compressdev, decompress with compressdev */ 3595 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) { 3596 ret = TEST_FAILED; 3597 goto end; 3598 } 3599 3600 end: 3601 ts_params->def_comp_xform->compress.deflate.huffman = 3602 RTE_COMP_HUFFMAN_DEFAULT; 3603 rte_free(test_buffer); 3604 return ret; 3605 } 3606 3607 3608 static int 3609 test_compressdev_deflate_im_buffers_SGL_1op(void) 3610 { 3611 struct comp_testsuite_params *ts_params = &testsuite_params; 3612 uint16_t i = 0; 3613 int ret = TEST_SUCCESS; 3614 int j; 3615 const struct rte_compressdev_capabilities *capab; 3616 char *test_buffer = NULL; 3617 3618 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); 3619 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities"); 3620 3621 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0) 3622 return -ENOTSUP; 3623 3624 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0) 3625 return -ENOTSUP; 3626 3627 test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_SGL, 0); 3628 if (test_buffer == NULL) { 3629 RTE_LOG(ERR, USER1, 3630 "Can't allocate buffer for big-data\n"); 3631 return TEST_FAILED; 3632 } 3633 3634 struct interim_data_params int_data = { 3635 (const char * const *)&test_buffer, 3636 1, 3637 &i, 3638 &ts_params->def_comp_xform, 3639 &ts_params->def_decomp_xform, 3640 1 3641 }; 3642 3643 struct test_data_params test_data = { 3644 .compress_state = RTE_COMP_OP_STATELESS, 3645 .decompress_state = RTE_COMP_OP_STATELESS, 3646 .buff_type = SGL_BOTH, 3647 .zlib_dir = ZLIB_NONE, 3648 .out_of_space = 0, 3649 .big_data = 1, 3650 .overflow = OVERFLOW_DISABLED, 3651 .ratio = RATIO_DISABLED 3652 }; 3653 3654 ts_params->def_comp_xform->compress.deflate.huffman = 3655 RTE_COMP_HUFFMAN_DYNAMIC; 3656 3657 /* fill the buffer with data based on rand. data */ 3658 srand(IM_BUF_DATA_TEST_SIZE_SGL); 3659 for (j = 0; j < IM_BUF_DATA_TEST_SIZE_SGL - 1; ++j) 3660 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1; 3661 3662 /* Compress with compressdev, decompress with compressdev */ 3663 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) { 3664 ret = TEST_FAILED; 3665 goto end; 3666 } 3667 3668 end: 3669 ts_params->def_comp_xform->compress.deflate.huffman = 3670 RTE_COMP_HUFFMAN_DEFAULT; 3671 rte_free(test_buffer); 3672 return ret; 3673 } 3674 3675 static int 3676 test_compressdev_deflate_im_buffers_SGL_2ops_first(void) 3677 { 3678 struct comp_testsuite_params *ts_params = &testsuite_params; 3679 uint16_t i = 0; 3680 int ret = TEST_SUCCESS; 3681 int j; 3682 const struct rte_compressdev_capabilities *capab; 3683 char *test_buffer = NULL; 3684 const char *test_buffers[2]; 3685 3686 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); 3687 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities"); 3688 3689 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0) 3690 return -ENOTSUP; 3691 3692 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0) 3693 return -ENOTSUP; 3694 3695 test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_SGL, 0); 3696 if (test_buffer == NULL) { 3697 RTE_LOG(ERR, USER1, 3698 "Can't allocate buffer for big-data\n"); 3699 return TEST_FAILED; 3700 } 3701 3702 test_buffers[0] = test_buffer; 3703 test_buffers[1] = compress_test_bufs[0]; 3704 3705 struct interim_data_params int_data = { 3706 (const char * const *)test_buffers, 3707 2, 3708 &i, 3709 &ts_params->def_comp_xform, 3710 &ts_params->def_decomp_xform, 3711 1 3712 }; 3713 3714 struct test_data_params test_data = { 3715 .compress_state = RTE_COMP_OP_STATELESS, 3716 .decompress_state = RTE_COMP_OP_STATELESS, 3717 .buff_type = SGL_BOTH, 3718 .zlib_dir = ZLIB_NONE, 3719 .out_of_space = 0, 3720 .big_data = 1, 3721 .overflow = OVERFLOW_DISABLED, 3722 .ratio = RATIO_DISABLED 3723 }; 3724 3725 ts_params->def_comp_xform->compress.deflate.huffman = 3726 RTE_COMP_HUFFMAN_DYNAMIC; 3727 3728 /* fill the buffer with data based on rand. data */ 3729 srand(IM_BUF_DATA_TEST_SIZE_SGL); 3730 for (j = 0; j < IM_BUF_DATA_TEST_SIZE_SGL - 1; ++j) 3731 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1; 3732 3733 /* Compress with compressdev, decompress with compressdev */ 3734 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) { 3735 ret = TEST_FAILED; 3736 goto end; 3737 } 3738 3739 end: 3740 ts_params->def_comp_xform->compress.deflate.huffman = 3741 RTE_COMP_HUFFMAN_DEFAULT; 3742 rte_free(test_buffer); 3743 return ret; 3744 } 3745 3746 static int 3747 test_compressdev_deflate_im_buffers_SGL_2ops_second(void) 3748 { 3749 struct comp_testsuite_params *ts_params = &testsuite_params; 3750 uint16_t i = 0; 3751 int ret = TEST_SUCCESS; 3752 int j; 3753 const struct rte_compressdev_capabilities *capab; 3754 char *test_buffer = NULL; 3755 const char *test_buffers[2]; 3756 3757 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); 3758 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities"); 3759 3760 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0) 3761 return -ENOTSUP; 3762 3763 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0) 3764 return -ENOTSUP; 3765 3766 test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_SGL, 0); 3767 if (test_buffer == NULL) { 3768 RTE_LOG(ERR, USER1, 3769 "Can't allocate buffer for big-data\n"); 3770 return TEST_FAILED; 3771 } 3772 3773 test_buffers[0] = compress_test_bufs[0]; 3774 test_buffers[1] = test_buffer; 3775 3776 struct interim_data_params int_data = { 3777 (const char * const *)test_buffers, 3778 2, 3779 &i, 3780 &ts_params->def_comp_xform, 3781 &ts_params->def_decomp_xform, 3782 1 3783 }; 3784 3785 struct test_data_params test_data = { 3786 .compress_state = RTE_COMP_OP_STATELESS, 3787 .decompress_state = RTE_COMP_OP_STATELESS, 3788 .buff_type = SGL_BOTH, 3789 .zlib_dir = ZLIB_NONE, 3790 .out_of_space = 0, 3791 .big_data = 1, 3792 .overflow = OVERFLOW_DISABLED, 3793 .ratio = RATIO_DISABLED 3794 }; 3795 3796 ts_params->def_comp_xform->compress.deflate.huffman = 3797 RTE_COMP_HUFFMAN_DYNAMIC; 3798 3799 /* fill the buffer with data based on rand. data */ 3800 srand(IM_BUF_DATA_TEST_SIZE_SGL); 3801 for (j = 0; j < IM_BUF_DATA_TEST_SIZE_SGL - 1; ++j) 3802 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1; 3803 3804 /* Compress with compressdev, decompress with compressdev */ 3805 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) { 3806 ret = TEST_FAILED; 3807 goto end; 3808 } 3809 3810 end: 3811 ts_params->def_comp_xform->compress.deflate.huffman = 3812 RTE_COMP_HUFFMAN_DEFAULT; 3813 rte_free(test_buffer); 3814 return ret; 3815 } 3816 3817 static int 3818 test_compressdev_deflate_im_buffers_SGL_3ops(void) 3819 { 3820 struct comp_testsuite_params *ts_params = &testsuite_params; 3821 uint16_t i = 0; 3822 int ret = TEST_SUCCESS; 3823 int j; 3824 const struct rte_compressdev_capabilities *capab; 3825 char *test_buffer = NULL; 3826 const char *test_buffers[3]; 3827 3828 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); 3829 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities"); 3830 3831 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0) 3832 return -ENOTSUP; 3833 3834 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0) 3835 return -ENOTSUP; 3836 3837 test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_SGL, 0); 3838 if (test_buffer == NULL) { 3839 RTE_LOG(ERR, USER1, 3840 "Can't allocate buffer for big-data\n"); 3841 return TEST_FAILED; 3842 } 3843 3844 test_buffers[0] = compress_test_bufs[0]; 3845 test_buffers[1] = test_buffer; 3846 test_buffers[2] = compress_test_bufs[1]; 3847 3848 struct interim_data_params int_data = { 3849 (const char * const *)test_buffers, 3850 3, 3851 &i, 3852 &ts_params->def_comp_xform, 3853 &ts_params->def_decomp_xform, 3854 1 3855 }; 3856 3857 struct test_data_params test_data = { 3858 .compress_state = RTE_COMP_OP_STATELESS, 3859 .decompress_state = RTE_COMP_OP_STATELESS, 3860 .buff_type = SGL_BOTH, 3861 .zlib_dir = ZLIB_NONE, 3862 .out_of_space = 0, 3863 .big_data = 1, 3864 .overflow = OVERFLOW_DISABLED, 3865 .ratio = RATIO_DISABLED 3866 }; 3867 3868 ts_params->def_comp_xform->compress.deflate.huffman = 3869 RTE_COMP_HUFFMAN_DYNAMIC; 3870 3871 /* fill the buffer with data based on rand. data */ 3872 srand(IM_BUF_DATA_TEST_SIZE_SGL); 3873 for (j = 0; j < IM_BUF_DATA_TEST_SIZE_SGL - 1; ++j) 3874 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1; 3875 3876 /* Compress with compressdev, decompress with compressdev */ 3877 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) { 3878 ret = TEST_FAILED; 3879 goto end; 3880 } 3881 3882 end: 3883 ts_params->def_comp_xform->compress.deflate.huffman = 3884 RTE_COMP_HUFFMAN_DEFAULT; 3885 rte_free(test_buffer); 3886 return ret; 3887 } 3888 3889 3890 static int 3891 test_compressdev_deflate_im_buffers_SGL_4ops(void) 3892 { 3893 struct comp_testsuite_params *ts_params = &testsuite_params; 3894 uint16_t i = 0; 3895 int ret = TEST_SUCCESS; 3896 int j; 3897 const struct rte_compressdev_capabilities *capab; 3898 char *test_buffer = NULL; 3899 const char *test_buffers[4]; 3900 3901 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); 3902 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities"); 3903 3904 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0) 3905 return -ENOTSUP; 3906 3907 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0) 3908 return -ENOTSUP; 3909 3910 test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_SGL, 0); 3911 if (test_buffer == NULL) { 3912 RTE_LOG(ERR, USER1, 3913 "Can't allocate buffer for big-data\n"); 3914 return TEST_FAILED; 3915 } 3916 3917 test_buffers[0] = compress_test_bufs[0]; 3918 test_buffers[1] = test_buffer; 3919 test_buffers[2] = compress_test_bufs[1]; 3920 test_buffers[3] = test_buffer; 3921 3922 struct interim_data_params int_data = { 3923 (const char * const *)test_buffers, 3924 4, 3925 &i, 3926 &ts_params->def_comp_xform, 3927 &ts_params->def_decomp_xform, 3928 1 3929 }; 3930 3931 struct test_data_params test_data = { 3932 .compress_state = RTE_COMP_OP_STATELESS, 3933 .decompress_state = RTE_COMP_OP_STATELESS, 3934 .buff_type = SGL_BOTH, 3935 .zlib_dir = ZLIB_NONE, 3936 .out_of_space = 0, 3937 .big_data = 1, 3938 .overflow = OVERFLOW_DISABLED, 3939 .ratio = RATIO_DISABLED 3940 }; 3941 3942 ts_params->def_comp_xform->compress.deflate.huffman = 3943 RTE_COMP_HUFFMAN_DYNAMIC; 3944 3945 /* fill the buffer with data based on rand. data */ 3946 srand(IM_BUF_DATA_TEST_SIZE_SGL); 3947 for (j = 0; j < IM_BUF_DATA_TEST_SIZE_SGL - 1; ++j) 3948 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1; 3949 3950 /* Compress with compressdev, decompress with compressdev */ 3951 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) { 3952 ret = TEST_FAILED; 3953 goto end; 3954 } 3955 3956 end: 3957 ts_params->def_comp_xform->compress.deflate.huffman = 3958 RTE_COMP_HUFFMAN_DEFAULT; 3959 rte_free(test_buffer); 3960 return ret; 3961 } 3962 3963 static int 3964 test_compressdev_deflate_im_buffers_SGL_over_1op(void) 3965 { 3966 struct comp_testsuite_params *ts_params = &testsuite_params; 3967 uint16_t i = 0; 3968 int ret = TEST_SUCCESS; 3969 int j; 3970 const struct rte_compressdev_capabilities *capab; 3971 char *test_buffer = NULL; 3972 3973 RTE_LOG(INFO, USER1, "This is a negative test, errors are expected\n"); 3974 3975 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); 3976 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities"); 3977 3978 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0) 3979 return -ENOTSUP; 3980 3981 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0) 3982 return -ENOTSUP; 3983 3984 test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_OVER, 0); 3985 if (test_buffer == NULL) { 3986 RTE_LOG(ERR, USER1, 3987 "Can't allocate buffer for big-data\n"); 3988 return TEST_FAILED; 3989 } 3990 3991 struct interim_data_params int_data = { 3992 (const char * const *)&test_buffer, 3993 1, 3994 &i, 3995 &ts_params->def_comp_xform, 3996 &ts_params->def_decomp_xform, 3997 1 3998 }; 3999 4000 struct test_data_params test_data = { 4001 .compress_state = RTE_COMP_OP_STATELESS, 4002 .decompress_state = RTE_COMP_OP_STATELESS, 4003 .buff_type = SGL_BOTH, 4004 .zlib_dir = ZLIB_NONE, 4005 .out_of_space = 0, 4006 .big_data = 1, 4007 .overflow = OVERFLOW_DISABLED, 4008 .ratio = RATIO_DISABLED 4009 }; 4010 4011 ts_params->def_comp_xform->compress.deflate.huffman = 4012 RTE_COMP_HUFFMAN_DYNAMIC; 4013 4014 /* fill the buffer with data based on rand. data */ 4015 srand(IM_BUF_DATA_TEST_SIZE_OVER); 4016 for (j = 0; j < IM_BUF_DATA_TEST_SIZE_OVER - 1; ++j) 4017 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1; 4018 4019 /* Compress with compressdev, decompress with compressdev */ 4020 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) { 4021 ret = TEST_SUCCESS; 4022 goto end; 4023 } 4024 4025 end: 4026 ts_params->def_comp_xform->compress.deflate.huffman = 4027 RTE_COMP_HUFFMAN_DEFAULT; 4028 rte_free(test_buffer); 4029 4030 return ret; 4031 } 4032 4033 4034 static int 4035 test_compressdev_deflate_im_buffers_SGL_over_2ops_first(void) 4036 { 4037 struct comp_testsuite_params *ts_params = &testsuite_params; 4038 uint16_t i = 0; 4039 int ret = TEST_SUCCESS; 4040 int j; 4041 const struct rte_compressdev_capabilities *capab; 4042 char *test_buffer = NULL; 4043 const char *test_buffers[2]; 4044 4045 RTE_LOG(INFO, USER1, "This is a negative test, errors are expected\n"); 4046 4047 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); 4048 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities"); 4049 4050 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0) 4051 return -ENOTSUP; 4052 4053 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0) 4054 return -ENOTSUP; 4055 4056 test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_OVER, 0); 4057 if (test_buffer == NULL) { 4058 RTE_LOG(ERR, USER1, 4059 "Can't allocate buffer for big-data\n"); 4060 return TEST_FAILED; 4061 } 4062 4063 test_buffers[0] = test_buffer; 4064 test_buffers[1] = compress_test_bufs[0]; 4065 4066 struct interim_data_params int_data = { 4067 (const char * const *)test_buffers, 4068 2, 4069 &i, 4070 &ts_params->def_comp_xform, 4071 &ts_params->def_decomp_xform, 4072 1 4073 }; 4074 4075 struct test_data_params test_data = { 4076 .compress_state = RTE_COMP_OP_STATELESS, 4077 .decompress_state = RTE_COMP_OP_STATELESS, 4078 .buff_type = SGL_BOTH, 4079 .zlib_dir = ZLIB_NONE, 4080 .out_of_space = 0, 4081 .big_data = 1, 4082 .overflow = OVERFLOW_DISABLED, 4083 .ratio = RATIO_DISABLED 4084 }; 4085 4086 ts_params->def_comp_xform->compress.deflate.huffman = 4087 RTE_COMP_HUFFMAN_DYNAMIC; 4088 4089 /* fill the buffer with data based on rand. data */ 4090 srand(IM_BUF_DATA_TEST_SIZE_OVER); 4091 for (j = 0; j < IM_BUF_DATA_TEST_SIZE_OVER - 1; ++j) 4092 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1; 4093 4094 /* Compress with compressdev, decompress with compressdev */ 4095 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) { 4096 ret = TEST_SUCCESS; 4097 goto end; 4098 } 4099 4100 end: 4101 ts_params->def_comp_xform->compress.deflate.huffman = 4102 RTE_COMP_HUFFMAN_DEFAULT; 4103 rte_free(test_buffer); 4104 return ret; 4105 } 4106 4107 static int 4108 test_compressdev_deflate_im_buffers_SGL_over_2ops_second(void) 4109 { 4110 struct comp_testsuite_params *ts_params = &testsuite_params; 4111 uint16_t i = 0; 4112 int ret = TEST_SUCCESS; 4113 int j; 4114 const struct rte_compressdev_capabilities *capab; 4115 char *test_buffer = NULL; 4116 const char *test_buffers[2]; 4117 4118 RTE_LOG(INFO, USER1, "This is a negative test, errors are expected\n"); 4119 4120 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE); 4121 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities"); 4122 4123 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0) 4124 return -ENOTSUP; 4125 4126 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0) 4127 return -ENOTSUP; 4128 4129 test_buffer = rte_malloc(NULL, IM_BUF_DATA_TEST_SIZE_OVER, 0); 4130 if (test_buffer == NULL) { 4131 RTE_LOG(ERR, USER1, 4132 "Can't allocate buffer for big-data\n"); 4133 return TEST_FAILED; 4134 } 4135 4136 test_buffers[0] = compress_test_bufs[0]; 4137 test_buffers[1] = test_buffer; 4138 4139 struct interim_data_params int_data = { 4140 (const char * const *)test_buffers, 4141 2, 4142 &i, 4143 &ts_params->def_comp_xform, 4144 &ts_params->def_decomp_xform, 4145 1 4146 }; 4147 4148 struct test_data_params test_data = { 4149 .compress_state = RTE_COMP_OP_STATELESS, 4150 .decompress_state = RTE_COMP_OP_STATELESS, 4151 .buff_type = SGL_BOTH, 4152 .zlib_dir = ZLIB_NONE, 4153 .out_of_space = 0, 4154 .big_data = 1, 4155 .overflow = OVERFLOW_DISABLED, 4156 .ratio = RATIO_DISABLED 4157 }; 4158 4159 ts_params->def_comp_xform->compress.deflate.huffman = 4160 RTE_COMP_HUFFMAN_DYNAMIC; 4161 4162 /* fill the buffer with data based on rand. data */ 4163 srand(IM_BUF_DATA_TEST_SIZE_OVER); 4164 for (j = 0; j < IM_BUF_DATA_TEST_SIZE_OVER - 1; ++j) 4165 test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1; 4166 4167 /* Compress with compressdev, decompress with compressdev */ 4168 if (test_deflate_comp_decomp(&int_data, &test_data) < 0) { 4169 ret = TEST_SUCCESS; 4170 goto end; 4171 } 4172 4173 end: 4174 ts_params->def_comp_xform->compress.deflate.huffman = 4175 RTE_COMP_HUFFMAN_DEFAULT; 4176 rte_free(test_buffer); 4177 return ret; 4178 } 4179 4180 static struct unit_test_suite compressdev_testsuite = { 4181 .suite_name = "compressdev unit test suite", 4182 .setup = testsuite_setup, 4183 .teardown = testsuite_teardown, 4184 .unit_test_cases = { 4185 TEST_CASE_ST(NULL, NULL, 4186 test_compressdev_invalid_configuration), 4187 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, 4188 test_compressdev_deflate_stateless_fixed), 4189 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, 4190 test_compressdev_deflate_stateless_dynamic), 4191 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, 4192 test_compressdev_deflate_stateless_dynamic_big), 4193 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, 4194 test_compressdev_deflate_stateless_multi_op), 4195 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, 4196 test_compressdev_deflate_stateless_multi_level), 4197 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, 4198 test_compressdev_deflate_stateless_multi_xform), 4199 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, 4200 test_compressdev_deflate_stateless_sgl), 4201 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, 4202 test_compressdev_deflate_stateless_checksum), 4203 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, 4204 test_compressdev_out_of_space_buffer), 4205 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, 4206 test_compressdev_deflate_stateful_decomp), 4207 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, 4208 test_compressdev_deflate_stateful_decomp_checksum), 4209 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, 4210 test_compressdev_external_mbufs), 4211 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, 4212 test_compressdev_deflate_stateless_fixed_oos_recoverable), 4213 4214 /* Positive test cases for IM buffer handling verification */ 4215 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, 4216 test_compressdev_deflate_im_buffers_LB_1op), 4217 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, 4218 test_compressdev_deflate_im_buffers_LB_2ops_first), 4219 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, 4220 test_compressdev_deflate_im_buffers_LB_2ops_second), 4221 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, 4222 test_compressdev_deflate_im_buffers_LB_3ops), 4223 4224 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, 4225 test_compressdev_deflate_im_buffers_LB_4ops), 4226 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, 4227 test_compressdev_deflate_im_buffers_SGL_1op), 4228 4229 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, 4230 test_compressdev_deflate_im_buffers_SGL_2ops_first), 4231 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, 4232 test_compressdev_deflate_im_buffers_SGL_2ops_second), 4233 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, 4234 test_compressdev_deflate_im_buffers_SGL_3ops), 4235 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, 4236 test_compressdev_deflate_im_buffers_SGL_4ops), 4237 4238 /* Negative test cases for IM buffer handling verification */ 4239 4240 /* For this test huge mempool is necessary. 4241 * It tests one case: 4242 * only one op containing big amount of data, so that 4243 * number of requested descriptors higher than number 4244 * of available descriptors (128) 4245 */ 4246 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, 4247 test_compressdev_deflate_im_buffers_SGL_over_1op), 4248 4249 /* For this test huge mempool is necessary. 4250 * 2 ops. First op contains big amount of data: 4251 * number of requested descriptors higher than number 4252 * of available descriptors (128), the second op is 4253 * relatively small. In this case both ops are rejected 4254 */ 4255 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, 4256 test_compressdev_deflate_im_buffers_SGL_over_2ops_first), 4257 4258 TEST_CASE_ST(generic_ut_setup, generic_ut_teardown, 4259 test_compressdev_deflate_im_buffers_SGL_over_2ops_second), 4260 4261 TEST_CASES_END() /**< NULL terminate unit test array */ 4262 } 4263 }; 4264 4265 static int 4266 test_compressdev(void) 4267 { 4268 return unit_test_suite_runner(&compressdev_testsuite); 4269 } 4270 4271 REGISTER_TEST_COMMAND(compressdev_autotest, test_compressdev); 4272