xref: /dpdk/app/test/test_compressdev.c (revision fdf7471cccb8be023037c218d1402c0549eb2c8e)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 - 2019 Intel Corporation
3  */
4 #include <string.h>
5 #include <zlib.h>
6 #include <math.h>
7 #include <stdlib.h>
8 #include <unistd.h>
9 
10 #include <rte_cycles.h>
11 #include <rte_malloc.h>
12 #include <rte_mempool.h>
13 #include <rte_mbuf.h>
14 #include <rte_compressdev.h>
15 #include <rte_string_fns.h>
16 
17 #include "test_compressdev_test_buffer.h"
18 #include "test.h"
19 
20 #define DIV_CEIL(a, b)  ((a) / (b) + ((a) % (b) != 0))
21 
22 #define DEFAULT_WINDOW_SIZE 15
23 #define DEFAULT_MEM_LEVEL 8
24 #define MAX_DEQD_RETRIES 10
25 #define DEQUEUE_WAIT_TIME 10000
26 
27 /*
28  * 30% extra size for compressed data compared to original data,
29  * in case data size cannot be reduced and it is actually bigger
30  * due to the compress block headers
31  */
32 #define COMPRESS_BUF_SIZE_RATIO 1.3
33 #define COMPRESS_BUF_SIZE_RATIO_OVERFLOW 0.2
34 #define NUM_LARGE_MBUFS 16
35 #define SMALL_SEG_SIZE 256
36 #define MAX_SEGS 16
37 #define NUM_OPS 16
38 #define NUM_MAX_XFORMS 16
39 #define NUM_MAX_INFLIGHT_OPS 128
40 #define CACHE_SIZE 0
41 
42 #define ZLIB_CRC_CHECKSUM_WINDOW_BITS 31
43 #define ZLIB_HEADER_SIZE 2
44 #define ZLIB_TRAILER_SIZE 4
45 #define GZIP_HEADER_SIZE 10
46 #define GZIP_TRAILER_SIZE 8
47 
48 #define OUT_OF_SPACE_BUF 1
49 
50 #define MAX_MBUF_SEGMENT_SIZE 65535
51 #define MAX_DATA_MBUF_SIZE (MAX_MBUF_SEGMENT_SIZE - RTE_PKTMBUF_HEADROOM)
52 #define NUM_BIG_MBUFS 4
53 #define BIG_DATA_TEST_SIZE (MAX_DATA_MBUF_SIZE * NUM_BIG_MBUFS / 2)
54 
/* Human-readable names for each Huffman mode, indexed by enum rte_comp_huffman */
const char *
huffman_type_strings[] = {
	[RTE_COMP_HUFFMAN_DEFAULT]	= "PMD default",
	[RTE_COMP_HUFFMAN_FIXED]	= "Fixed",
	[RTE_COMP_HUFFMAN_DYNAMIC]	= "Dynamic"
};
61 
/*
 * Which direction(s) of a test should be performed by zlib instead of the
 * compressdev PMD (zlib acts as the reference implementation).
 */
enum zlib_direction {
	ZLIB_NONE,	/* PMD does both compress and decompress */
	ZLIB_COMPRESS,	/* zlib compresses, PMD decompresses */
	ZLIB_DECOMPRESS,	/* PMD compresses, zlib decompresses */
	ZLIB_ALL	/* zlib does both directions */
};
68 
/* Buffer layout (linear buffer vs chained scatter-gather list) per side */
enum varied_buff {
	LB_BOTH = 0,	/* both input and output are linear */
	SGL_BOTH,	/* both input and output are chained */
	SGL_TO_LB,	/* input buffer is chained */
	LB_TO_SGL	/* output buffer is chained */
};
75 
/* Whether the destination buffer is deliberately undersized to force
 * an out-of-space (overflow) condition in the PMD.
 */
enum overflow_test {
	OVERFLOW_DISABLED,
	OVERFLOW_ENABLED
};
80 
/* Direction of the operation currently being set up/verified */
enum operation_type {
	OPERATION_COMPRESSION,
	OPERATION_DECOMPRESSION
};
85 
/* Per-op private area (reserved at rte_comp_op_pool_create() time):
 * remembers which test buffer the op originated from, since ops may
 * be dequeued out of order.
 */
struct priv_op_data {
	uint16_t orig_idx;	/* index into the test buffer array */
};
89 
/* Suite-wide resources, created once in testsuite_setup() */
struct comp_testsuite_params {
	struct rte_mempool *large_mbuf_pool;	/* linear bufs, sized for worst case */
	struct rte_mempool *small_mbuf_pool;	/* SMALL_SEG_SIZE segments for SGL tests */
	struct rte_mempool *big_mbuf_pool;	/* max-size segments for big-data tests */
	struct rte_mempool *op_pool;		/* rte_comp_op pool with priv_op_data */
	struct rte_comp_xform *def_comp_xform;	/* default deflate compress xform */
	struct rte_comp_xform *def_decomp_xform;	/* default deflate decompress xform */
};
98 
/* Input data and transforms shared by one test run */
struct interim_data_params {
	const char * const *test_bufs;	/* NUL-terminated test vectors */
	unsigned int num_bufs;		/* number of entries in test_bufs */
	uint16_t *buf_idx;		/* per-op original buffer indices */
	struct rte_comp_xform **compress_xforms;
	struct rte_comp_xform **decompress_xforms;
	unsigned int num_xforms;	/* xforms are cycled across bufs */
};
107 
/* Per-test-case knobs describing how the run should be performed */
struct test_data_params {
	enum rte_comp_op_type compress_state;	/* stateless or stateful */
	enum rte_comp_op_type decompress_state;	/* stateless or stateful */
	enum varied_buff buff_type;	/* linear/SGL combination */
	enum zlib_direction zlib_dir;	/* which side(s) zlib performs */
	unsigned int out_of_space;	/* non-zero: force tiny output buffer */
	unsigned int big_data;		/* non-zero: use the big mbuf pool */
	/* stateful decompression specific parameters */
	unsigned int decompress_output_block_size;
	unsigned int decompress_steps_max;
	/* external mbufs specific parameters */
	unsigned int use_external_mbufs;	/* attach memzones as ext bufs */
	unsigned int inbuf_data_size;
	const struct rte_memzone *inbuf_memzone;
	const struct rte_memzone *compbuf_memzone;
	const struct rte_memzone *uncompbuf_memzone;
	/* overflow test activation */
	enum overflow_test overflow;
};
127 
/* Aggregates all per-buffer working arrays a test run allocates,
 * so they can be passed around (and freed) as one unit.
 */
struct test_private_arrays {
	struct rte_mbuf **uncomp_bufs;	/* source bufs (compression) / dst (decomp) */
	struct rte_mbuf **comp_bufs;	/* compressed-data bufs */
	struct rte_comp_op **ops;	/* ops to enqueue */
	struct rte_comp_op **ops_processed;	/* ops as dequeued */
	void **priv_xforms;		/* PMD private xform handles */
	uint64_t *compress_checksum;	/* checksum reported per buffer */
	uint32_t *compressed_data_size;	/* bytes produced per buffer */
	void **stream;			/* stateful op stream handle */
	char **all_decomp_data;		/* accumulator for stateful decompression */
	unsigned int *decomp_produced_data_size;
	uint16_t num_priv_xforms;	/* how many priv_xforms were created */
};
141 
142 static struct comp_testsuite_params testsuite_params = { 0 };
143 
144 static void
145 testsuite_teardown(void)
146 {
147 	struct comp_testsuite_params *ts_params = &testsuite_params;
148 
149 	if (rte_mempool_in_use_count(ts_params->large_mbuf_pool))
150 		RTE_LOG(ERR, USER1, "Large mbuf pool still has unfreed bufs\n");
151 	if (rte_mempool_in_use_count(ts_params->small_mbuf_pool))
152 		RTE_LOG(ERR, USER1, "Small mbuf pool still has unfreed bufs\n");
153 	if (rte_mempool_in_use_count(ts_params->big_mbuf_pool))
154 		RTE_LOG(ERR, USER1, "Big mbuf pool still has unfreed bufs\n");
155 	if (rte_mempool_in_use_count(ts_params->op_pool))
156 		RTE_LOG(ERR, USER1, "op pool still has unfreed ops\n");
157 
158 	rte_mempool_free(ts_params->large_mbuf_pool);
159 	rte_mempool_free(ts_params->small_mbuf_pool);
160 	rte_mempool_free(ts_params->big_mbuf_pool);
161 	rte_mempool_free(ts_params->op_pool);
162 	rte_free(ts_params->def_comp_xform);
163 	rte_free(ts_params->def_decomp_xform);
164 }
165 
166 static int
167 testsuite_setup(void)
168 {
169 	struct comp_testsuite_params *ts_params = &testsuite_params;
170 	uint32_t max_buf_size = 0;
171 	unsigned int i;
172 
173 	if (rte_compressdev_count() == 0) {
174 		RTE_LOG(WARNING, USER1, "Need at least one compress device\n");
175 		return TEST_SKIPPED;
176 	}
177 
178 	RTE_LOG(NOTICE, USER1, "Running tests on device %s\n",
179 				rte_compressdev_name_get(0));
180 
181 	for (i = 0; i < RTE_DIM(compress_test_bufs); i++)
182 		max_buf_size = RTE_MAX(max_buf_size,
183 				strlen(compress_test_bufs[i]) + 1);
184 
185 	/*
186 	 * Buffers to be used in compression and decompression.
187 	 * Since decompressed data might be larger than
188 	 * compressed data (due to block header),
189 	 * buffers should be big enough for both cases.
190 	 */
191 	max_buf_size *= COMPRESS_BUF_SIZE_RATIO;
192 	ts_params->large_mbuf_pool = rte_pktmbuf_pool_create("large_mbuf_pool",
193 			NUM_LARGE_MBUFS,
194 			CACHE_SIZE, 0,
195 			max_buf_size + RTE_PKTMBUF_HEADROOM,
196 			rte_socket_id());
197 	if (ts_params->large_mbuf_pool == NULL) {
198 		RTE_LOG(ERR, USER1, "Large mbuf pool could not be created\n");
199 		return TEST_FAILED;
200 	}
201 
202 	/* Create mempool with smaller buffers for SGL testing */
203 	ts_params->small_mbuf_pool = rte_pktmbuf_pool_create("small_mbuf_pool",
204 			NUM_LARGE_MBUFS * MAX_SEGS,
205 			CACHE_SIZE, 0,
206 			SMALL_SEG_SIZE + RTE_PKTMBUF_HEADROOM,
207 			rte_socket_id());
208 	if (ts_params->small_mbuf_pool == NULL) {
209 		RTE_LOG(ERR, USER1, "Small mbuf pool could not be created\n");
210 		goto exit;
211 	}
212 
213 	/* Create mempool with big buffers for SGL testing */
214 	ts_params->big_mbuf_pool = rte_pktmbuf_pool_create("big_mbuf_pool",
215 			NUM_BIG_MBUFS + 1,
216 			CACHE_SIZE, 0,
217 			MAX_MBUF_SEGMENT_SIZE,
218 			rte_socket_id());
219 	if (ts_params->big_mbuf_pool == NULL) {
220 		RTE_LOG(ERR, USER1, "Big mbuf pool could not be created\n");
221 		goto exit;
222 	}
223 
224 	ts_params->op_pool = rte_comp_op_pool_create("op_pool", NUM_OPS,
225 				0, sizeof(struct priv_op_data),
226 				rte_socket_id());
227 	if (ts_params->op_pool == NULL) {
228 		RTE_LOG(ERR, USER1, "Operation pool could not be created\n");
229 		goto exit;
230 	}
231 
232 	ts_params->def_comp_xform =
233 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
234 	if (ts_params->def_comp_xform == NULL) {
235 		RTE_LOG(ERR, USER1,
236 			"Default compress xform could not be created\n");
237 		goto exit;
238 	}
239 	ts_params->def_decomp_xform =
240 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
241 	if (ts_params->def_decomp_xform == NULL) {
242 		RTE_LOG(ERR, USER1,
243 			"Default decompress xform could not be created\n");
244 		goto exit;
245 	}
246 
247 	/* Initializes default values for compress/decompress xforms */
248 	ts_params->def_comp_xform->type = RTE_COMP_COMPRESS;
249 	ts_params->def_comp_xform->compress.algo = RTE_COMP_ALGO_DEFLATE,
250 	ts_params->def_comp_xform->compress.deflate.huffman =
251 						RTE_COMP_HUFFMAN_DEFAULT;
252 	ts_params->def_comp_xform->compress.level = RTE_COMP_LEVEL_PMD_DEFAULT;
253 	ts_params->def_comp_xform->compress.chksum = RTE_COMP_CHECKSUM_NONE;
254 	ts_params->def_comp_xform->compress.window_size = DEFAULT_WINDOW_SIZE;
255 
256 	ts_params->def_decomp_xform->type = RTE_COMP_DECOMPRESS;
257 	ts_params->def_decomp_xform->decompress.algo = RTE_COMP_ALGO_DEFLATE,
258 	ts_params->def_decomp_xform->decompress.chksum = RTE_COMP_CHECKSUM_NONE;
259 	ts_params->def_decomp_xform->decompress.window_size = DEFAULT_WINDOW_SIZE;
260 
261 	return TEST_SUCCESS;
262 
263 exit:
264 	testsuite_teardown();
265 
266 	return TEST_FAILED;
267 }
268 
269 static int
270 generic_ut_setup(void)
271 {
272 	/* Configure compressdev (one device, one queue pair) */
273 	struct rte_compressdev_config config = {
274 		.socket_id = rte_socket_id(),
275 		.nb_queue_pairs = 1,
276 		.max_nb_priv_xforms = NUM_MAX_XFORMS,
277 		.max_nb_streams = 1
278 	};
279 
280 	if (rte_compressdev_configure(0, &config) < 0) {
281 		RTE_LOG(ERR, USER1, "Device configuration failed\n");
282 		return -1;
283 	}
284 
285 	if (rte_compressdev_queue_pair_setup(0, 0, NUM_MAX_INFLIGHT_OPS,
286 			rte_socket_id()) < 0) {
287 		RTE_LOG(ERR, USER1, "Queue pair setup failed\n");
288 		return -1;
289 	}
290 
291 	if (rte_compressdev_start(0) < 0) {
292 		RTE_LOG(ERR, USER1, "Device could not be started\n");
293 		return -1;
294 	}
295 
296 	return 0;
297 }
298 
299 static void
300 generic_ut_teardown(void)
301 {
302 	rte_compressdev_stop(0);
303 	if (rte_compressdev_close(0) < 0)
304 		RTE_LOG(ERR, USER1, "Device could not be closed\n");
305 }
306 
307 static int
308 test_compressdev_invalid_configuration(void)
309 {
310 	struct rte_compressdev_config invalid_config;
311 	struct rte_compressdev_config valid_config = {
312 		.socket_id = rte_socket_id(),
313 		.nb_queue_pairs = 1,
314 		.max_nb_priv_xforms = NUM_MAX_XFORMS,
315 		.max_nb_streams = 1
316 	};
317 	struct rte_compressdev_info dev_info;
318 
319 	/* Invalid configuration with 0 queue pairs */
320 	memcpy(&invalid_config, &valid_config,
321 			sizeof(struct rte_compressdev_config));
322 	invalid_config.nb_queue_pairs = 0;
323 
324 	TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
325 			"Device configuration was successful "
326 			"with no queue pairs (invalid)\n");
327 
328 	/*
329 	 * Invalid configuration with too many queue pairs
330 	 * (if there is an actual maximum number of queue pairs)
331 	 */
332 	rte_compressdev_info_get(0, &dev_info);
333 	if (dev_info.max_nb_queue_pairs != 0) {
334 		memcpy(&invalid_config, &valid_config,
335 			sizeof(struct rte_compressdev_config));
336 		invalid_config.nb_queue_pairs = dev_info.max_nb_queue_pairs + 1;
337 
338 		TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
339 				"Device configuration was successful "
340 				"with too many queue pairs (invalid)\n");
341 	}
342 
343 	/* Invalid queue pair setup, with no number of queue pairs set */
344 	TEST_ASSERT_FAIL(rte_compressdev_queue_pair_setup(0, 0,
345 				NUM_MAX_INFLIGHT_OPS, rte_socket_id()),
346 			"Queue pair setup was successful "
347 			"with no queue pairs set (invalid)\n");
348 
349 	return TEST_SUCCESS;
350 }
351 
352 static int
353 compare_buffers(const char *buffer1, uint32_t buffer1_len,
354 		const char *buffer2, uint32_t buffer2_len)
355 {
356 	if (buffer1_len != buffer2_len) {
357 		RTE_LOG(ERR, USER1, "Buffer lengths are different\n");
358 		return -1;
359 	}
360 
361 	if (memcmp(buffer1, buffer2, buffer1_len) != 0) {
362 		RTE_LOG(ERR, USER1, "Buffers are different\n");
363 		return -1;
364 	}
365 
366 	return 0;
367 }
368 
/*
 * Translate a compressdev flush flag into zlib's equivalent flush value.
 * Returns -1 for any value outside the API-defined set.
 */
static int
map_zlib_flush_flag(enum rte_comp_flush_flag flag)
{
	if (flag == RTE_COMP_FLUSH_NONE)
		return Z_NO_FLUSH;
	if (flag == RTE_COMP_FLUSH_SYNC)
		return Z_SYNC_FLUSH;
	if (flag == RTE_COMP_FLUSH_FULL)
		return Z_FULL_FLUSH;
	if (flag == RTE_COMP_FLUSH_FINAL)
		return Z_FINISH;

	/* The API defines only the values above; anything else is a bug */
	return -1;
}
392 
/*
 * Reference compression path: run the whole of op->m_src through zlib's
 * deflate into op->m_dst, emulating a single stateless compressdev op.
 * Chained (SGL) mbufs are flattened into temporary linear buffers first.
 * On success fills op->consumed/produced/status/output_chksum and returns 0;
 * returns non-zero on any zlib or allocation failure.
 */
static int
compress_zlib(struct rte_comp_op *op,
		const struct rte_comp_xform *xform, int mem_level)
{
	z_stream stream;
	int zlib_flush;
	int strategy, window_bits, comp_level;
	int ret = TEST_FAILED;
	uint8_t *single_src_buf = NULL;	/* linearized copy of an SGL source */
	uint8_t *single_dst_buf = NULL;	/* linear staging area for an SGL dest */

	/* initialize zlib stream */
	stream.zalloc = Z_NULL;
	stream.zfree = Z_NULL;
	stream.opaque = Z_NULL;

	if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED)
		strategy = Z_FIXED;
	else
		strategy = Z_DEFAULT_STRATEGY;

	/*
	 * Window bits is the base two logarithm of the window size (in bytes).
	 * When doing raw DEFLATE, this number will be negative.
	 */
	window_bits = -(xform->compress.window_size);
	/* Positive window bits selects the zlib wrapper (adler32 checksum) */
	if (xform->compress.chksum == RTE_COMP_CHECKSUM_ADLER32)
		window_bits *= -1;
	/* 31 (16 + window bits) selects the gzip wrapper (CRC32 checksum) */
	else if (xform->compress.chksum == RTE_COMP_CHECKSUM_CRC32)
		window_bits = ZLIB_CRC_CHECKSUM_WINDOW_BITS;

	comp_level = xform->compress.level;

	if (comp_level != RTE_COMP_LEVEL_NONE)
		ret = deflateInit2(&stream, comp_level, Z_DEFLATED,
			window_bits, mem_level, strategy);
	else
		ret = deflateInit(&stream, Z_NO_COMPRESSION);

	if (ret != Z_OK) {
		printf("Zlib deflate could not be initialized\n");
		goto exit;
	}

	/* Assuming stateless operation */
	/* SGL Input */
	if (op->m_src->nb_segs > 1) {
		single_src_buf = rte_malloc(NULL,
				rte_pktmbuf_pkt_len(op->m_src), 0);
		if (single_src_buf == NULL) {
			RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
			goto exit;
		}

		/* Flatten the chain (from src.offset to the end) */
		if (rte_pktmbuf_read(op->m_src, op->src.offset,
					rte_pktmbuf_pkt_len(op->m_src) -
					op->src.offset,
					single_src_buf) == NULL) {
			RTE_LOG(ERR, USER1,
				"Buffer could not be read entirely\n");
			goto exit;
		}

		stream.avail_in = op->src.length;
		stream.next_in = single_src_buf;

	} else {
		stream.avail_in = op->src.length;
		stream.next_in = rte_pktmbuf_mtod_offset(op->m_src, uint8_t *,
				op->src.offset);
	}
	/* SGL output */
	if (op->m_dst->nb_segs > 1) {

		/* Deflate into a linear buffer; scattered back out below.
		 * NOTE(review): indentation of the NULL check below is
		 * misleading but the braces pair correctly — do not "fix"
		 * without re-verifying the block structure.
		 */
		single_dst_buf = rte_malloc(NULL,
				rte_pktmbuf_pkt_len(op->m_dst), 0);
			if (single_dst_buf == NULL) {
				RTE_LOG(ERR, USER1,
					"Buffer could not be allocated\n");
			goto exit;
		}

		stream.avail_out = op->m_dst->pkt_len;
		stream.next_out = single_dst_buf;

	} else {/* linear output */
		stream.avail_out = op->m_dst->data_len;
		stream.next_out = rte_pktmbuf_mtod_offset(op->m_dst, uint8_t *,
				op->dst.offset);
	}

	/* Stateless operation, all buffer will be compressed in one go */
	zlib_flush = map_zlib_flush_flag(op->flush_flag);
	ret = deflate(&stream, zlib_flush);

	if (stream.avail_in != 0) {
		RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
		goto exit;
	}

	if (ret != Z_STREAM_END)
		goto exit;

	/* Copy data to destination SGL */
	if (op->m_dst->nb_segs > 1) {
		uint32_t remaining_data = stream.total_out;
		uint8_t *src_data = single_dst_buf;
		struct rte_mbuf *dst_buf = op->m_dst;

		while (remaining_data > 0) {
			uint8_t *dst_data = rte_pktmbuf_mtod_offset(dst_buf,
						uint8_t *, op->dst.offset);
			/* Last segment */
			if (remaining_data < dst_buf->data_len) {
				memcpy(dst_data, src_data, remaining_data);
				remaining_data = 0;
			} else {
				memcpy(dst_data, src_data, dst_buf->data_len);
				remaining_data -= dst_buf->data_len;
				src_data += dst_buf->data_len;
				dst_buf = dst_buf->next;
			}
		}
	}

	op->consumed = stream.total_in;
	/* Strip the zlib/gzip wrapper so op->m_dst holds raw deflate data,
	 * as a compressdev PMD would produce; produced excludes the wrapper.
	 */
	if (xform->compress.chksum == RTE_COMP_CHECKSUM_ADLER32) {
		rte_pktmbuf_adj(op->m_dst, ZLIB_HEADER_SIZE);
		rte_pktmbuf_trim(op->m_dst, ZLIB_TRAILER_SIZE);
		op->produced = stream.total_out - (ZLIB_HEADER_SIZE +
				ZLIB_TRAILER_SIZE);
	} else if (xform->compress.chksum == RTE_COMP_CHECKSUM_CRC32) {
		rte_pktmbuf_adj(op->m_dst, GZIP_HEADER_SIZE);
		rte_pktmbuf_trim(op->m_dst, GZIP_TRAILER_SIZE);
		op->produced = stream.total_out - (GZIP_HEADER_SIZE +
				GZIP_TRAILER_SIZE);
	} else
		op->produced = stream.total_out;

	op->status = RTE_COMP_OP_STATUS_SUCCESS;
	/* stream.adler holds adler32 or crc32 depending on the wrapper */
	op->output_chksum = stream.adler;

	deflateReset(&stream);

	ret = 0;
exit:
	deflateEnd(&stream);
	rte_free(single_src_buf);
	rte_free(single_dst_buf);

	return ret;
}
545 
/*
 * Reference decompression path: inflate op->m_src into op->m_dst with zlib,
 * emulating a single stateless compressdev op. Chained (SGL) mbufs are
 * linearized through temporary buffers. On success fills
 * op->consumed/produced/status and returns 0; non-zero otherwise.
 */
static int
decompress_zlib(struct rte_comp_op *op,
		const struct rte_comp_xform *xform)
{
	z_stream stream;
	int window_bits;
	int zlib_flush;
	int ret = TEST_FAILED;
	uint8_t *single_src_buf = NULL;	/* linearized copy of an SGL source */
	uint8_t *single_dst_buf = NULL;	/* linear staging area for the output */

	/* initialize zlib stream */
	stream.zalloc = Z_NULL;
	stream.zfree = Z_NULL;
	stream.opaque = Z_NULL;

	/*
	 * Window bits is the base two logarithm of the window size (in bytes).
	 * When doing raw DEFLATE, this number will be negative.
	 */
	window_bits = -(xform->decompress.window_size);
	ret = inflateInit2(&stream, window_bits);

	if (ret != Z_OK) {
		printf("Zlib deflate could not be initialized\n");
		goto exit;
	}

	/* Assuming stateless operation */
	/* SGL */
	if (op->m_src->nb_segs > 1) {
		single_src_buf = rte_malloc(NULL,
				rte_pktmbuf_pkt_len(op->m_src), 0);
		if (single_src_buf == NULL) {
			RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
			goto exit;
		}
		single_dst_buf = rte_malloc(NULL,
				rte_pktmbuf_pkt_len(op->m_dst), 0);
		if (single_dst_buf == NULL) {
			RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
			goto exit;
		}
		if (rte_pktmbuf_read(op->m_src, 0,
					rte_pktmbuf_pkt_len(op->m_src),
					single_src_buf) == NULL) {
			RTE_LOG(ERR, USER1,
				"Buffer could not be read entirely\n");
			goto exit;
		}

		stream.avail_in = op->src.length;
		stream.next_in = single_src_buf;
		stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
		stream.next_out = single_dst_buf;

	} else {
		/* Linear source: inflate directly into m_dst's first segment */
		stream.avail_in = op->src.length;
		stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
		stream.avail_out = op->m_dst->data_len;
		stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
	}

	/* Stateless operation, all buffer will be compressed in one go */
	zlib_flush = map_zlib_flush_flag(op->flush_flag);
	ret = inflate(&stream, zlib_flush);

	if (stream.avail_in != 0) {
		RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
		goto exit;
	}

	if (ret != Z_STREAM_END)
		goto exit;

	/* Scatter the staged output across the destination chain.
	 * NOTE(review): this is gated on m_src being SGL (matching the
	 * staging branch above), not on m_dst — a linear-src/SGL-dst combo
	 * would only fill m_dst's first segment; confirm callers never use
	 * that combination through this zlib path.
	 */
	if (op->m_src->nb_segs > 1) {
		uint32_t remaining_data = stream.total_out;
		uint8_t *src_data = single_dst_buf;
		struct rte_mbuf *dst_buf = op->m_dst;

		while (remaining_data > 0) {
			uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
					uint8_t *);
			/* Last segment */
			if (remaining_data < dst_buf->data_len) {
				memcpy(dst_data, src_data, remaining_data);
				remaining_data = 0;
			} else {
				memcpy(dst_data, src_data, dst_buf->data_len);
				remaining_data -= dst_buf->data_len;
				src_data += dst_buf->data_len;
				dst_buf = dst_buf->next;
			}
		}
	}

	op->consumed = stream.total_in;
	op->produced = stream.total_out;
	op->status = RTE_COMP_OP_STATUS_SUCCESS;

	inflateReset(&stream);

	ret = 0;
exit:
	inflateEnd(&stream);

	return ret;
}
654 
/*
 * Build a scatter-gather chain of total_data_size bytes on head_buf,
 * copying test_buf's contents into the segments when test_buf != NULL
 * (a NULL test_buf just reserves space, e.g. for a destination chain).
 *
 * head_buf provides the first segment; additional seg_size segments come
 * from small_mbuf_pool, except a final over-sized remainder which comes
 * from large_mbuf_pool. limit_segs_in_sgl caps the chain length (0 = no
 * cap); when capped, the last segment absorbs all remaining data.
 * Returns 0 on success, -1 on allocation/append/chain failure.
 */
static int
prepare_sgl_bufs(const char *test_buf, struct rte_mbuf *head_buf,
		uint32_t total_data_size,
		struct rte_mempool *small_mbuf_pool,
		struct rte_mempool *large_mbuf_pool,
		uint8_t limit_segs_in_sgl,
		uint16_t seg_size)
{
	uint32_t remaining_data = total_data_size;
	uint16_t num_remaining_segs = DIV_CEIL(remaining_data, seg_size);
	struct rte_mempool *pool;
	struct rte_mbuf *next_seg;
	uint32_t data_size;
	char *buf_ptr;
	const char *data_ptr = test_buf;
	uint16_t i;
	int ret;

	/* -1 because head_buf already accounts for one segment */
	if (limit_segs_in_sgl != 0 && num_remaining_segs > limit_segs_in_sgl)
		num_remaining_segs = limit_segs_in_sgl - 1;

	/*
	 * Allocate data in the first segment (header) and
	 * copy data if test buffer is provided
	 */
	if (remaining_data < seg_size)
		data_size = remaining_data;
	else
		data_size = seg_size;

	buf_ptr = rte_pktmbuf_append(head_buf, data_size);
	if (buf_ptr == NULL) {
		RTE_LOG(ERR, USER1,
			"Not enough space in the 1st buffer\n");
		return -1;
	}

	if (data_ptr != NULL) {
		/* Copy characters without NULL terminator */
		strncpy(buf_ptr, data_ptr, data_size);
		data_ptr += data_size;
	}
	remaining_data -= data_size;
	num_remaining_segs--;

	/*
	 * Allocate the rest of the segments,
	 * copy the rest of the data and chain the segments.
	 */
	for (i = 0; i < num_remaining_segs; i++) {

		if (i == (num_remaining_segs - 1)) {
			/* last segment */
			/* If capped, the tail may exceed seg_size and needs
			 * a large-pool mbuf to hold it all
			 */
			if (remaining_data > seg_size)
				pool = large_mbuf_pool;
			else
				pool = small_mbuf_pool;
			data_size = remaining_data;
		} else {
			data_size = seg_size;
			pool = small_mbuf_pool;
		}

		next_seg = rte_pktmbuf_alloc(pool);
		if (next_seg == NULL) {
			RTE_LOG(ERR, USER1,
				"New segment could not be allocated "
				"from the mempool\n");
			return -1;
		}
		buf_ptr = rte_pktmbuf_append(next_seg, data_size);
		if (buf_ptr == NULL) {
			RTE_LOG(ERR, USER1,
				"Not enough space in the buffer\n");
			rte_pktmbuf_free(next_seg);
			return -1;
		}
		if (data_ptr != NULL) {
			/* Copy characters without NULL terminator */
			strncpy(buf_ptr, data_ptr, data_size);
			data_ptr += data_size;
		}
		remaining_data -= data_size;

		ret = rte_pktmbuf_chain(head_buf, next_seg);
		if (ret != 0) {
			rte_pktmbuf_free(next_seg);
			RTE_LOG(ERR, USER1,
				"Segment could not chained\n");
			return -1;
		}
	}

	return 0;
}
750 
/*
 * No-op free callback for externally attached buffers: the memory
 * belongs to memzones owned by the test, so nothing is released here.
 */
static void
extbuf_free_callback(void *addr __rte_unused, void *opaque __rte_unused)
{
}
755 
756 static int
757 test_run_enqueue_dequeue(struct rte_comp_op **ops,
758 			 struct rte_comp_op **ops_processed,
759 			 unsigned int num_bufs)
760 {
761 	uint16_t num_enqd, num_deqd, num_total_deqd;
762 	unsigned int deqd_retries = 0;
763 
764 	/* Enqueue and dequeue all operations */
765 	num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
766 	if (num_enqd < num_bufs) {
767 		RTE_LOG(ERR, USER1,
768 			"Some operations could not be enqueued\n");
769 		return -1;
770 	}
771 
772 	num_total_deqd = 0;
773 	do {
774 		/*
775 		 * If retrying a dequeue call, wait for 10 ms to allow
776 		 * enough time to the driver to process the operations
777 		 */
778 		if (deqd_retries != 0) {
779 			/*
780 			 * Avoid infinite loop if not all the
781 			 * operations get out of the device
782 			 */
783 			if (deqd_retries == MAX_DEQD_RETRIES) {
784 				RTE_LOG(ERR, USER1,
785 					"Not all operations could be dequeued\n");
786 				return -1;
787 			}
788 			usleep(DEQUEUE_WAIT_TIME);
789 		}
790 		num_deqd = rte_compressdev_dequeue_burst(0, 0,
791 				&ops_processed[num_total_deqd], num_bufs);
792 		num_total_deqd += num_deqd;
793 		deqd_retries++;
794 
795 	} while (num_total_deqd < num_enqd);
796 
797 	return 0;
798 }
799 
800 /**
801  * Arrays initialization. Input buffers preparation for compression.
802  *
803  * API that initializes all the private arrays to NULL
804  * and allocates input buffers to perform compression operations.
805  *
806  * @param int_data
807  *   Interim data containing session/transformation objects.
808  * @param test_data
809  *   The test parameters set by users (command line parameters).
810  * @param test_priv_data
811  *   A container used for aggregation all the private test arrays.
812  * @return
813  *   - 0: On success.
814  *   - -1: On error.
815  */
816 static int
817 test_setup_com_bufs(const struct interim_data_params *int_data,
818 		const struct test_data_params *test_data,
819 		const struct test_private_arrays *test_priv_data)
820 {
821 	/* local variables: */
822 	unsigned int i;
823 	uint32_t data_size;
824 	char *buf_ptr;
825 	int ret;
826 	char **all_decomp_data = test_priv_data->all_decomp_data;
827 
828 	struct comp_testsuite_params *ts_params = &testsuite_params;
829 
830 	/* from int_data: */
831 	const char * const *test_bufs = int_data->test_bufs;
832 	unsigned int num_bufs = int_data->num_bufs;
833 
834 	/* from test_data: */
835 	unsigned int buff_type = test_data->buff_type;
836 	unsigned int big_data = test_data->big_data;
837 
838 	/* from test_priv_data: */
839 	struct rte_mbuf **uncomp_bufs = test_priv_data->uncomp_bufs;
840 	struct rte_mempool *buf_pool;
841 
842 	static struct rte_mbuf_ext_shared_info inbuf_info;
843 
844 	size_t array_size = sizeof(void *) * num_bufs;
845 
846 	/* Initialize all arrays to NULL */
847 	memset(test_priv_data->uncomp_bufs, 0, array_size);
848 	memset(test_priv_data->comp_bufs, 0, array_size);
849 	memset(test_priv_data->ops, 0, array_size);
850 	memset(test_priv_data->ops_processed, 0, array_size);
851 	memset(test_priv_data->priv_xforms, 0, array_size);
852 	memset(test_priv_data->compressed_data_size,
853 	       0, sizeof(uint32_t) * num_bufs);
854 
855 	if (test_data->decompress_state == RTE_COMP_OP_STATEFUL) {
856 		data_size = strlen(test_bufs[0]) + 1;
857 		*all_decomp_data = rte_malloc(NULL, data_size,
858 					     RTE_CACHE_LINE_SIZE);
859 	}
860 
861 	if (big_data)
862 		buf_pool = ts_params->big_mbuf_pool;
863 	else if (buff_type == SGL_BOTH)
864 		buf_pool = ts_params->small_mbuf_pool;
865 	else
866 		buf_pool = ts_params->large_mbuf_pool;
867 
868 	/* for compression uncomp_bufs is used as a source buffer */
869 	/* allocation from buf_pool (mempool type) */
870 	ret = rte_pktmbuf_alloc_bulk(buf_pool,
871 				uncomp_bufs, num_bufs);
872 	if (ret < 0) {
873 		RTE_LOG(ERR, USER1,
874 			"Source mbufs could not be allocated "
875 			"from the mempool\n");
876 		return -1;
877 	}
878 
879 	if (test_data->use_external_mbufs) {
880 		inbuf_info.free_cb = extbuf_free_callback;
881 		inbuf_info.fcb_opaque = NULL;
882 		rte_mbuf_ext_refcnt_set(&inbuf_info, 1);
883 		for (i = 0; i < num_bufs; i++) {
884 			rte_pktmbuf_attach_extbuf(uncomp_bufs[i],
885 					test_data->inbuf_memzone->addr,
886 					test_data->inbuf_memzone->iova,
887 					test_data->inbuf_data_size,
888 					&inbuf_info);
889 			buf_ptr = rte_pktmbuf_append(uncomp_bufs[i],
890 					test_data->inbuf_data_size);
891 			if (buf_ptr == NULL) {
892 				RTE_LOG(ERR, USER1,
893 					"Append extra bytes to the source mbuf failed\n");
894 				return -1;
895 			}
896 		}
897 	} else if (buff_type == SGL_BOTH || buff_type == SGL_TO_LB) {
898 		for (i = 0; i < num_bufs; i++) {
899 			data_size = strlen(test_bufs[i]) + 1;
900 			if (prepare_sgl_bufs(test_bufs[i], uncomp_bufs[i],
901 			    data_size,
902 			    big_data ? buf_pool : ts_params->small_mbuf_pool,
903 			    big_data ? buf_pool : ts_params->large_mbuf_pool,
904 			    big_data ? 0 : MAX_SEGS,
905 			    big_data ? MAX_DATA_MBUF_SIZE : SMALL_SEG_SIZE) < 0)
906 				return -1;
907 		}
908 	} else {
909 		for (i = 0; i < num_bufs; i++) {
910 			data_size = strlen(test_bufs[i]) + 1;
911 
912 			buf_ptr = rte_pktmbuf_append(uncomp_bufs[i], data_size);
913 			if (buf_ptr == NULL) {
914 				RTE_LOG(ERR, USER1,
915 					"Append extra bytes to the source mbuf failed\n");
916 				return -1;
917 			}
918 			strlcpy(buf_ptr, test_bufs[i], data_size);
919 		}
920 	}
921 
922 	return 0;
923 }
924 
925 /**
926  * Data size calculation (for both compression and decompression).
927  *
928  * Calculate size of anticipated output buffer required for both
929  * compression and decompression operations based on input int_data.
930  *
931  * @param op_type
932  *   Operation type: compress or decompress
933  * @param out_of_space_and_zlib
934  *   Boolean value to switch into "out of space" buffer if set.
935  *   To test "out-of-space" data size, zlib_decompress must be set as well.
936  * @param test_priv_data
937  *   A container used for aggregation all the private test arrays.
938  * @param int_data
939  *   Interim data containing session/transformation objects.
940  * @param test_data
941  *   The test parameters set by users (command line parameters).
942  * @param i
943  *   current buffer index
944  * @return
945  *   data size
946  */
947 static inline uint32_t
948 test_mbufs_calculate_data_size(
949 		enum operation_type op_type,
950 		unsigned int out_of_space_and_zlib,
951 		const struct test_private_arrays *test_priv_data,
952 		const struct interim_data_params *int_data,
953 		const struct test_data_params *test_data,
954 		unsigned int i)
955 {
956 	/* local variables: */
957 	uint32_t data_size;
958 	struct priv_op_data *priv_data;
959 	float ratio;
960 	uint8_t not_zlib_compr; /* true if zlib isn't current compression dev */
961 	enum overflow_test overflow = test_data->overflow;
962 
963 	/* from test_priv_data: */
964 	struct rte_comp_op **ops_processed = test_priv_data->ops_processed;
965 
966 	/* from int_data: */
967 	const char * const *test_bufs = int_data->test_bufs;
968 
969 	if (out_of_space_and_zlib)
970 		data_size = OUT_OF_SPACE_BUF;
971 	else {
972 		if (op_type == OPERATION_COMPRESSION) {
973 			not_zlib_compr = (test_data->zlib_dir == ZLIB_DECOMPRESS
974 				|| test_data->zlib_dir == ZLIB_NONE);
975 
976 			ratio = (not_zlib_compr &&
977 				(overflow == OVERFLOW_ENABLED)) ?
978 				COMPRESS_BUF_SIZE_RATIO_OVERFLOW :
979 				COMPRESS_BUF_SIZE_RATIO;
980 
981 			data_size = strlen(test_bufs[i]) * ratio;
982 
983 		} else {
984 			priv_data = (struct priv_op_data *)
985 					(ops_processed[i] + 1);
986 			data_size = strlen(test_bufs[priv_data->orig_idx]) + 1;
987 		}
988 	}
989 
990 	return data_size;
991 }
992 
993 
994 /**
995  * Memory buffers preparation (for both compression and decompression).
996  *
997  * Function allocates output buffers to perform compression
998  * or decompression operations depending on value of op_type.
999  *
1000  * @param op_type
1001  *   Operation type: compress or decompress
1002  * @param out_of_space_and_zlib
1003  *   Boolean value to switch into "out of space" buffer if set.
1004  *   To test "out-of-space" data size, zlib_decompress must be set as well.
1005  * @param test_priv_data
1006  *   A container used for aggregation all the private test arrays.
1007  * @param int_data
1008  *   Interim data containing session/transformation objects.
1009  * @param test_data
1010  *   The test parameters set by users (command line parameters).
 * @param current_extbuf_info
1012  *   The structure containing all the information related to external mbufs
1013  * @return
1014  *   - 0: On success.
1015  *   - -1: On error.
1016  */
1017 static int
1018 test_setup_output_bufs(
1019 		enum operation_type op_type,
1020 		unsigned int out_of_space_and_zlib,
1021 		const struct test_private_arrays *test_priv_data,
1022 		const struct interim_data_params *int_data,
1023 		const struct test_data_params *test_data,
1024 		struct rte_mbuf_ext_shared_info *current_extbuf_info)
1025 {
1026 	/* local variables: */
1027 	unsigned int i;
1028 	uint32_t data_size;
1029 	int ret;
1030 	char *buf_ptr;
1031 
1032 	/* from test_priv_data: */
1033 	struct rte_mbuf **current_bufs;
1034 
1035 	/* from int_data: */
1036 	unsigned int num_bufs = int_data->num_bufs;
1037 
1038 	/* from test_data: */
1039 	unsigned int buff_type = test_data->buff_type;
1040 	unsigned int big_data = test_data->big_data;
1041 	const struct rte_memzone *current_memzone;
1042 
1043 	struct comp_testsuite_params *ts_params = &testsuite_params;
1044 	struct rte_mempool *buf_pool;
1045 
1046 	if (big_data)
1047 		buf_pool = ts_params->big_mbuf_pool;
1048 	else if (buff_type == SGL_BOTH)
1049 		buf_pool = ts_params->small_mbuf_pool;
1050 	else
1051 		buf_pool = ts_params->large_mbuf_pool;
1052 
1053 	if (op_type == OPERATION_COMPRESSION)
1054 		current_bufs = test_priv_data->comp_bufs;
1055 	else
1056 		current_bufs = test_priv_data->uncomp_bufs;
1057 
1058 	/* the mbufs allocation*/
1059 	ret = rte_pktmbuf_alloc_bulk(buf_pool, current_bufs, num_bufs);
1060 	if (ret < 0) {
1061 		RTE_LOG(ERR, USER1,
1062 			"Destination mbufs could not be allocated "
1063 			"from the mempool\n");
1064 		return -1;
1065 	}
1066 
1067 	if (test_data->use_external_mbufs) {
1068 		current_extbuf_info->free_cb = extbuf_free_callback;
1069 		current_extbuf_info->fcb_opaque = NULL;
1070 		rte_mbuf_ext_refcnt_set(current_extbuf_info, 1);
1071 		if (op_type == OPERATION_COMPRESSION)
1072 			current_memzone = test_data->compbuf_memzone;
1073 		else
1074 			current_memzone = test_data->uncompbuf_memzone;
1075 
1076 		for (i = 0; i < num_bufs; i++) {
1077 			rte_pktmbuf_attach_extbuf(current_bufs[i],
1078 					current_memzone->addr,
1079 					current_memzone->iova,
1080 					current_memzone->len,
1081 					current_extbuf_info);
1082 			rte_pktmbuf_append(current_bufs[i],
1083 					current_memzone->len);
1084 		}
1085 	} else {
1086 		for (i = 0; i < num_bufs; i++) {
1087 
1088 			/* data size calculation */
1089 			data_size = test_mbufs_calculate_data_size(
1090 					op_type,
1091 					out_of_space_and_zlib,
1092 					test_priv_data,
1093 					int_data,
1094 					test_data,
1095 					i);
1096 
1097 			/* data allocation */
1098 			if (buff_type == SGL_BOTH || buff_type == LB_TO_SGL) {
1099 				ret = prepare_sgl_bufs(NULL, current_bufs[i],
1100 				      data_size,
1101 				      big_data ? buf_pool :
1102 						ts_params->small_mbuf_pool,
1103 				      big_data ? buf_pool :
1104 						ts_params->large_mbuf_pool,
1105 				      big_data ? 0 : MAX_SEGS,
1106 				      big_data ? MAX_DATA_MBUF_SIZE :
1107 						 SMALL_SEG_SIZE);
1108 				if (ret < 0)
1109 					return -1;
1110 			} else {
1111 				buf_ptr = rte_pktmbuf_append(current_bufs[i],
1112 						data_size);
1113 				if (buf_ptr == NULL) {
1114 					RTE_LOG(ERR, USER1,
1115 						"Append extra bytes to the destination mbuf failed\n");
1116 					return -1;
1117 				}
1118 			}
1119 		}
1120 	}
1121 
1122 	return 0;
1123 }
1124 
1125 /**
1126  * The main compression function.
1127  *
1128  * Function performs compression operation.
1129  * Operation(s) configuration, depending on CLI parameters.
1130  * Operation(s) processing.
1131  *
1132  * @param int_data
1133  *   Interim data containing session/transformation objects.
1134  * @param test_data
1135  *   The test parameters set by users (command line parameters).
1136  * @param test_priv_data
1137  *   A container used for aggregation all the private test arrays.
1138  * @return
1139  *   - 0: On success.
1140  *   - -1: On error.
1141  */
static int
test_deflate_comp_run(const struct interim_data_params *int_data,
		const struct test_data_params *test_data,
		const struct test_private_arrays *test_priv_data)
{
	/* local variables: */
	struct priv_op_data *priv_data;
	unsigned int i;
	/* count of xforms actually created, so exit frees only those */
	uint16_t num_priv_xforms = 0;
	int ret;
	int ret_status = 0;
	char *buf_ptr;

	struct comp_testsuite_params *ts_params = &testsuite_params;

	/* from test_data: */
	enum rte_comp_op_type operation_type = test_data->compress_state;
	/* nonzero when zlib (not the PMD) performs the compression */
	unsigned int zlib_compress =
			(test_data->zlib_dir == ZLIB_ALL ||
			test_data->zlib_dir == ZLIB_COMPRESS);

	/* from int_data: */
	struct rte_comp_xform **compress_xforms = int_data->compress_xforms;
	unsigned int num_xforms = int_data->num_xforms;
	unsigned int num_bufs = int_data->num_bufs;

	/* from test_priv_data: */
	struct rte_mbuf **comp_bufs = test_priv_data->comp_bufs;
	struct rte_mbuf **uncomp_bufs = test_priv_data->uncomp_bufs;
	struct rte_comp_op **ops = test_priv_data->ops;
	struct rte_comp_op **ops_processed = test_priv_data->ops_processed;
	void **priv_xforms = test_priv_data->priv_xforms;

	/* capa was already NULL-checked by the caller before this runs */
	const struct rte_compressdev_capabilities *capa =
		rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);

	/* Build the compression operations */
	ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
	if (ret < 0) {
		RTE_LOG(ERR, USER1,
			"Compress operations could not be allocated "
			"from the mempool\n");
		ret_status = -1;
		goto exit;
	}

	for (i = 0; i < num_bufs; i++) {
		ops[i]->m_src = uncomp_bufs[i];
		ops[i]->m_dst = comp_bufs[i];
		ops[i]->src.offset = 0;
		ops[i]->src.length = rte_pktmbuf_pkt_len(uncomp_bufs[i]);
		ops[i]->dst.offset = 0;

		if (operation_type == RTE_COMP_OP_STATELESS) {
			ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
		} else {
			RTE_LOG(ERR, USER1,
				"Compression: stateful operations are not "
				"supported in these tests yet\n");
			ret_status = -1;
			goto exit;
		}
		ops[i]->input_chksum = 0;
		/*
		 * Store original operation index in private data,
		 * since ordering does not have to be maintained,
		 * when dequeueing from compressdev, so a comparison
		 * at the end of the test can be done.
		 */
		priv_data = (struct priv_op_data *) (ops[i] + 1);
		priv_data->orig_idx = i;
	}

	/* Compress data (either with Zlib API or compressdev API) */
	if (zlib_compress) {
		/* Reference path: compress each buffer directly with zlib */
		for (i = 0; i < num_bufs; i++) {
			const struct rte_comp_xform *compress_xform =
				compress_xforms[i % num_xforms];
			ret = compress_zlib(ops[i], compress_xform,
					DEFAULT_MEM_LEVEL);
			if (ret < 0) {
				ret_status = -1;
				goto exit;
			}

			ops_processed[i] = ops[i];
		}
	} else {
		/* Create compress private xform data */
		for (i = 0; i < num_xforms; i++) {
			ret = rte_compressdev_private_xform_create(0,
				(const struct rte_comp_xform *)
					compress_xforms[i],
				&priv_xforms[i]);
			if (ret < 0) {
				RTE_LOG(ERR, USER1,
					"Compression private xform "
					"could not be created\n");
				ret_status = -1;
				goto exit;
			}
			num_priv_xforms++;
		}
		if (capa->comp_feature_flags &
				RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
			/* Attach shareable private xform data to ops */
			for (i = 0; i < num_bufs; i++)
				ops[i]->private_xform =
						priv_xforms[i % num_xforms];
		} else {
		/* Create rest of the private xforms for the other ops */
			for (i = num_xforms; i < num_bufs; i++) {
				ret = rte_compressdev_private_xform_create(0,
					compress_xforms[i % num_xforms],
					&priv_xforms[i]);
				if (ret < 0) {
					RTE_LOG(ERR, USER1,
						"Compression private xform "
						"could not be created\n");
					ret_status = -1;
					goto exit;
				}
				num_priv_xforms++;
			}
			/* Attach non shareable private xform data to ops */
			for (i = 0; i < num_bufs; i++)
				ops[i]->private_xform = priv_xforms[i];
		}

		/*
		 * Enqueue/dequeue; if any op came back as
		 * OUT_OF_SPACE_RECOVERABLE, grow its destination mbuf,
		 * advance src/dst offsets past the data already processed,
		 * and loop back here to resubmit.
		 */
recovery_lb:
		ret = test_run_enqueue_dequeue(ops, ops_processed, num_bufs);
		if (ret < 0) {
			RTE_LOG(ERR, USER1,
				"Compression: enqueue/dequeue operation failed\n");
			ret_status = -1;
			goto exit;
		}

		for (i = 0; i < num_bufs; i++) {
			/* accumulate output across recovery rounds */
			test_priv_data->compressed_data_size[i] +=
					ops_processed[i]->produced;

			if (ops_processed[i]->status ==
				RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE) {

				ops[i]->status =
					RTE_COMP_OP_STATUS_NOT_PROCESSED;
				ops[i]->src.offset +=
					ops_processed[i]->consumed;
				ops[i]->src.length -=
					ops_processed[i]->consumed;
				ops[i]->dst.offset +=
					ops_processed[i]->produced;

				/* make room for the remaining output */
				buf_ptr = rte_pktmbuf_append(
					ops[i]->m_dst,
					ops_processed[i]->produced);

				if (buf_ptr == NULL) {
					RTE_LOG(ERR, USER1,
						"Data recovery: append extra bytes to the current mbuf failed\n");
					ret_status = -1;
					goto exit;
				}
				goto recovery_lb;
			}
		}
	}

exit:
	/* Free resources */
	if (ret_status < 0)
		for (i = 0; i < num_bufs; i++) {
			rte_comp_op_free(ops[i]);
			ops_processed[i] = NULL;
		}

	/* Free compress private xforms */
	for (i = 0; i < num_priv_xforms; i++) {
		if (priv_xforms[i] != NULL) {
			rte_compressdev_private_xform_free(0, priv_xforms[i]);
			priv_xforms[i] = NULL;
		}
	}

	return ret_status;
}
1329 
1330 /**
1331  * Prints out the test report. Memory freeing.
1332  *
1333  * Called after successful compression.
1334  * Operation(s) status validation and decompression buffers freeing.
1335 
1336  * -1 returned if function fail.
1337  *
1338  * @param int_data
1339  *   Interim data containing session/transformation objects.
1340  * @param test_data
1341  *   The test parameters set by users (command line parameters).
1342  * @param test_priv_data
1343  *   A container used for aggregation all the private test arrays.
1344  * @return
1345  *   - 2: Some operation is not supported
1346  *   - 1: Decompression should be skipped
1347  *   - 0: On success.
1348  *   - -1: On error.
1349  */
static int
test_deflate_comp_finalize(const struct interim_data_params *int_data,
		const struct test_data_params *test_data,
		const struct test_private_arrays *test_priv_data)
{
	/* local variables: */
	unsigned int i;
	struct priv_op_data *priv_data;

	/* from int_data: */
	unsigned int num_xforms = int_data->num_xforms;
	struct rte_comp_xform **compress_xforms = int_data->compress_xforms;
	uint16_t *buf_idx = int_data->buf_idx;
	unsigned int num_bufs = int_data->num_bufs;

	/* from test_priv_data: */
	struct rte_comp_op **ops_processed = test_priv_data->ops_processed;
	uint64_t *compress_checksum = test_priv_data->compress_checksum;
	struct rte_mbuf **uncomp_bufs = test_priv_data->uncomp_bufs;
	struct rte_comp_op **ops = test_priv_data->ops;

	/* from test_data: */
	unsigned int out_of_space = test_data->out_of_space;
	unsigned int zlib_compress =
			(test_data->zlib_dir == ZLIB_ALL ||
			test_data->zlib_dir == ZLIB_COMPRESS);
	unsigned int zlib_decompress =
			(test_data->zlib_dir == ZLIB_ALL ||
			test_data->zlib_dir == ZLIB_DECOMPRESS);

	/* Report per-buffer compression results and save checksums */
	for (i = 0; i < num_bufs; i++) {
		priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
		uint16_t xform_idx = priv_data->orig_idx % num_xforms;
		const struct rte_comp_compress_xform *compress_xform =
				&compress_xforms[xform_idx]->compress;
		enum rte_comp_huffman huffman_type =
			compress_xform->deflate.huffman;
		/*
		 * If zlib will do the decompression, the PMD did the
		 * compression; otherwise zlib compressed directly.
		 */
		char engine[] = "zlib (directly, not PMD)";
		if (zlib_decompress)
			strlcpy(engine, "PMD", sizeof(engine));

		RTE_LOG(DEBUG, USER1, "Buffer %u compressed by %s from %u to"
			" %u bytes (level = %d, huffman = %s)\n",
			buf_idx[priv_data->orig_idx], engine,
			ops_processed[i]->consumed, ops_processed[i]->produced,
			compress_xform->level,
			huffman_type_strings[huffman_type]);
		RTE_LOG(DEBUG, USER1, "Compression ratio = %.2f\n",
			ops_processed[i]->consumed == 0 ? 0 :
			(float)ops_processed[i]->produced /
			ops_processed[i]->consumed * 100);
		/* keep the checksum for comparison after decompression */
		if (compress_xform->chksum != RTE_COMP_CHECKSUM_NONE)
			compress_checksum[i] = ops_processed[i]->output_chksum;
		ops[i] = NULL;
	}

	/*
	 * Check operation status and free source mbufs (destination mbuf and
	 * compress operation information is needed for the decompression stage)
	 */
	for (i = 0; i < num_bufs; i++) {
		/* out-of-space test expects OUT_OF_SPACE_TERMINATED status */
		if (out_of_space && !zlib_compress) {
			if (ops_processed[i]->status !=
				RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
				RTE_LOG(ERR, USER1,
					"Operation without expected out of "
					"space status error\n");
				return -1;
			} else
				continue;
		}

		if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
			/*
			 * In the overflow test, a device without recoverable
			 * out-of-space support reports TERMINATED; signal
			 * "not supported" (2) rather than failure.
			 */
			if (test_data->overflow == OVERFLOW_ENABLED) {
				if (ops_processed[i]->status ==
				RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
					RTE_LOG(INFO, USER1,
					"Out-of-space-recoverable functionality"
					" is not supported on this device\n");
					return 2;
				}
			}

			RTE_LOG(ERR, USER1,
				"Some operations were not successful\n");
			return -1;
		}
		priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
		rte_pktmbuf_free(uncomp_bufs[priv_data->orig_idx]);
		uncomp_bufs[priv_data->orig_idx] = NULL;
	}

	/* out-of-space test ends here: skip the decompression stage */
	if (out_of_space && !zlib_compress)
		return 1;

	return 0;
}
1447 
1448 /**
1449  * The main decompression function.
1450  *
1451  * Function performs decompression operation.
1452  * Operation(s) configuration, depending on CLI parameters.
1453  * Operation(s) processing.
1454  *
1455  * @param int_data
1456  *   Interim data containing session/transformation objects.
1457  * @param test_data
1458  *   The test parameters set by users (command line parameters).
1459  * @param test_priv_data
1460  *   A container used for aggregation all the private test arrays.
1461  * @return
1462  *   - 0: On success.
1463  *   - -1: On error.
1464  */
static int
test_deflate_decomp_run(const struct interim_data_params *int_data,
		const struct test_data_params *test_data,
		struct test_private_arrays *test_priv_data)
{

	/* local variables: */
	struct priv_op_data *priv_data;
	unsigned int i;
	/* count of xforms actually created; handed back to the caller */
	uint16_t num_priv_xforms = 0;
	int ret;
	int ret_status = 0;

	struct comp_testsuite_params *ts_params = &testsuite_params;

	/* from test_data: */
	enum rte_comp_op_type operation_type = test_data->decompress_state;
	/* nonzero when zlib (not the PMD) performs the decompression */
	unsigned int zlib_decompress =
			(test_data->zlib_dir == ZLIB_ALL ||
			test_data->zlib_dir == ZLIB_DECOMPRESS);

	/* from int_data: */
	struct rte_comp_xform **decompress_xforms = int_data->decompress_xforms;
	unsigned int num_xforms = int_data->num_xforms;
	unsigned int num_bufs = int_data->num_bufs;

	/* from test_priv_data: */
	struct rte_mbuf **uncomp_bufs = test_priv_data->uncomp_bufs;
	struct rte_comp_op **ops = test_priv_data->ops;
	struct rte_comp_op **ops_processed = test_priv_data->ops_processed;
	void **priv_xforms = test_priv_data->priv_xforms;
	uint32_t *compressed_data_size = test_priv_data->compressed_data_size;
	void **stream = test_priv_data->stream;

	/* capa was already NULL-checked by the caller before this runs */
	const struct rte_compressdev_capabilities *capa =
		rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);

	ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
	if (ret < 0) {
		RTE_LOG(ERR, USER1,
			"Decompress operations could not be allocated "
			"from the mempool\n");
		ret_status = -1;
		goto exit;
	}

	/* Source buffer is the compressed data from the previous operations */
	for (i = 0; i < num_bufs; i++) {
		ops[i]->m_src = ops_processed[i]->m_dst;
		ops[i]->m_dst = uncomp_bufs[i];
		ops[i]->src.offset = 0;
		/*
		 * Set the length of the compressed data to the
		 * number of bytes that were produced in the previous stage
		 */

		/*
		 * compressed_data_size[i] is nonzero when the compression
		 * stage went through out-of-space recovery (accumulated
		 * total); otherwise use the single op's produced count.
		 */
		if (compressed_data_size[i])
			ops[i]->src.length = compressed_data_size[i];
		else
			ops[i]->src.length = ops_processed[i]->produced;

		ops[i]->dst.offset = 0;

		if (operation_type == RTE_COMP_OP_STATELESS) {
			ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
			ops[i]->op_type = RTE_COMP_OP_STATELESS;
		} else if (!zlib_decompress) {
			/* stateful PMD decompression: sync flush per step */
			ops[i]->flush_flag = RTE_COMP_FLUSH_SYNC;
			ops[i]->op_type = RTE_COMP_OP_STATEFUL;
		} else {
			RTE_LOG(ERR, USER1,
				"Decompression: stateful operations are"
				" not supported in these tests yet\n");
			ret_status = -1;
			goto exit;
		}
		ops[i]->input_chksum = 0;
		/*
		 * Copy private data from previous operations,
		 * to keep the pointer to the original buffer
		 */
		memcpy(ops[i] + 1, ops_processed[i] + 1,
				sizeof(struct priv_op_data));
	}

	/*
	 * Free the previous compress operations,
	 * as they are not needed anymore
	 */
	rte_comp_op_bulk_free(ops_processed, num_bufs);

	/* Decompress data (either with Zlib API or compressdev API) */
	if (zlib_decompress) {
		/* Reference path: decompress each buffer directly with zlib */
		for (i = 0; i < num_bufs; i++) {
			priv_data = (struct priv_op_data *)(ops[i] + 1);
			uint16_t xform_idx = priv_data->orig_idx % num_xforms;
			const struct rte_comp_xform *decompress_xform =
				decompress_xforms[xform_idx];

			ret = decompress_zlib(ops[i], decompress_xform);
			if (ret < 0) {
				ret_status = -1;
				goto exit;
			}

			ops_processed[i] = ops[i];
		}
	} else {
		if (operation_type == RTE_COMP_OP_STATELESS) {
			/* Create decompress private xform data */
			for (i = 0; i < num_xforms; i++) {
				ret = rte_compressdev_private_xform_create(0,
					(const struct rte_comp_xform *)
					decompress_xforms[i],
					&priv_xforms[i]);
				if (ret < 0) {
					RTE_LOG(ERR, USER1,
						"Decompression private xform "
						"could not be created\n");
					ret_status = -1;
					goto exit;
				}
				num_priv_xforms++;
			}

			if (capa->comp_feature_flags &
					RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
				/* Attach shareable private xform data to ops */
				for (i = 0; i < num_bufs; i++) {
					priv_data = (struct priv_op_data *)
							(ops[i] + 1);
					uint16_t xform_idx =
					       priv_data->orig_idx % num_xforms;
					ops[i]->private_xform =
							priv_xforms[xform_idx];
				}
			} else {
				/* Create rest of the private xforms */
				/* for the other ops */
				for (i = num_xforms; i < num_bufs; i++) {
					ret =
					 rte_compressdev_private_xform_create(0,
					      decompress_xforms[i % num_xforms],
					      &priv_xforms[i]);
					if (ret < 0) {
						RTE_LOG(ERR, USER1,
							"Decompression private xform"
							" could not be created\n");
						ret_status = -1;
						goto exit;
					}
					num_priv_xforms++;
				}

				/* Attach non shareable private xform data */
				/* to ops */
				for (i = 0; i < num_bufs; i++) {
					priv_data = (struct priv_op_data *)
							(ops[i] + 1);
					uint16_t xform_idx =
							priv_data->orig_idx;
					ops[i]->private_xform =
							priv_xforms[xform_idx];
				}
			}
		} else {
			/* Create a stream object for stateful decompression */
			ret = rte_compressdev_stream_create(0,
					decompress_xforms[0], stream);
			if (ret < 0) {
				RTE_LOG(ERR, USER1,
					"Decompression stream could not be created, error %d\n",
					ret);
				ret_status = -1;
				goto exit;
			}
			/* Attach stream to ops */
			for (i = 0; i < num_bufs; i++)
				ops[i]->stream = *stream;
		}

		/* let the caller free exactly the xforms created here */
		test_priv_data->num_priv_xforms = num_priv_xforms;
	}

exit:
	return ret_status;
}
1652 
1653 /**
1654  * Prints out the test report. Memory freeing.
1655  *
1656  * Called after successful decompression.
1657  * Operation(s) status validation and compression buffers freeing.
1658 
1659  * -1 returned if function fail.
1660  *
1661  * @param int_data
1662  *   Interim data containing session/transformation objects.
1663  * @param test_data
1664  *   The test parameters set by users (command line parameters).
1665  * @param test_priv_data
1666  *   A container used for aggregation all the private test arrays.
1667  * @return
1668  *   - 2: Next step must be executed by the caller (stateful decompression only)
1669  *   - 1: On success (caller should stop and exit)
1670  *   - 0: On success.
1671  *   - -1: On error.
1672  */
static int
test_deflate_decomp_finalize(const struct interim_data_params *int_data,
		const struct test_data_params *test_data,
		const struct test_private_arrays *test_priv_data)
{
	/* local variables: */
	unsigned int i;
	struct priv_op_data *priv_data;
	/*
	 * Persists across calls: counts stateful-decompress rounds so a
	 * device that never finishes cannot loop forever.
	 * NOTE(review): never reset between test cases — presumably each
	 * case stays under decompress_steps_max; confirm.
	 */
	static unsigned int step;

	/* from int_data: */
	uint16_t *buf_idx = int_data->buf_idx;
	unsigned int num_bufs = int_data->num_bufs;
	const char * const *test_bufs = int_data->test_bufs;
	struct rte_comp_xform **compress_xforms = int_data->compress_xforms;

	/* from test_priv_data: */
	struct rte_comp_op **ops_processed = test_priv_data->ops_processed;
	struct rte_mbuf **comp_bufs = test_priv_data->comp_bufs;
	struct rte_comp_op **ops = test_priv_data->ops;
	uint64_t *compress_checksum = test_priv_data->compress_checksum;
	unsigned int *decomp_produced_data_size =
			test_priv_data->decomp_produced_data_size;
	char **all_decomp_data = test_priv_data->all_decomp_data;

	/* from test_data: */
	unsigned int out_of_space = test_data->out_of_space;
	enum rte_comp_op_type operation_type = test_data->decompress_state;

	unsigned int zlib_compress =
			(test_data->zlib_dir == ZLIB_ALL ||
			test_data->zlib_dir == ZLIB_COMPRESS);
	unsigned int zlib_decompress =
			(test_data->zlib_dir == ZLIB_ALL ||
			test_data->zlib_dir == ZLIB_DECOMPRESS);

	/* Report per-buffer decompression results */
	for (i = 0; i < num_bufs; i++) {
		priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
		/* if zlib compressed, the PMD did the decompression */
		char engine[] = "zlib, (directly, no PMD)";
		if (zlib_compress)
			strlcpy(engine, "pmd", sizeof(engine));
		RTE_LOG(DEBUG, USER1,
			"Buffer %u decompressed by %s from %u to %u bytes\n",
			buf_idx[priv_data->orig_idx], engine,
			ops_processed[i]->consumed, ops_processed[i]->produced);
		ops[i] = NULL;
	}

	/*
	 * Check operation status and free source mbuf (destination mbuf and
	 * compress operation information is still needed)
	 */
	for (i = 0; i < num_bufs; i++) {
		/* out-of-space test expects OUT_OF_SPACE_TERMINATED status */
		if (out_of_space && !zlib_decompress) {
			if (ops_processed[i]->status !=
				RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {

				RTE_LOG(ERR, USER1,
					"Operation without expected out of "
					"space status error\n");
				return -1;
			} else
				continue;
		}

		if (operation_type == RTE_COMP_OP_STATEFUL
			&& (ops_processed[i]->status ==
				RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE
			    || ops_processed[i]->status ==
				RTE_COMP_OP_STATUS_SUCCESS)) {
			/* collect the output into all_decomp_data */
			const void *ptr = rte_pktmbuf_read(
					ops_processed[i]->m_dst,
					ops_processed[i]->dst.offset,
					ops_processed[i]->produced,
					*all_decomp_data +
						*decomp_produced_data_size);
			/*
			 * rte_pktmbuf_read() copies into the supplied buffer
			 * only for segmented mbufs; for contiguous data it
			 * returns a direct pointer, so copy explicitly then.
			 */
			if (ptr != *all_decomp_data +
					*decomp_produced_data_size)
				rte_memcpy(*all_decomp_data +
					   *decomp_produced_data_size,
					   ptr, ops_processed[i]->produced);

			*decomp_produced_data_size +=
					ops_processed[i]->produced;
			/* source not fully consumed: another round needed */
			if (ops_processed[i]->src.length >
					ops_processed[i]->consumed) {
				if (ops_processed[i]->status ==
						RTE_COMP_OP_STATUS_SUCCESS) {
					RTE_LOG(ERR, USER1,
					      "Operation finished too early\n");
					return -1;
				}
				step++;
				if (step >= test_data->decompress_steps_max) {
					RTE_LOG(ERR, USER1,
					  "Operation exceeded maximum steps\n");
					return -1;
				}
				/* rewind the op past the consumed bytes */
				ops[i] = ops_processed[i];
				ops[i]->status =
					       RTE_COMP_OP_STATUS_NOT_PROCESSED;
				ops[i]->src.offset +=
						ops_processed[i]->consumed;
				ops[i]->src.length -=
						ops_processed[i]->consumed;
				/*
				 * repeat the operation: return 2 so the
				 * caller runs another enqueue/dequeue round
				 */
				return 2;
			} else {
				/* Compare the original stream with the */
				/* decompressed stream (in size and the data) */
				priv_data = (struct priv_op_data *)
						(ops_processed[i] + 1);
				const char *buf1 =
						test_bufs[priv_data->orig_idx];
				const char *buf2 = *all_decomp_data;

				if (compare_buffers(buf1, strlen(buf1) + 1,
					  buf2, *decomp_produced_data_size) < 0)
					return -1;
				/* Test checksums */
				if (compress_xforms[0]->compress.chksum
						!= RTE_COMP_CHECKSUM_NONE) {
					if (ops_processed[i]->output_chksum
						      != compress_checksum[i]) {
						RTE_LOG(ERR, USER1,
			"The checksums differ\n"
			"Compression Checksum: %" PRIu64 "\tDecompression "
			"Checksum: %" PRIu64 "\n", compress_checksum[i],
					       ops_processed[i]->output_chksum);
						return -1;
					}
				}
			}
		} else if (ops_processed[i]->status !=
			   RTE_COMP_OP_STATUS_SUCCESS) {
			RTE_LOG(ERR, USER1,
				"Some operations were not successful\n");
			return -1;
		}
		priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
		rte_pktmbuf_free(comp_bufs[priv_data->orig_idx]);
		comp_bufs[priv_data->orig_idx] = NULL;
	}

	/* out-of-space test ends here: skip results validation */
	if (out_of_space && !zlib_decompress)
		return 1;

	return 0;
}
1824 
1825 /**
1826  * Validation of the output (compression/decompression) data.
1827  *
1828  * The function compares the source stream with the output stream,
1829  * after decompression, to check if compression/decompression
1830  * was correct.
1831  * -1 returned if function fail.
1832  *
1833  * @param int_data
1834  *   Interim data containing session/transformation objects.
1835  * @param test_data
1836  *   The test parameters set by users (command line parameters).
1837  * @param test_priv_data
1838  *   A container used for aggregation all the private test arrays.
1839  * @return
1840  *   - 0: On success.
1841  *   - -1: On error.
1842  */
1843 static int
1844 test_results_validation(const struct interim_data_params *int_data,
1845 		const struct test_data_params *test_data,
1846 		const struct test_private_arrays *test_priv_data)
1847 {
1848 	/* local variables: */
1849 	unsigned int i;
1850 	struct priv_op_data *priv_data;
1851 	const char *buf1;
1852 	const char *buf2;
1853 	char *contig_buf = NULL;
1854 	uint32_t data_size;
1855 
1856 	/* from int_data: */
1857 	struct rte_comp_xform **compress_xforms = int_data->compress_xforms;
1858 	unsigned int num_bufs = int_data->num_bufs;
1859 	const char * const *test_bufs = int_data->test_bufs;
1860 
1861 	/* from test_priv_data: */
1862 	uint64_t *compress_checksum = test_priv_data->compress_checksum;
1863 	struct rte_comp_op **ops_processed = test_priv_data->ops_processed;
1864 
1865 	/*
1866 	 * Compare the original stream with the decompressed stream
1867 	 * (in size and the data)
1868 	 */
1869 	for (i = 0; i < num_bufs; i++) {
1870 		priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1871 		buf1 = test_data->use_external_mbufs ?
1872 				test_data->inbuf_memzone->addr :
1873 				test_bufs[priv_data->orig_idx];
1874 		data_size = test_data->use_external_mbufs ?
1875 				test_data->inbuf_data_size :
1876 				strlen(buf1) + 1;
1877 
1878 		contig_buf = rte_malloc(NULL, ops_processed[i]->produced, 0);
1879 		if (contig_buf == NULL) {
1880 			RTE_LOG(ERR, USER1, "Contiguous buffer could not "
1881 					"be allocated\n");
1882 			goto exit;
1883 		}
1884 
1885 		buf2 = rte_pktmbuf_read(ops_processed[i]->m_dst, 0,
1886 				ops_processed[i]->produced, contig_buf);
1887 		if (compare_buffers(buf1, data_size,
1888 				buf2, ops_processed[i]->produced) < 0)
1889 			goto exit;
1890 
1891 		/* Test checksums */
1892 		if (compress_xforms[0]->compress.chksum !=
1893 				RTE_COMP_CHECKSUM_NONE) {
1894 			if (ops_processed[i]->output_chksum !=
1895 					compress_checksum[i]) {
1896 				RTE_LOG(ERR, USER1, "The checksums differ\n"
1897 			"Compression Checksum: %" PRIu64 "\tDecompression "
1898 			"Checksum: %" PRIu64 "\n", compress_checksum[i],
1899 			ops_processed[i]->output_chksum);
1900 				goto exit;
1901 			}
1902 		}
1903 
1904 		rte_free(contig_buf);
1905 		contig_buf = NULL;
1906 	}
1907 	return 0;
1908 
1909 exit:
1910 	rte_free(contig_buf);
1911 	return -1;
1912 }
1913 
1914 /**
1915  * Compresses and decompresses input stream with compressdev API and Zlib API
1916  *
1917  * Basic test function. Common for all the functional tests.
1918  * -1 returned if function fail.
1919  *
1920  * @param int_data
1921  *   Interim data containing session/transformation objects.
1922  * @param test_data
1923  *   The test parameters set by users (command line parameters).
1924  * @return
1925  *   - 1: Some operation not supported
1926  *   - 0: On success.
1927  *   - -1: On error.
1928  */
1929 
1930 static int
1931 test_deflate_comp_decomp(const struct interim_data_params *int_data,
1932 		const struct test_data_params *test_data)
1933 {
1934 	unsigned int num_bufs = int_data->num_bufs;
1935 	unsigned int out_of_space = test_data->out_of_space;
1936 
1937 	void *stream = NULL;
1938 	char *all_decomp_data = NULL;
1939 	unsigned int decomp_produced_data_size = 0;
1940 
1941 	int ret_status = -1;
1942 	int ret;
1943 	struct rte_mbuf *uncomp_bufs[num_bufs];
1944 	struct rte_mbuf *comp_bufs[num_bufs];
1945 	struct rte_comp_op *ops[num_bufs];
1946 	struct rte_comp_op *ops_processed[num_bufs];
1947 	void *priv_xforms[num_bufs];
1948 	unsigned int i;
1949 
1950 	uint64_t compress_checksum[num_bufs];
1951 	uint32_t compressed_data_size[num_bufs];
1952 	char *contig_buf = NULL;
1953 
1954 	struct rte_mbuf_ext_shared_info compbuf_info;
1955 	struct rte_mbuf_ext_shared_info decompbuf_info;
1956 
1957 	const struct rte_compressdev_capabilities *capa;
1958 
1959 	/* Compressing with CompressDev */
1960 	unsigned int zlib_compress =
1961 			(test_data->zlib_dir == ZLIB_ALL ||
1962 			test_data->zlib_dir == ZLIB_COMPRESS);
1963 	unsigned int zlib_decompress =
1964 			(test_data->zlib_dir == ZLIB_ALL ||
1965 			test_data->zlib_dir == ZLIB_DECOMPRESS);
1966 
1967 	struct test_private_arrays test_priv_data;
1968 
1969 	test_priv_data.uncomp_bufs = uncomp_bufs;
1970 	test_priv_data.comp_bufs = comp_bufs;
1971 	test_priv_data.ops = ops;
1972 	test_priv_data.ops_processed = ops_processed;
1973 	test_priv_data.priv_xforms = priv_xforms;
1974 	test_priv_data.compress_checksum = compress_checksum;
1975 	test_priv_data.compressed_data_size = compressed_data_size;
1976 
1977 	test_priv_data.stream = &stream;
1978 	test_priv_data.all_decomp_data = &all_decomp_data;
1979 	test_priv_data.decomp_produced_data_size = &decomp_produced_data_size;
1980 
1981 	test_priv_data.num_priv_xforms = 0; /* it's used for deompression only */
1982 
1983 	capa = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1984 	if (capa == NULL) {
1985 		RTE_LOG(ERR, USER1,
1986 			"Compress device does not support DEFLATE\n");
1987 		return -1;
1988 	}
1989 	//test_objects_init(&test_priv_data, num_bufs);
1990 
1991 	/* Prepare the source mbufs with the data */
1992 	ret = test_setup_com_bufs(int_data, test_data, &test_priv_data);
1993 	if (ret < 0) {
1994 		ret_status = -1;
1995 		goto exit;
1996 	}
1997 
1998 /* COMPRESSION  */
1999 
2000 	/* Prepare output (destination) mbufs for compressed data */
2001 	ret = test_setup_output_bufs(
2002 			OPERATION_COMPRESSION,
2003 			out_of_space == 1 && !zlib_compress,
2004 			&test_priv_data,
2005 			int_data,
2006 			test_data,
2007 			&compbuf_info);
2008 	if (ret < 0) {
2009 		ret_status = -1;
2010 		goto exit;
2011 	}
2012 
2013 	/* Run compression */
2014 	ret = test_deflate_comp_run(int_data, test_data, &test_priv_data);
2015 	if (ret < 0) {
2016 		ret_status = -1;
2017 		goto exit;
2018 	}
2019 
2020 	ret = test_deflate_comp_finalize(int_data, test_data, &test_priv_data);
2021 	if (ret < 0) {
2022 		ret_status = -1;
2023 		goto exit;
2024 	} else if (ret == 1) {
2025 		ret_status = 0;
2026 		goto exit;
2027 	} else if (ret == 2) {
2028 		ret_status = 1;	 /* some operation not supported */
2029 		goto exit;
2030 	}
2031 
2032 /* DECOMPRESSION  */
2033 
2034 	/* Prepare output (destination) mbufs for decompressed data */
2035 	ret = test_setup_output_bufs(
2036 			OPERATION_DECOMPRESSION,
2037 			out_of_space == 1 && !zlib_decompress,
2038 			&test_priv_data,
2039 			int_data,
2040 			test_data,
2041 			&decompbuf_info);
2042 	if (ret < 0) {
2043 		ret_status = -1;
2044 		goto exit;
2045 	}
2046 
2047 	/* Run decompression */
2048 	ret = test_deflate_decomp_run(int_data, test_data, &test_priv_data);
2049 	if (ret < 0) {
2050 		ret_status = -1;
2051 		goto exit;
2052 	}
2053 
2054 	if (!zlib_decompress) {
2055 next_step:	/* next step for stateful decompression only */
2056 		ret = test_run_enqueue_dequeue(ops, ops_processed, num_bufs);
2057 		if (ret < 0) {
2058 			ret_status = -1;
2059 			RTE_LOG(ERR, USER1,
2060 				"Decompression: enqueue/dequeue operation failed\n");
2061 		}
2062 	}
2063 
2064 	ret = test_deflate_decomp_finalize(int_data, test_data, &test_priv_data);
2065 	if (ret < 0) {
2066 		ret_status = -1;
2067 		goto exit;
2068 	} else if (ret == 1) {
2069 		ret_status = 0;
2070 		goto exit;
2071 	} else if (ret == 2) {
2072 		goto next_step;
2073 	}
2074 
2075 /* FINAL PROCESSING  */
2076 
2077 	ret = test_results_validation(int_data, test_data, &test_priv_data);
2078 	if (ret < 0) {
2079 		ret_status = -1;
2080 		goto exit;
2081 	}
2082 	ret_status = 0;
2083 
2084 exit:
2085 	/* Free resources */
2086 
2087 	if (stream != NULL)
2088 		rte_compressdev_stream_free(0, stream);
2089 	if (all_decomp_data != NULL)
2090 		rte_free(all_decomp_data);
2091 
2092 	/* Free compress private xforms */
2093 	for (i = 0; i < test_priv_data.num_priv_xforms; i++) {
2094 		if (priv_xforms[i] != NULL) {
2095 			rte_compressdev_private_xform_free(0, priv_xforms[i]);
2096 			priv_xforms[i] = NULL;
2097 		}
2098 	}
2099 
2100 	for (i = 0; i < num_bufs; i++) {
2101 		rte_pktmbuf_free(uncomp_bufs[i]);
2102 		rte_pktmbuf_free(comp_bufs[i]);
2103 		rte_comp_op_free(ops[i]);
2104 		rte_comp_op_free(ops_processed[i]);
2105 	}
2106 	rte_free(contig_buf);
2107 
2108 	return ret_status;
2109 }
2110 
2111 static int
2112 test_compressdev_deflate_stateless_fixed(void)
2113 {
2114 	struct comp_testsuite_params *ts_params = &testsuite_params;
2115 	uint16_t i;
2116 	int ret;
2117 	const struct rte_compressdev_capabilities *capab;
2118 
2119 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2120 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2121 
2122 	if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
2123 		return -ENOTSUP;
2124 
2125 	struct rte_comp_xform *compress_xform =
2126 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
2127 
2128 	if (compress_xform == NULL) {
2129 		RTE_LOG(ERR, USER1,
2130 			"Compress xform could not be created\n");
2131 		ret = TEST_FAILED;
2132 		goto exit;
2133 	}
2134 
2135 	memcpy(compress_xform, ts_params->def_comp_xform,
2136 			sizeof(struct rte_comp_xform));
2137 	compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_FIXED;
2138 
2139 	struct interim_data_params int_data = {
2140 		NULL,
2141 		1,
2142 		NULL,
2143 		&compress_xform,
2144 		&ts_params->def_decomp_xform,
2145 		1
2146 	};
2147 
2148 	struct test_data_params test_data = {
2149 		.compress_state = RTE_COMP_OP_STATELESS,
2150 		.decompress_state = RTE_COMP_OP_STATELESS,
2151 		.buff_type = LB_BOTH,
2152 		.zlib_dir = ZLIB_DECOMPRESS,
2153 		.out_of_space = 0,
2154 		.big_data = 0,
2155 		.overflow = OVERFLOW_DISABLED
2156 	};
2157 
2158 	for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
2159 		int_data.test_bufs = &compress_test_bufs[i];
2160 		int_data.buf_idx = &i;
2161 
2162 		/* Compress with compressdev, decompress with Zlib */
2163 		test_data.zlib_dir = ZLIB_DECOMPRESS;
2164 		ret = test_deflate_comp_decomp(&int_data, &test_data);
2165 		if (ret < 0)
2166 			goto exit;
2167 
2168 		/* Compress with Zlib, decompress with compressdev */
2169 		test_data.zlib_dir = ZLIB_COMPRESS;
2170 		ret = test_deflate_comp_decomp(&int_data, &test_data);
2171 		if (ret < 0)
2172 			goto exit;
2173 	}
2174 
2175 	ret = TEST_SUCCESS;
2176 
2177 exit:
2178 	rte_free(compress_xform);
2179 	return ret;
2180 }
2181 
2182 static int
2183 test_compressdev_deflate_stateless_dynamic(void)
2184 {
2185 	struct comp_testsuite_params *ts_params = &testsuite_params;
2186 	uint16_t i;
2187 	int ret;
2188 	struct rte_comp_xform *compress_xform =
2189 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
2190 
2191 	const struct rte_compressdev_capabilities *capab;
2192 
2193 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2194 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2195 
2196 	if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
2197 		return -ENOTSUP;
2198 
2199 	if (compress_xform == NULL) {
2200 		RTE_LOG(ERR, USER1,
2201 			"Compress xform could not be created\n");
2202 		ret = TEST_FAILED;
2203 		goto exit;
2204 	}
2205 
2206 	memcpy(compress_xform, ts_params->def_comp_xform,
2207 			sizeof(struct rte_comp_xform));
2208 	compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC;
2209 
2210 	struct interim_data_params int_data = {
2211 		NULL,
2212 		1,
2213 		NULL,
2214 		&compress_xform,
2215 		&ts_params->def_decomp_xform,
2216 		1
2217 	};
2218 
2219 	struct test_data_params test_data = {
2220 		.compress_state = RTE_COMP_OP_STATELESS,
2221 		.decompress_state = RTE_COMP_OP_STATELESS,
2222 		.buff_type = LB_BOTH,
2223 		.zlib_dir = ZLIB_DECOMPRESS,
2224 		.out_of_space = 0,
2225 		.big_data = 0,
2226 		.overflow = OVERFLOW_DISABLED
2227 	};
2228 
2229 	for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
2230 		int_data.test_bufs = &compress_test_bufs[i];
2231 		int_data.buf_idx = &i;
2232 
2233 		/* Compress with compressdev, decompress with Zlib */
2234 		test_data.zlib_dir = ZLIB_DECOMPRESS;
2235 		ret = test_deflate_comp_decomp(&int_data, &test_data);
2236 		if (ret < 0)
2237 			goto exit;
2238 
2239 		/* Compress with Zlib, decompress with compressdev */
2240 		test_data.zlib_dir = ZLIB_COMPRESS;
2241 		ret = test_deflate_comp_decomp(&int_data, &test_data);
2242 		if (ret < 0)
2243 			goto exit;
2244 	}
2245 
2246 	ret = TEST_SUCCESS;
2247 
2248 exit:
2249 	rte_free(compress_xform);
2250 	return ret;
2251 }
2252 
2253 static int
2254 test_compressdev_deflate_stateless_multi_op(void)
2255 {
2256 	struct comp_testsuite_params *ts_params = &testsuite_params;
2257 	uint16_t num_bufs = RTE_DIM(compress_test_bufs);
2258 	uint16_t buf_idx[num_bufs];
2259 	uint16_t i;
2260 	int ret;
2261 
2262 	for (i = 0; i < num_bufs; i++)
2263 		buf_idx[i] = i;
2264 
2265 	struct interim_data_params int_data = {
2266 		compress_test_bufs,
2267 		num_bufs,
2268 		buf_idx,
2269 		&ts_params->def_comp_xform,
2270 		&ts_params->def_decomp_xform,
2271 		1
2272 	};
2273 
2274 	struct test_data_params test_data = {
2275 		.compress_state = RTE_COMP_OP_STATELESS,
2276 		.decompress_state = RTE_COMP_OP_STATELESS,
2277 		.buff_type = LB_BOTH,
2278 		.zlib_dir = ZLIB_DECOMPRESS,
2279 		.out_of_space = 0,
2280 		.big_data = 0,
2281 		.overflow = OVERFLOW_DISABLED
2282 	};
2283 
2284 	/* Compress with compressdev, decompress with Zlib */
2285 	test_data.zlib_dir = ZLIB_DECOMPRESS;
2286 	ret = test_deflate_comp_decomp(&int_data, &test_data);
2287 	if (ret < 0)
2288 		return ret;
2289 
2290 	/* Compress with Zlib, decompress with compressdev */
2291 	test_data.zlib_dir = ZLIB_COMPRESS;
2292 	ret = test_deflate_comp_decomp(&int_data, &test_data);
2293 	if (ret < 0)
2294 		return ret;
2295 
2296 	return TEST_SUCCESS;
2297 }
2298 
2299 static int
2300 test_compressdev_deflate_stateless_multi_level(void)
2301 {
2302 	struct comp_testsuite_params *ts_params = &testsuite_params;
2303 	unsigned int level;
2304 	uint16_t i;
2305 	int ret;
2306 	struct rte_comp_xform *compress_xform =
2307 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
2308 
2309 	if (compress_xform == NULL) {
2310 		RTE_LOG(ERR, USER1,
2311 			"Compress xform could not be created\n");
2312 		ret = TEST_FAILED;
2313 		goto exit;
2314 	}
2315 
2316 	memcpy(compress_xform, ts_params->def_comp_xform,
2317 			sizeof(struct rte_comp_xform));
2318 
2319 	struct interim_data_params int_data = {
2320 		NULL,
2321 		1,
2322 		NULL,
2323 		&compress_xform,
2324 		&ts_params->def_decomp_xform,
2325 		1
2326 	};
2327 
2328 	struct test_data_params test_data = {
2329 		.compress_state = RTE_COMP_OP_STATELESS,
2330 		.decompress_state = RTE_COMP_OP_STATELESS,
2331 		.buff_type = LB_BOTH,
2332 		.zlib_dir = ZLIB_DECOMPRESS,
2333 		.out_of_space = 0,
2334 		.big_data = 0,
2335 		.overflow = OVERFLOW_DISABLED
2336 	};
2337 
2338 	for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
2339 		int_data.test_bufs = &compress_test_bufs[i];
2340 		int_data.buf_idx = &i;
2341 
2342 		for (level = RTE_COMP_LEVEL_MIN; level <= RTE_COMP_LEVEL_MAX;
2343 				level++) {
2344 			compress_xform->compress.level = level;
2345 			/* Compress with compressdev, decompress with Zlib */
2346 			test_data.zlib_dir = ZLIB_DECOMPRESS;
2347 			ret = test_deflate_comp_decomp(&int_data, &test_data);
2348 			if (ret < 0)
2349 				goto exit;
2350 		}
2351 	}
2352 
2353 	ret = TEST_SUCCESS;
2354 
2355 exit:
2356 	rte_free(compress_xform);
2357 	return ret;
2358 }
2359 
2360 #define NUM_XFORMS 3
2361 static int
2362 test_compressdev_deflate_stateless_multi_xform(void)
2363 {
2364 	struct comp_testsuite_params *ts_params = &testsuite_params;
2365 	uint16_t num_bufs = NUM_XFORMS;
2366 	struct rte_comp_xform *compress_xforms[NUM_XFORMS] = {NULL};
2367 	struct rte_comp_xform *decompress_xforms[NUM_XFORMS] = {NULL};
2368 	const char *test_buffers[NUM_XFORMS];
2369 	uint16_t i;
2370 	unsigned int level = RTE_COMP_LEVEL_MIN;
2371 	uint16_t buf_idx[num_bufs];
2372 	int ret;
2373 
2374 	/* Create multiple xforms with various levels */
2375 	for (i = 0; i < NUM_XFORMS; i++) {
2376 		compress_xforms[i] = rte_malloc(NULL,
2377 				sizeof(struct rte_comp_xform), 0);
2378 		if (compress_xforms[i] == NULL) {
2379 			RTE_LOG(ERR, USER1,
2380 				"Compress xform could not be created\n");
2381 			ret = TEST_FAILED;
2382 			goto exit;
2383 		}
2384 
2385 		memcpy(compress_xforms[i], ts_params->def_comp_xform,
2386 				sizeof(struct rte_comp_xform));
2387 		compress_xforms[i]->compress.level = level;
2388 		level++;
2389 
2390 		decompress_xforms[i] = rte_malloc(NULL,
2391 				sizeof(struct rte_comp_xform), 0);
2392 		if (decompress_xforms[i] == NULL) {
2393 			RTE_LOG(ERR, USER1,
2394 				"Decompress xform could not be created\n");
2395 			ret = TEST_FAILED;
2396 			goto exit;
2397 		}
2398 
2399 		memcpy(decompress_xforms[i], ts_params->def_decomp_xform,
2400 				sizeof(struct rte_comp_xform));
2401 	}
2402 
2403 	for (i = 0; i < NUM_XFORMS; i++) {
2404 		buf_idx[i] = 0;
2405 		/* Use the same buffer in all sessions */
2406 		test_buffers[i] = compress_test_bufs[0];
2407 	}
2408 
2409 	struct interim_data_params int_data = {
2410 		test_buffers,
2411 		num_bufs,
2412 		buf_idx,
2413 		compress_xforms,
2414 		decompress_xforms,
2415 		NUM_XFORMS
2416 	};
2417 
2418 	struct test_data_params test_data = {
2419 		.compress_state = RTE_COMP_OP_STATELESS,
2420 		.decompress_state = RTE_COMP_OP_STATELESS,
2421 		.buff_type = LB_BOTH,
2422 		.zlib_dir = ZLIB_DECOMPRESS,
2423 		.out_of_space = 0,
2424 		.big_data = 0,
2425 		.overflow = OVERFLOW_DISABLED
2426 	};
2427 
2428 	/* Compress with compressdev, decompress with Zlib */
2429 	ret = test_deflate_comp_decomp(&int_data, &test_data);
2430 	if (ret < 0)
2431 		goto exit;
2432 
2433 	ret = TEST_SUCCESS;
2434 
2435 exit:
2436 	for (i = 0; i < NUM_XFORMS; i++) {
2437 		rte_free(compress_xforms[i]);
2438 		rte_free(decompress_xforms[i]);
2439 	}
2440 
2441 	return ret;
2442 }
2443 
2444 static int
2445 test_compressdev_deflate_stateless_sgl(void)
2446 {
2447 	struct comp_testsuite_params *ts_params = &testsuite_params;
2448 	uint16_t i;
2449 	int ret;
2450 	const struct rte_compressdev_capabilities *capab;
2451 
2452 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2453 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2454 
2455 	if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
2456 		return -ENOTSUP;
2457 
2458 	struct interim_data_params int_data = {
2459 		NULL,
2460 		1,
2461 		NULL,
2462 		&ts_params->def_comp_xform,
2463 		&ts_params->def_decomp_xform,
2464 		1
2465 	};
2466 
2467 	struct test_data_params test_data = {
2468 		.compress_state = RTE_COMP_OP_STATELESS,
2469 		.decompress_state = RTE_COMP_OP_STATELESS,
2470 		.buff_type = SGL_BOTH,
2471 		.zlib_dir = ZLIB_DECOMPRESS,
2472 		.out_of_space = 0,
2473 		.big_data = 0,
2474 		.overflow = OVERFLOW_DISABLED
2475 	};
2476 
2477 	for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
2478 		int_data.test_bufs = &compress_test_bufs[i];
2479 		int_data.buf_idx = &i;
2480 
2481 		/* Compress with compressdev, decompress with Zlib */
2482 		test_data.zlib_dir = ZLIB_DECOMPRESS;
2483 		ret = test_deflate_comp_decomp(&int_data, &test_data);
2484 		if (ret < 0)
2485 			return ret;
2486 
2487 		/* Compress with Zlib, decompress with compressdev */
2488 		test_data.zlib_dir = ZLIB_COMPRESS;
2489 		ret = test_deflate_comp_decomp(&int_data, &test_data);
2490 		if (ret < 0)
2491 			return ret;
2492 
2493 		if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_LB_OUT) {
2494 			/* Compress with compressdev, decompress with Zlib */
2495 			test_data.zlib_dir = ZLIB_DECOMPRESS;
2496 			test_data.buff_type = SGL_TO_LB;
2497 			ret = test_deflate_comp_decomp(&int_data, &test_data);
2498 			if (ret < 0)
2499 				return ret;
2500 
2501 			/* Compress with Zlib, decompress with compressdev */
2502 			test_data.zlib_dir = ZLIB_COMPRESS;
2503 			test_data.buff_type = SGL_TO_LB;
2504 			ret = test_deflate_comp_decomp(&int_data, &test_data);
2505 			if (ret < 0)
2506 				return ret;
2507 		}
2508 
2509 		if (capab->comp_feature_flags & RTE_COMP_FF_OOP_LB_IN_SGL_OUT) {
2510 			/* Compress with compressdev, decompress with Zlib */
2511 			test_data.zlib_dir = ZLIB_DECOMPRESS;
2512 			test_data.buff_type = LB_TO_SGL;
2513 			ret = test_deflate_comp_decomp(&int_data, &test_data);
2514 			if (ret < 0)
2515 				return ret;
2516 
2517 			/* Compress with Zlib, decompress with compressdev */
2518 			test_data.zlib_dir = ZLIB_COMPRESS;
2519 			test_data.buff_type = LB_TO_SGL;
2520 			ret = test_deflate_comp_decomp(&int_data, &test_data);
2521 			if (ret < 0)
2522 				return ret;
2523 		}
2524 	}
2525 
2526 	return TEST_SUCCESS;
2527 }
2528 
2529 static int
2530 test_compressdev_deflate_stateless_checksum(void)
2531 {
2532 	struct comp_testsuite_params *ts_params = &testsuite_params;
2533 	uint16_t i;
2534 	int ret;
2535 	const struct rte_compressdev_capabilities *capab;
2536 
2537 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2538 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2539 
2540 	/* Check if driver supports any checksum */
2541 	if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM) == 0 &&
2542 			(capab->comp_feature_flags &
2543 			RTE_COMP_FF_ADLER32_CHECKSUM) == 0 &&
2544 			(capab->comp_feature_flags &
2545 			RTE_COMP_FF_CRC32_ADLER32_CHECKSUM) == 0)
2546 		return -ENOTSUP;
2547 
2548 	struct rte_comp_xform *compress_xform =
2549 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
2550 	if (compress_xform == NULL) {
2551 		RTE_LOG(ERR, USER1, "Compress xform could not be created\n");
2552 		return TEST_FAILED;
2553 	}
2554 
2555 	memcpy(compress_xform, ts_params->def_comp_xform,
2556 			sizeof(struct rte_comp_xform));
2557 
2558 	struct rte_comp_xform *decompress_xform =
2559 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
2560 	if (decompress_xform == NULL) {
2561 		RTE_LOG(ERR, USER1, "Decompress xform could not be created\n");
2562 		rte_free(compress_xform);
2563 		return TEST_FAILED;
2564 	}
2565 
2566 	memcpy(decompress_xform, ts_params->def_decomp_xform,
2567 			sizeof(struct rte_comp_xform));
2568 
2569 	struct interim_data_params int_data = {
2570 		NULL,
2571 		1,
2572 		NULL,
2573 		&compress_xform,
2574 		&decompress_xform,
2575 		1
2576 	};
2577 
2578 	struct test_data_params test_data = {
2579 		.compress_state = RTE_COMP_OP_STATELESS,
2580 		.decompress_state = RTE_COMP_OP_STATELESS,
2581 		.buff_type = LB_BOTH,
2582 		.zlib_dir = ZLIB_DECOMPRESS,
2583 		.out_of_space = 0,
2584 		.big_data = 0,
2585 		.overflow = OVERFLOW_DISABLED
2586 	};
2587 
2588 	/* Check if driver supports crc32 checksum and test */
2589 	if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM)) {
2590 		compress_xform->compress.chksum = RTE_COMP_CHECKSUM_CRC32;
2591 		decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_CRC32;
2592 
2593 		for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
2594 			/* Compress with compressdev, decompress with Zlib */
2595 			int_data.test_bufs = &compress_test_bufs[i];
2596 			int_data.buf_idx = &i;
2597 
2598 			/* Generate zlib checksum and test against selected
2599 			 * drivers decompression checksum
2600 			 */
2601 			test_data.zlib_dir = ZLIB_COMPRESS;
2602 			ret = test_deflate_comp_decomp(&int_data, &test_data);
2603 			if (ret < 0)
2604 				goto exit;
2605 
2606 			/* Generate compression and decompression
2607 			 * checksum of selected driver
2608 			 */
2609 			test_data.zlib_dir = ZLIB_NONE;
2610 			ret = test_deflate_comp_decomp(&int_data, &test_data);
2611 			if (ret < 0)
2612 				goto exit;
2613 		}
2614 	}
2615 
2616 	/* Check if driver supports adler32 checksum and test */
2617 	if ((capab->comp_feature_flags & RTE_COMP_FF_ADLER32_CHECKSUM)) {
2618 		compress_xform->compress.chksum = RTE_COMP_CHECKSUM_ADLER32;
2619 		decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_ADLER32;
2620 
2621 		for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
2622 			int_data.test_bufs = &compress_test_bufs[i];
2623 			int_data.buf_idx = &i;
2624 
2625 			/* Generate zlib checksum and test against selected
2626 			 * drivers decompression checksum
2627 			 */
2628 			test_data.zlib_dir = ZLIB_COMPRESS;
2629 			ret = test_deflate_comp_decomp(&int_data, &test_data);
2630 			if (ret < 0)
2631 				goto exit;
2632 			/* Generate compression and decompression
2633 			 * checksum of selected driver
2634 			 */
2635 			test_data.zlib_dir = ZLIB_NONE;
2636 			ret = test_deflate_comp_decomp(&int_data, &test_data);
2637 			if (ret < 0)
2638 				goto exit;
2639 		}
2640 	}
2641 
2642 	/* Check if driver supports combined crc and adler checksum and test */
2643 	if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_ADLER32_CHECKSUM)) {
2644 		compress_xform->compress.chksum =
2645 				RTE_COMP_CHECKSUM_CRC32_ADLER32;
2646 		decompress_xform->decompress.chksum =
2647 				RTE_COMP_CHECKSUM_CRC32_ADLER32;
2648 
2649 		for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
2650 			int_data.test_bufs = &compress_test_bufs[i];
2651 			int_data.buf_idx = &i;
2652 
2653 			/* Generate compression and decompression
2654 			 * checksum of selected driver
2655 			 */
2656 			test_data.zlib_dir = ZLIB_NONE;
2657 			ret = test_deflate_comp_decomp(&int_data, &test_data);
2658 			if (ret < 0)
2659 				goto exit;
2660 		}
2661 	}
2662 
2663 	ret = TEST_SUCCESS;
2664 
2665 exit:
2666 	rte_free(compress_xform);
2667 	rte_free(decompress_xform);
2668 	return ret;
2669 }
2670 
2671 static int
2672 test_compressdev_out_of_space_buffer(void)
2673 {
2674 	struct comp_testsuite_params *ts_params = &testsuite_params;
2675 	int ret;
2676 	uint16_t i;
2677 	const struct rte_compressdev_capabilities *capab;
2678 
2679 	RTE_LOG(INFO, USER1, "This is a negative test, errors are expected\n");
2680 
2681 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2682 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2683 
2684 	if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
2685 		return -ENOTSUP;
2686 
2687 	struct interim_data_params int_data = {
2688 		&compress_test_bufs[0],
2689 		1,
2690 		&i,
2691 		&ts_params->def_comp_xform,
2692 		&ts_params->def_decomp_xform,
2693 		1
2694 	};
2695 
2696 	struct test_data_params test_data = {
2697 		.compress_state = RTE_COMP_OP_STATELESS,
2698 		.decompress_state = RTE_COMP_OP_STATELESS,
2699 		.buff_type = LB_BOTH,
2700 		.zlib_dir = ZLIB_DECOMPRESS,
2701 		.out_of_space = 1,  /* run out-of-space test */
2702 		.big_data = 0,
2703 		.overflow = OVERFLOW_DISABLED
2704 	};
2705 	/* Compress with compressdev, decompress with Zlib */
2706 	test_data.zlib_dir = ZLIB_DECOMPRESS;
2707 	ret = test_deflate_comp_decomp(&int_data, &test_data);
2708 	if (ret < 0)
2709 		goto exit;
2710 
2711 	/* Compress with Zlib, decompress with compressdev */
2712 	test_data.zlib_dir = ZLIB_COMPRESS;
2713 	ret = test_deflate_comp_decomp(&int_data, &test_data);
2714 	if (ret < 0)
2715 		goto exit;
2716 
2717 	if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
2718 		/* Compress with compressdev, decompress with Zlib */
2719 		test_data.zlib_dir = ZLIB_DECOMPRESS;
2720 		test_data.buff_type = SGL_BOTH;
2721 		ret = test_deflate_comp_decomp(&int_data, &test_data);
2722 		if (ret < 0)
2723 			goto exit;
2724 
2725 		/* Compress with Zlib, decompress with compressdev */
2726 		test_data.zlib_dir = ZLIB_COMPRESS;
2727 		test_data.buff_type = SGL_BOTH;
2728 		ret = test_deflate_comp_decomp(&int_data, &test_data);
2729 		if (ret < 0)
2730 			goto exit;
2731 	}
2732 
2733 	ret  = TEST_SUCCESS;
2734 
2735 exit:
2736 	return ret;
2737 }
2738 
2739 static int
2740 test_compressdev_deflate_stateless_dynamic_big(void)
2741 {
2742 	struct comp_testsuite_params *ts_params = &testsuite_params;
2743 	uint16_t i = 0;
2744 	int ret;
2745 	int j;
2746 	const struct rte_compressdev_capabilities *capab;
2747 	char *test_buffer = NULL;
2748 
2749 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2750 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2751 
2752 	if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
2753 		return -ENOTSUP;
2754 
2755 	if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
2756 		return -ENOTSUP;
2757 
2758 	test_buffer = rte_malloc(NULL, BIG_DATA_TEST_SIZE, 0);
2759 	if (test_buffer == NULL) {
2760 		RTE_LOG(ERR, USER1,
2761 			"Can't allocate buffer for big-data\n");
2762 		return TEST_FAILED;
2763 	}
2764 
2765 	struct interim_data_params int_data = {
2766 		(const char * const *)&test_buffer,
2767 		1,
2768 		&i,
2769 		&ts_params->def_comp_xform,
2770 		&ts_params->def_decomp_xform,
2771 		1
2772 	};
2773 
2774 	struct test_data_params test_data = {
2775 		.compress_state = RTE_COMP_OP_STATELESS,
2776 		.decompress_state = RTE_COMP_OP_STATELESS,
2777 		.buff_type = SGL_BOTH,
2778 		.zlib_dir = ZLIB_DECOMPRESS,
2779 		.out_of_space = 0,
2780 		.big_data = 1,
2781 		.overflow = OVERFLOW_DISABLED
2782 	};
2783 
2784 	ts_params->def_comp_xform->compress.deflate.huffman =
2785 						RTE_COMP_HUFFMAN_DYNAMIC;
2786 
2787 	/* fill the buffer with data based on rand. data */
2788 	srand(BIG_DATA_TEST_SIZE);
2789 	for (j = 0; j < BIG_DATA_TEST_SIZE - 1; ++j)
2790 		test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1;
2791 	test_buffer[BIG_DATA_TEST_SIZE-1] = 0;
2792 
2793 	/* Compress with compressdev, decompress with Zlib */
2794 	test_data.zlib_dir = ZLIB_DECOMPRESS;
2795 	ret = test_deflate_comp_decomp(&int_data, &test_data);
2796 	if (ret < 0)
2797 		goto exit;
2798 
2799 	/* Compress with Zlib, decompress with compressdev */
2800 	test_data.zlib_dir = ZLIB_COMPRESS;
2801 	ret = test_deflate_comp_decomp(&int_data, &test_data);
2802 	if (ret < 0)
2803 		goto exit;
2804 
2805 	ret = TEST_SUCCESS;
2806 
2807 exit:
2808 	ts_params->def_comp_xform->compress.deflate.huffman =
2809 						RTE_COMP_HUFFMAN_DEFAULT;
2810 	rte_free(test_buffer);
2811 	return ret;
2812 }
2813 
2814 static int
2815 test_compressdev_deflate_stateful_decomp(void)
2816 {
2817 	struct comp_testsuite_params *ts_params = &testsuite_params;
2818 	int ret;
2819 	uint16_t i;
2820 	const struct rte_compressdev_capabilities *capab;
2821 
2822 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2823 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2824 
2825 	if (!(capab->comp_feature_flags & RTE_COMP_FF_STATEFUL_DECOMPRESSION))
2826 		return -ENOTSUP;
2827 
2828 	struct interim_data_params int_data = {
2829 		&compress_test_bufs[0],
2830 		1,
2831 		&i,
2832 		&ts_params->def_comp_xform,
2833 		&ts_params->def_decomp_xform,
2834 		1
2835 	};
2836 
2837 	struct test_data_params test_data = {
2838 		.compress_state = RTE_COMP_OP_STATELESS,
2839 		.decompress_state = RTE_COMP_OP_STATEFUL,
2840 		.buff_type = LB_BOTH,
2841 		.zlib_dir = ZLIB_COMPRESS,
2842 		.out_of_space = 0,
2843 		.big_data = 0,
2844 		.decompress_output_block_size = 2000,
2845 		.decompress_steps_max = 4,
2846 		.overflow = OVERFLOW_DISABLED
2847 	};
2848 
2849 	/* Compress with Zlib, decompress with compressdev */
2850 	if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
2851 		ret = TEST_FAILED;
2852 		goto exit;
2853 	}
2854 
2855 	if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
2856 		/* Now test with SGL buffers */
2857 		test_data.buff_type = SGL_BOTH;
2858 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
2859 			ret = TEST_FAILED;
2860 			goto exit;
2861 		}
2862 	}
2863 
2864 	ret  = TEST_SUCCESS;
2865 
2866 exit:
2867 	return ret;
2868 }
2869 
2870 static int
2871 test_compressdev_deflate_stateful_decomp_checksum(void)
2872 {
2873 	struct comp_testsuite_params *ts_params = &testsuite_params;
2874 	int ret;
2875 	uint16_t i;
2876 	const struct rte_compressdev_capabilities *capab;
2877 
2878 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2879 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2880 
2881 	if (!(capab->comp_feature_flags & RTE_COMP_FF_STATEFUL_DECOMPRESSION))
2882 		return -ENOTSUP;
2883 
2884 	/* Check if driver supports any checksum */
2885 	if (!(capab->comp_feature_flags &
2886 	     (RTE_COMP_FF_CRC32_CHECKSUM | RTE_COMP_FF_ADLER32_CHECKSUM |
2887 	      RTE_COMP_FF_CRC32_ADLER32_CHECKSUM)))
2888 		return -ENOTSUP;
2889 
2890 	struct rte_comp_xform *compress_xform =
2891 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
2892 	if (compress_xform == NULL) {
2893 		RTE_LOG(ERR, USER1, "Compress xform could not be created\n");
2894 		return TEST_FAILED;
2895 	}
2896 
2897 	memcpy(compress_xform, ts_params->def_comp_xform,
2898 	       sizeof(struct rte_comp_xform));
2899 
2900 	struct rte_comp_xform *decompress_xform =
2901 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
2902 	if (decompress_xform == NULL) {
2903 		RTE_LOG(ERR, USER1, "Decompress xform could not be created\n");
2904 		rte_free(compress_xform);
2905 		return TEST_FAILED;
2906 	}
2907 
2908 	memcpy(decompress_xform, ts_params->def_decomp_xform,
2909 	       sizeof(struct rte_comp_xform));
2910 
2911 	struct interim_data_params int_data = {
2912 		&compress_test_bufs[0],
2913 		1,
2914 		&i,
2915 		&compress_xform,
2916 		&decompress_xform,
2917 		1
2918 	};
2919 
2920 	struct test_data_params test_data = {
2921 		.compress_state = RTE_COMP_OP_STATELESS,
2922 		.decompress_state = RTE_COMP_OP_STATEFUL,
2923 		.buff_type = LB_BOTH,
2924 		.zlib_dir = ZLIB_COMPRESS,
2925 		.out_of_space = 0,
2926 		.big_data = 0,
2927 		.decompress_output_block_size = 2000,
2928 		.decompress_steps_max = 4,
2929 		.overflow = OVERFLOW_DISABLED
2930 	};
2931 
2932 	/* Check if driver supports crc32 checksum and test */
2933 	if (capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM) {
2934 		compress_xform->compress.chksum = RTE_COMP_CHECKSUM_CRC32;
2935 		decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_CRC32;
2936 		/* Compress with Zlib, decompress with compressdev */
2937 		test_data.buff_type = LB_BOTH;
2938 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
2939 			ret = TEST_FAILED;
2940 			goto exit;
2941 		}
2942 		if (capab->comp_feature_flags &
2943 				RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
2944 			/* Now test with SGL buffers */
2945 			test_data.buff_type = SGL_BOTH;
2946 			if (test_deflate_comp_decomp(&int_data,
2947 						     &test_data) < 0) {
2948 				ret = TEST_FAILED;
2949 				goto exit;
2950 			}
2951 		}
2952 	}
2953 
2954 	/* Check if driver supports adler32 checksum and test */
2955 	if (capab->comp_feature_flags & RTE_COMP_FF_ADLER32_CHECKSUM) {
2956 		compress_xform->compress.chksum = RTE_COMP_CHECKSUM_ADLER32;
2957 		decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_ADLER32;
2958 		/* Compress with Zlib, decompress with compressdev */
2959 		test_data.buff_type = LB_BOTH;
2960 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
2961 			ret = TEST_FAILED;
2962 			goto exit;
2963 		}
2964 		if (capab->comp_feature_flags &
2965 				RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
2966 			/* Now test with SGL buffers */
2967 			test_data.buff_type = SGL_BOTH;
2968 			if (test_deflate_comp_decomp(&int_data,
2969 						     &test_data) < 0) {
2970 				ret = TEST_FAILED;
2971 				goto exit;
2972 			}
2973 		}
2974 	}
2975 
2976 	/* Check if driver supports combined crc and adler checksum and test */
2977 	if (capab->comp_feature_flags & RTE_COMP_FF_CRC32_ADLER32_CHECKSUM) {
2978 		compress_xform->compress.chksum =
2979 				RTE_COMP_CHECKSUM_CRC32_ADLER32;
2980 		decompress_xform->decompress.chksum =
2981 				RTE_COMP_CHECKSUM_CRC32_ADLER32;
2982 		/* Zlib doesn't support combined checksum */
2983 		test_data.zlib_dir = ZLIB_NONE;
2984 		/* Compress stateless, decompress stateful with compressdev */
2985 		test_data.buff_type = LB_BOTH;
2986 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
2987 			ret = TEST_FAILED;
2988 			goto exit;
2989 		}
2990 		if (capab->comp_feature_flags &
2991 				RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
2992 			/* Now test with SGL buffers */
2993 			test_data.buff_type = SGL_BOTH;
2994 			if (test_deflate_comp_decomp(&int_data,
2995 						     &test_data) < 0) {
2996 				ret = TEST_FAILED;
2997 				goto exit;
2998 			}
2999 		}
3000 	}
3001 
3002 	ret  = TEST_SUCCESS;
3003 
3004 exit:
3005 	rte_free(compress_xform);
3006 	rte_free(decompress_xform);
3007 	return ret;
3008 }
3009 
3010 static const struct rte_memzone *
3011 make_memzone(const char *name, size_t size)
3012 {
3013 	unsigned int socket_id = rte_socket_id();
3014 	char mz_name[RTE_MEMZONE_NAMESIZE];
3015 	const struct rte_memzone *memzone;
3016 
3017 	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "%s_%u", name, socket_id);
3018 	memzone = rte_memzone_lookup(mz_name);
3019 	if (memzone != NULL && memzone->len != size) {
3020 		rte_memzone_free(memzone);
3021 		memzone = NULL;
3022 	}
3023 	if (memzone == NULL) {
3024 		memzone = rte_memzone_reserve_aligned(mz_name, size, socket_id,
3025 				RTE_MEMZONE_IOVA_CONTIG, RTE_CACHE_LINE_SIZE);
3026 		if (memzone == NULL)
3027 			RTE_LOG(ERR, USER1, "Can't allocate memory zone %s",
3028 				mz_name);
3029 	}
3030 	return memzone;
3031 }
3032 
3033 static int
3034 test_compressdev_external_mbufs(void)
3035 {
3036 	struct comp_testsuite_params *ts_params = &testsuite_params;
3037 	size_t data_len = 0;
3038 	uint16_t i;
3039 	int ret = TEST_FAILED;
3040 
3041 	for (i = 0; i < RTE_DIM(compress_test_bufs); i++)
3042 		data_len = RTE_MAX(data_len, strlen(compress_test_bufs[i]) + 1);
3043 
3044 	struct interim_data_params int_data = {
3045 		NULL,
3046 		1,
3047 		NULL,
3048 		&ts_params->def_comp_xform,
3049 		&ts_params->def_decomp_xform,
3050 		1
3051 	};
3052 
3053 	struct test_data_params test_data = {
3054 		.compress_state = RTE_COMP_OP_STATELESS,
3055 		.decompress_state = RTE_COMP_OP_STATELESS,
3056 		.buff_type = LB_BOTH,
3057 		.zlib_dir = ZLIB_DECOMPRESS,
3058 		.out_of_space = 0,
3059 		.big_data = 0,
3060 		.use_external_mbufs = 1,
3061 		.inbuf_data_size = data_len,
3062 		.inbuf_memzone = make_memzone("inbuf", data_len),
3063 		.compbuf_memzone = make_memzone("compbuf", data_len *
3064 						COMPRESS_BUF_SIZE_RATIO),
3065 		.uncompbuf_memzone = make_memzone("decompbuf", data_len),
3066 		.overflow = OVERFLOW_DISABLED
3067 	};
3068 
3069 	for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
3070 		/* prepare input data */
3071 		data_len = strlen(compress_test_bufs[i]) + 1;
3072 		rte_memcpy(test_data.inbuf_memzone->addr, compress_test_bufs[i],
3073 			   data_len);
3074 		test_data.inbuf_data_size = data_len;
3075 		int_data.buf_idx = &i;
3076 
3077 		/* Compress with compressdev, decompress with Zlib */
3078 		test_data.zlib_dir = ZLIB_DECOMPRESS;
3079 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
3080 			goto exit;
3081 
3082 		/* Compress with Zlib, decompress with compressdev */
3083 		test_data.zlib_dir = ZLIB_COMPRESS;
3084 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
3085 			goto exit;
3086 	}
3087 
3088 	ret = TEST_SUCCESS;
3089 
3090 exit:
3091 	rte_memzone_free(test_data.inbuf_memzone);
3092 	rte_memzone_free(test_data.compbuf_memzone);
3093 	rte_memzone_free(test_data.uncompbuf_memzone);
3094 	return ret;
3095 }
3096 
3097 static int
3098 test_compressdev_deflate_stateless_fixed_oos_recoverable(void)
3099 {
3100 	struct comp_testsuite_params *ts_params = &testsuite_params;
3101 	uint16_t i;
3102 	int ret;
3103 	int comp_result;
3104 	const struct rte_compressdev_capabilities *capab;
3105 
3106 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
3107 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
3108 
3109 	if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
3110 		return -ENOTSUP;
3111 
3112 	struct rte_comp_xform *compress_xform =
3113 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
3114 
3115 	if (compress_xform == NULL) {
3116 		RTE_LOG(ERR, USER1,
3117 			"Compress xform could not be created\n");
3118 		ret = TEST_FAILED;
3119 		goto exit;
3120 	}
3121 
3122 	memcpy(compress_xform, ts_params->def_comp_xform,
3123 			sizeof(struct rte_comp_xform));
3124 	compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_FIXED;
3125 
3126 	struct interim_data_params int_data = {
3127 		NULL,
3128 		1,
3129 		NULL,
3130 		&compress_xform,
3131 		&ts_params->def_decomp_xform,
3132 		1
3133 	};
3134 
3135 	struct test_data_params test_data = {
3136 		.compress_state = RTE_COMP_OP_STATELESS,
3137 		.decompress_state = RTE_COMP_OP_STATELESS,
3138 		.buff_type = LB_BOTH,
3139 		.zlib_dir = ZLIB_DECOMPRESS,
3140 		.out_of_space = 0,
3141 		.big_data = 0,
3142 		.overflow = OVERFLOW_ENABLED
3143 	};
3144 
3145 	for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
3146 		int_data.test_bufs = &compress_test_bufs[i];
3147 		int_data.buf_idx = &i;
3148 
3149 		/* Compress with compressdev, decompress with Zlib */
3150 		test_data.zlib_dir = ZLIB_DECOMPRESS;
3151 		comp_result = test_deflate_comp_decomp(&int_data, &test_data);
3152 		if (comp_result < 0) {
3153 			ret = TEST_FAILED;
3154 			goto exit;
3155 		} else if (comp_result > 0) {
3156 			ret = -ENOTSUP;
3157 			goto exit;
3158 		}
3159 
3160 		/* Compress with Zlib, decompress with compressdev */
3161 		test_data.zlib_dir = ZLIB_COMPRESS;
3162 		comp_result = test_deflate_comp_decomp(&int_data, &test_data);
3163 		if (comp_result < 0) {
3164 			ret = TEST_FAILED;
3165 			goto exit;
3166 		} else if (comp_result > 0) {
3167 			ret = -ENOTSUP;
3168 			goto exit;
3169 		}
3170 	}
3171 
3172 	ret = TEST_SUCCESS;
3173 
3174 exit:
3175 	rte_free(compress_xform);
3176 	return ret;
3177 }
3178 
3179 static struct unit_test_suite compressdev_testsuite  = {
3180 	.suite_name = "compressdev unit test suite",
3181 	.setup = testsuite_setup,
3182 	.teardown = testsuite_teardown,
3183 	.unit_test_cases = {
3184 		TEST_CASE_ST(NULL, NULL,
3185 			test_compressdev_invalid_configuration),
3186 		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
3187 			test_compressdev_deflate_stateless_fixed),
3188 		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
3189 			test_compressdev_deflate_stateless_dynamic),
3190 		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
3191 			test_compressdev_deflate_stateless_dynamic_big),
3192 		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
3193 			test_compressdev_deflate_stateless_multi_op),
3194 		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
3195 			test_compressdev_deflate_stateless_multi_level),
3196 		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
3197 			test_compressdev_deflate_stateless_multi_xform),
3198 		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
3199 			test_compressdev_deflate_stateless_sgl),
3200 		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
3201 			test_compressdev_deflate_stateless_checksum),
3202 		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
3203 			test_compressdev_out_of_space_buffer),
3204 		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
3205 			test_compressdev_deflate_stateful_decomp),
3206 		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
3207 			test_compressdev_deflate_stateful_decomp_checksum),
3208 		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
3209 			test_compressdev_external_mbufs),
3210 		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
3211 		test_compressdev_deflate_stateless_fixed_oos_recoverable),
3212 		TEST_CASES_END() /**< NULL terminate unit test array */
3213 	}
3214 };
3215 
3216 static int
3217 test_compressdev(void)
3218 {
3219 	return unit_test_suite_runner(&compressdev_testsuite);
3220 }
3221 
/* Expose the suite to the dpdk-test shell as "compressdev_autotest" */
REGISTER_TEST_COMMAND(compressdev_autotest, test_compressdev);
3223