xref: /dpdk/app/test/test_compressdev.c (revision 54ad947eda42042d2bdae69b57d0c7c8e291d9ec)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 - 2019 Intel Corporation
3  */
4 #include <string.h>
5 #include <zlib.h>
6 #include <math.h>
7 #include <stdlib.h>
8 #include <unistd.h>
9 
10 #include <rte_cycles.h>
11 #include <rte_malloc.h>
12 #include <rte_mempool.h>
13 #include <rte_mbuf.h>
14 #include <rte_compressdev.h>
15 #include <rte_string_fns.h>
16 
17 #include "test_compressdev_test_buffer.h"
18 #include "test.h"
19 
20 #define DIV_CEIL(a, b)  ((a) / (b) + ((a) % (b) != 0))
21 
22 #define DEFAULT_WINDOW_SIZE 15
23 #define DEFAULT_MEM_LEVEL 8
24 #define MAX_DEQD_RETRIES 10
25 #define DEQUEUE_WAIT_TIME 10000
26 
27 /*
28  * 30% extra size for compressed data compared to original data,
29  * in case data size cannot be reduced and it is actually bigger
30  * due to the compress block headers
31  */
32 #define COMPRESS_BUF_SIZE_RATIO 1.3
33 #define NUM_LARGE_MBUFS 16
34 #define SMALL_SEG_SIZE 256
35 #define MAX_SEGS 16
36 #define NUM_OPS 16
37 #define NUM_MAX_XFORMS 16
38 #define NUM_MAX_INFLIGHT_OPS 128
39 #define CACHE_SIZE 0
40 
41 #define ZLIB_CRC_CHECKSUM_WINDOW_BITS 31
42 #define ZLIB_HEADER_SIZE 2
43 #define ZLIB_TRAILER_SIZE 4
44 #define GZIP_HEADER_SIZE 10
45 #define GZIP_TRAILER_SIZE 8
46 
47 #define OUT_OF_SPACE_BUF 1
48 
49 #define MAX_MBUF_SEGMENT_SIZE 65535
50 #define MAX_DATA_MBUF_SIZE (MAX_MBUF_SEGMENT_SIZE - RTE_PKTMBUF_HEADROOM)
51 #define NUM_BIG_MBUFS 4
52 #define BIG_DATA_TEST_SIZE (MAX_DATA_MBUF_SIZE * NUM_BIG_MBUFS / 2)
53 
/* Human-readable names for enum rte_comp_huffman, indexed by the enum
 * value; used in the per-buffer debug logs.
 */
const char *
huffman_type_strings[] = {
	[RTE_COMP_HUFFMAN_DEFAULT]	= "PMD default",
	[RTE_COMP_HUFFMAN_FIXED]	= "Fixed",
	[RTE_COMP_HUFFMAN_DYNAMIC]	= "Dynamic"
};
60 
/*
 * Which half of a compress/decompress round trip is performed by the
 * Zlib reference implementation instead of the compressdev PMD.
 */
enum zlib_direction {
	ZLIB_NONE,	/* PMD does both compress and decompress */
	ZLIB_COMPRESS,	/* Zlib compresses, PMD decompresses */
	ZLIB_DECOMPRESS,	/* PMD compresses, Zlib decompresses */
	ZLIB_ALL	/* Zlib does both directions */
};
67 
/* Mbuf layout (linear buffer vs. chained SGL) for source/destination */
enum varied_buff {
	LB_BOTH = 0,	/* both input and output are linear */
	SGL_BOTH,	/* both input and output are chained */
	SGL_TO_LB,	/* input buffer is chained, output is linear */
	LB_TO_SGL	/* input is linear, output buffer is chained */
};
74 
/*
 * Per-operation private data (stored in the op mempool's private area):
 * remembers the operation's position at enqueue time, since dequeue
 * order from the device is not guaranteed.
 */
struct priv_op_data {
	uint16_t orig_idx;	/* index of the op when it was built */
};
78 
/* Resources shared by the whole suite, created by testsuite_setup() */
struct comp_testsuite_params {
	struct rte_mempool *large_mbuf_pool;	/* linear bufs sized to largest test vector */
	struct rte_mempool *small_mbuf_pool;	/* SMALL_SEG_SIZE segments for SGL tests */
	struct rte_mempool *big_mbuf_pool;	/* max-size segments for big-data tests */
	struct rte_mempool *op_pool;	/* rte_comp_op pool with priv_op_data area */
	struct rte_comp_xform *def_comp_xform;	/* default DEFLATE compress xform */
	struct rte_comp_xform *def_decomp_xform;	/* default DEFLATE decompress xform */
};
87 
/* Test-vector inputs passed to test_deflate_comp_decomp() */
struct interim_data_params {
	const char * const *test_bufs;	/* NUL-terminated input strings */
	unsigned int num_bufs;	/* number of entries in test_bufs */
	uint16_t *buf_idx;	/* original buffer indices (for logging) */
	struct rte_comp_xform **compress_xforms;	/* applied round-robin over bufs */
	struct rte_comp_xform **decompress_xforms;	/* applied round-robin over bufs */
	unsigned int num_xforms;	/* entries in each xform array */
};
96 
/* Per-case knobs controlling how test_deflate_comp_decomp() runs */
struct test_data_params {
	enum rte_comp_op_type state;	/* stateless vs. stateful ops */
	enum varied_buff buff_type;	/* linear/SGL combination */
	enum zlib_direction zlib_dir;	/* which direction(s) Zlib handles */
	unsigned int out_of_space;	/* 1: force an undersized dest buffer */
	unsigned int big_data;	/* 1: use the big mbuf pool */
};
104 
105 static struct comp_testsuite_params testsuite_params = { 0 };
106 
107 static void
108 testsuite_teardown(void)
109 {
110 	struct comp_testsuite_params *ts_params = &testsuite_params;
111 
112 	if (rte_mempool_in_use_count(ts_params->large_mbuf_pool))
113 		RTE_LOG(ERR, USER1, "Large mbuf pool still has unfreed bufs\n");
114 	if (rte_mempool_in_use_count(ts_params->small_mbuf_pool))
115 		RTE_LOG(ERR, USER1, "Small mbuf pool still has unfreed bufs\n");
116 	if (rte_mempool_in_use_count(ts_params->big_mbuf_pool))
117 		RTE_LOG(ERR, USER1, "Big mbuf pool still has unfreed bufs\n");
118 	if (rte_mempool_in_use_count(ts_params->op_pool))
119 		RTE_LOG(ERR, USER1, "op pool still has unfreed ops\n");
120 
121 	rte_mempool_free(ts_params->large_mbuf_pool);
122 	rte_mempool_free(ts_params->small_mbuf_pool);
123 	rte_mempool_free(ts_params->big_mbuf_pool);
124 	rte_mempool_free(ts_params->op_pool);
125 	rte_free(ts_params->def_comp_xform);
126 	rte_free(ts_params->def_decomp_xform);
127 }
128 
129 static int
130 testsuite_setup(void)
131 {
132 	struct comp_testsuite_params *ts_params = &testsuite_params;
133 	uint32_t max_buf_size = 0;
134 	unsigned int i;
135 
136 	if (rte_compressdev_count() == 0) {
137 		RTE_LOG(WARNING, USER1, "Need at least one compress device\n");
138 		return TEST_SKIPPED;
139 	}
140 
141 	RTE_LOG(NOTICE, USER1, "Running tests on device %s\n",
142 				rte_compressdev_name_get(0));
143 
144 	for (i = 0; i < RTE_DIM(compress_test_bufs); i++)
145 		max_buf_size = RTE_MAX(max_buf_size,
146 				strlen(compress_test_bufs[i]) + 1);
147 
148 	/*
149 	 * Buffers to be used in compression and decompression.
150 	 * Since decompressed data might be larger than
151 	 * compressed data (due to block header),
152 	 * buffers should be big enough for both cases.
153 	 */
154 	max_buf_size *= COMPRESS_BUF_SIZE_RATIO;
155 	ts_params->large_mbuf_pool = rte_pktmbuf_pool_create("large_mbuf_pool",
156 			NUM_LARGE_MBUFS,
157 			CACHE_SIZE, 0,
158 			max_buf_size + RTE_PKTMBUF_HEADROOM,
159 			rte_socket_id());
160 	if (ts_params->large_mbuf_pool == NULL) {
161 		RTE_LOG(ERR, USER1, "Large mbuf pool could not be created\n");
162 		return TEST_FAILED;
163 	}
164 
165 	/* Create mempool with smaller buffers for SGL testing */
166 	ts_params->small_mbuf_pool = rte_pktmbuf_pool_create("small_mbuf_pool",
167 			NUM_LARGE_MBUFS * MAX_SEGS,
168 			CACHE_SIZE, 0,
169 			SMALL_SEG_SIZE + RTE_PKTMBUF_HEADROOM,
170 			rte_socket_id());
171 	if (ts_params->small_mbuf_pool == NULL) {
172 		RTE_LOG(ERR, USER1, "Small mbuf pool could not be created\n");
173 		goto exit;
174 	}
175 
176 	/* Create mempool with big buffers for SGL testing */
177 	ts_params->big_mbuf_pool = rte_pktmbuf_pool_create("big_mbuf_pool",
178 			NUM_BIG_MBUFS + 1,
179 			CACHE_SIZE, 0,
180 			MAX_MBUF_SEGMENT_SIZE,
181 			rte_socket_id());
182 	if (ts_params->big_mbuf_pool == NULL) {
183 		RTE_LOG(ERR, USER1, "Big mbuf pool could not be created\n");
184 		goto exit;
185 	}
186 
187 	ts_params->op_pool = rte_comp_op_pool_create("op_pool", NUM_OPS,
188 				0, sizeof(struct priv_op_data),
189 				rte_socket_id());
190 	if (ts_params->op_pool == NULL) {
191 		RTE_LOG(ERR, USER1, "Operation pool could not be created\n");
192 		goto exit;
193 	}
194 
195 	ts_params->def_comp_xform =
196 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
197 	if (ts_params->def_comp_xform == NULL) {
198 		RTE_LOG(ERR, USER1,
199 			"Default compress xform could not be created\n");
200 		goto exit;
201 	}
202 	ts_params->def_decomp_xform =
203 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
204 	if (ts_params->def_decomp_xform == NULL) {
205 		RTE_LOG(ERR, USER1,
206 			"Default decompress xform could not be created\n");
207 		goto exit;
208 	}
209 
210 	/* Initializes default values for compress/decompress xforms */
211 	ts_params->def_comp_xform->type = RTE_COMP_COMPRESS;
212 	ts_params->def_comp_xform->compress.algo = RTE_COMP_ALGO_DEFLATE,
213 	ts_params->def_comp_xform->compress.deflate.huffman =
214 						RTE_COMP_HUFFMAN_DEFAULT;
215 	ts_params->def_comp_xform->compress.level = RTE_COMP_LEVEL_PMD_DEFAULT;
216 	ts_params->def_comp_xform->compress.chksum = RTE_COMP_CHECKSUM_NONE;
217 	ts_params->def_comp_xform->compress.window_size = DEFAULT_WINDOW_SIZE;
218 
219 	ts_params->def_decomp_xform->type = RTE_COMP_DECOMPRESS;
220 	ts_params->def_decomp_xform->decompress.algo = RTE_COMP_ALGO_DEFLATE,
221 	ts_params->def_decomp_xform->decompress.chksum = RTE_COMP_CHECKSUM_NONE;
222 	ts_params->def_decomp_xform->decompress.window_size = DEFAULT_WINDOW_SIZE;
223 
224 	return TEST_SUCCESS;
225 
226 exit:
227 	testsuite_teardown();
228 
229 	return TEST_FAILED;
230 }
231 
232 static int
233 generic_ut_setup(void)
234 {
235 	/* Configure compressdev (one device, one queue pair) */
236 	struct rte_compressdev_config config = {
237 		.socket_id = rte_socket_id(),
238 		.nb_queue_pairs = 1,
239 		.max_nb_priv_xforms = NUM_MAX_XFORMS,
240 		.max_nb_streams = 0
241 	};
242 
243 	if (rte_compressdev_configure(0, &config) < 0) {
244 		RTE_LOG(ERR, USER1, "Device configuration failed\n");
245 		return -1;
246 	}
247 
248 	if (rte_compressdev_queue_pair_setup(0, 0, NUM_MAX_INFLIGHT_OPS,
249 			rte_socket_id()) < 0) {
250 		RTE_LOG(ERR, USER1, "Queue pair setup failed\n");
251 		return -1;
252 	}
253 
254 	if (rte_compressdev_start(0) < 0) {
255 		RTE_LOG(ERR, USER1, "Device could not be started\n");
256 		return -1;
257 	}
258 
259 	return 0;
260 }
261 
262 static void
263 generic_ut_teardown(void)
264 {
265 	rte_compressdev_stop(0);
266 	if (rte_compressdev_close(0) < 0)
267 		RTE_LOG(ERR, USER1, "Device could not be closed\n");
268 }
269 
270 static int
271 test_compressdev_invalid_configuration(void)
272 {
273 	struct rte_compressdev_config invalid_config;
274 	struct rte_compressdev_config valid_config = {
275 		.socket_id = rte_socket_id(),
276 		.nb_queue_pairs = 1,
277 		.max_nb_priv_xforms = NUM_MAX_XFORMS,
278 		.max_nb_streams = 0
279 	};
280 	struct rte_compressdev_info dev_info;
281 
282 	/* Invalid configuration with 0 queue pairs */
283 	memcpy(&invalid_config, &valid_config,
284 			sizeof(struct rte_compressdev_config));
285 	invalid_config.nb_queue_pairs = 0;
286 
287 	TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
288 			"Device configuration was successful "
289 			"with no queue pairs (invalid)\n");
290 
291 	/*
292 	 * Invalid configuration with too many queue pairs
293 	 * (if there is an actual maximum number of queue pairs)
294 	 */
295 	rte_compressdev_info_get(0, &dev_info);
296 	if (dev_info.max_nb_queue_pairs != 0) {
297 		memcpy(&invalid_config, &valid_config,
298 			sizeof(struct rte_compressdev_config));
299 		invalid_config.nb_queue_pairs = dev_info.max_nb_queue_pairs + 1;
300 
301 		TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
302 				"Device configuration was successful "
303 				"with too many queue pairs (invalid)\n");
304 	}
305 
306 	/* Invalid queue pair setup, with no number of queue pairs set */
307 	TEST_ASSERT_FAIL(rte_compressdev_queue_pair_setup(0, 0,
308 				NUM_MAX_INFLIGHT_OPS, rte_socket_id()),
309 			"Queue pair setup was successful "
310 			"with no queue pairs set (invalid)\n");
311 
312 	return TEST_SUCCESS;
313 }
314 
315 static int
316 compare_buffers(const char *buffer1, uint32_t buffer1_len,
317 		const char *buffer2, uint32_t buffer2_len)
318 {
319 	if (buffer1_len != buffer2_len) {
320 		RTE_LOG(ERR, USER1, "Buffer lengths are different\n");
321 		return -1;
322 	}
323 
324 	if (memcmp(buffer1, buffer2, buffer1_len) != 0) {
325 		RTE_LOG(ERR, USER1, "Buffers are different\n");
326 		return -1;
327 	}
328 
329 	return 0;
330 }
331 
332 /*
333  * Maps compressdev and Zlib flush flags
334  */
335 static int
336 map_zlib_flush_flag(enum rte_comp_flush_flag flag)
337 {
338 	switch (flag) {
339 	case RTE_COMP_FLUSH_NONE:
340 		return Z_NO_FLUSH;
341 	case RTE_COMP_FLUSH_SYNC:
342 		return Z_SYNC_FLUSH;
343 	case RTE_COMP_FLUSH_FULL:
344 		return Z_FULL_FLUSH;
345 	case RTE_COMP_FLUSH_FINAL:
346 		return Z_FINISH;
347 	/*
348 	 * There should be only the values above,
349 	 * so this should never happen
350 	 */
351 	default:
352 		return -1;
353 	}
354 }
355 
/*
 * Compress op->m_src into op->m_dst with the Zlib deflate API, used as
 * the reference implementation against the PMD.
 *
 * Window bits select the stream format: raw DEFLATE by default
 * (negative bits), zlib format for ADLER32 checksum, gzip format for
 * CRC32 checksum.  Chained (SGL) mbufs are bounced through temporary
 * linear buffers.  On success fills op->consumed/produced/status/
 * output_chksum and returns 0; returns non-zero otherwise.
 */
static int
compress_zlib(struct rte_comp_op *op,
		const struct rte_comp_xform *xform, int mem_level)
{
	z_stream stream;
	int zlib_flush;
	int strategy, window_bits, comp_level;
	int ret = TEST_FAILED;
	uint8_t *single_src_buf = NULL;
	uint8_t *single_dst_buf = NULL;

	/* initialize zlib stream */
	stream.zalloc = Z_NULL;
	stream.zfree = Z_NULL;
	stream.opaque = Z_NULL;

	/* Zlib has no "PMD default"; anything but FIXED maps to default */
	if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED)
		strategy = Z_FIXED;
	else
		strategy = Z_DEFAULT_STRATEGY;

	/*
	 * Window bits is the base two logarithm of the window size (in bytes).
	 * When doing raw DEFLATE, this number will be negative.
	 */
	window_bits = -(xform->compress.window_size);
	/* Positive bits -> zlib format, which carries an ADLER32 checksum */
	if (xform->compress.chksum == RTE_COMP_CHECKSUM_ADLER32)
		window_bits *= -1;
	/* 31 (16 + 15) -> gzip format, which carries a CRC32 checksum */
	else if (xform->compress.chksum == RTE_COMP_CHECKSUM_CRC32)
		window_bits = ZLIB_CRC_CHECKSUM_WINDOW_BITS;

	comp_level = xform->compress.level;

	if (comp_level != RTE_COMP_LEVEL_NONE)
		ret = deflateInit2(&stream, comp_level, Z_DEFLATED,
			window_bits, mem_level, strategy);
	else
		/* Level NONE: stored (uncompressed) blocks only */
		ret = deflateInit(&stream, Z_NO_COMPRESSION);

	if (ret != Z_OK) {
		printf("Zlib deflate could not be initialized\n");
		goto exit;
	}

	/* Assuming stateless operation */
	/* SGL Input */
	if (op->m_src->nb_segs > 1) {
		/* Linearize the chained source into a bounce buffer */
		single_src_buf = rte_malloc(NULL,
				rte_pktmbuf_pkt_len(op->m_src), 0);
		if (single_src_buf == NULL) {
			RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
			goto exit;
		}

		if (rte_pktmbuf_read(op->m_src, op->src.offset,
					rte_pktmbuf_pkt_len(op->m_src) -
					op->src.offset,
					single_src_buf) == NULL) {
			RTE_LOG(ERR, USER1,
				"Buffer could not be read entirely\n");
			goto exit;
		}

		stream.avail_in = op->src.length;
		stream.next_in = single_src_buf;

	} else {
		stream.avail_in = op->src.length;
		stream.next_in = rte_pktmbuf_mtod_offset(op->m_src, uint8_t *,
				op->src.offset);
	}
	/* SGL output */
	if (op->m_dst->nb_segs > 1) {

		/* Deflate into a linear bounce buffer, scatter afterwards */
		single_dst_buf = rte_malloc(NULL,
				rte_pktmbuf_pkt_len(op->m_dst), 0);
			if (single_dst_buf == NULL) {
				RTE_LOG(ERR, USER1,
					"Buffer could not be allocated\n");
			goto exit;
		}

		stream.avail_out = op->m_dst->pkt_len;
		stream.next_out = single_dst_buf;

	} else {/* linear output */
		stream.avail_out = op->m_dst->data_len;
		stream.next_out = rte_pktmbuf_mtod_offset(op->m_dst, uint8_t *,
				op->dst.offset);
	}

	/* Stateless operation, all buffer will be compressed in one go */
	zlib_flush = map_zlib_flush_flag(op->flush_flag);
	ret = deflate(&stream, zlib_flush);

	if (stream.avail_in != 0) {
		RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
		goto exit;
	}

	if (ret != Z_STREAM_END)
		goto exit;

	/* Copy data to destination SGL */
	if (op->m_dst->nb_segs > 1) {
		uint32_t remaining_data = stream.total_out;
		uint8_t *src_data = single_dst_buf;
		struct rte_mbuf *dst_buf = op->m_dst;

		/* Walk the chain, filling each segment to its data_len */
		while (remaining_data > 0) {
			/* NOTE(review): dst.offset is applied to every
			 * segment, not only the first — assumes callers use
			 * dst.offset == 0 for SGL output; confirm.
			 */
			uint8_t *dst_data = rte_pktmbuf_mtod_offset(dst_buf,
						uint8_t *, op->dst.offset);
			/* Last segment */
			if (remaining_data < dst_buf->data_len) {
				memcpy(dst_data, src_data, remaining_data);
				remaining_data = 0;
			} else {
				memcpy(dst_data, src_data, dst_buf->data_len);
				remaining_data -= dst_buf->data_len;
				src_data += dst_buf->data_len;
				dst_buf = dst_buf->next;
			}
		}
	}

	op->consumed = stream.total_in;
	/* Strip the zlib/gzip framing so only the DEFLATE payload remains,
	 * and report produced accordingly
	 */
	if (xform->compress.chksum == RTE_COMP_CHECKSUM_ADLER32) {
		rte_pktmbuf_adj(op->m_dst, ZLIB_HEADER_SIZE);
		rte_pktmbuf_trim(op->m_dst, ZLIB_TRAILER_SIZE);
		op->produced = stream.total_out - (ZLIB_HEADER_SIZE +
				ZLIB_TRAILER_SIZE);
	} else if (xform->compress.chksum == RTE_COMP_CHECKSUM_CRC32) {
		rte_pktmbuf_adj(op->m_dst, GZIP_HEADER_SIZE);
		rte_pktmbuf_trim(op->m_dst, GZIP_TRAILER_SIZE);
		op->produced = stream.total_out - (GZIP_HEADER_SIZE +
				GZIP_TRAILER_SIZE);
	} else
		op->produced = stream.total_out;

	op->status = RTE_COMP_OP_STATUS_SUCCESS;
	/* stream.adler holds ADLER32 or CRC32 depending on the format */
	op->output_chksum = stream.adler;

	deflateReset(&stream);

	ret = 0;
exit:
	deflateEnd(&stream);
	rte_free(single_src_buf);
	rte_free(single_dst_buf);

	return ret;
}
508 
509 static int
510 decompress_zlib(struct rte_comp_op *op,
511 		const struct rte_comp_xform *xform)
512 {
513 	z_stream stream;
514 	int window_bits;
515 	int zlib_flush;
516 	int ret = TEST_FAILED;
517 	uint8_t *single_src_buf = NULL;
518 	uint8_t *single_dst_buf = NULL;
519 
520 	/* initialize zlib stream */
521 	stream.zalloc = Z_NULL;
522 	stream.zfree = Z_NULL;
523 	stream.opaque = Z_NULL;
524 
525 	/*
526 	 * Window bits is the base two logarithm of the window size (in bytes).
527 	 * When doing raw DEFLATE, this number will be negative.
528 	 */
529 	window_bits = -(xform->decompress.window_size);
530 	ret = inflateInit2(&stream, window_bits);
531 
532 	if (ret != Z_OK) {
533 		printf("Zlib deflate could not be initialized\n");
534 		goto exit;
535 	}
536 
537 	/* Assuming stateless operation */
538 	/* SGL */
539 	if (op->m_src->nb_segs > 1) {
540 		single_src_buf = rte_malloc(NULL,
541 				rte_pktmbuf_pkt_len(op->m_src), 0);
542 		if (single_src_buf == NULL) {
543 			RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
544 			goto exit;
545 		}
546 		single_dst_buf = rte_malloc(NULL,
547 				rte_pktmbuf_pkt_len(op->m_dst), 0);
548 		if (single_dst_buf == NULL) {
549 			RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
550 			goto exit;
551 		}
552 		if (rte_pktmbuf_read(op->m_src, 0,
553 					rte_pktmbuf_pkt_len(op->m_src),
554 					single_src_buf) == NULL) {
555 			RTE_LOG(ERR, USER1,
556 				"Buffer could not be read entirely\n");
557 			goto exit;
558 		}
559 
560 		stream.avail_in = op->src.length;
561 		stream.next_in = single_src_buf;
562 		stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
563 		stream.next_out = single_dst_buf;
564 
565 	} else {
566 		stream.avail_in = op->src.length;
567 		stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
568 		stream.avail_out = op->m_dst->data_len;
569 		stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
570 	}
571 
572 	/* Stateless operation, all buffer will be compressed in one go */
573 	zlib_flush = map_zlib_flush_flag(op->flush_flag);
574 	ret = inflate(&stream, zlib_flush);
575 
576 	if (stream.avail_in != 0) {
577 		RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
578 		goto exit;
579 	}
580 
581 	if (ret != Z_STREAM_END)
582 		goto exit;
583 
584 	if (op->m_src->nb_segs > 1) {
585 		uint32_t remaining_data = stream.total_out;
586 		uint8_t *src_data = single_dst_buf;
587 		struct rte_mbuf *dst_buf = op->m_dst;
588 
589 		while (remaining_data > 0) {
590 			uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
591 					uint8_t *);
592 			/* Last segment */
593 			if (remaining_data < dst_buf->data_len) {
594 				memcpy(dst_data, src_data, remaining_data);
595 				remaining_data = 0;
596 			} else {
597 				memcpy(dst_data, src_data, dst_buf->data_len);
598 				remaining_data -= dst_buf->data_len;
599 				src_data += dst_buf->data_len;
600 				dst_buf = dst_buf->next;
601 			}
602 		}
603 	}
604 
605 	op->consumed = stream.total_in;
606 	op->produced = stream.total_out;
607 	op->status = RTE_COMP_OP_STATUS_SUCCESS;
608 
609 	inflateReset(&stream);
610 
611 	ret = 0;
612 exit:
613 	inflateEnd(&stream);
614 
615 	return ret;
616 }
617 
/*
 * Turn head_buf into a chained (SGL) mbuf holding total_data_size bytes.
 *
 * Segments of seg_size bytes are taken from small_mbuf_pool; the final
 * segment holds all remaining data and comes from large_mbuf_pool when
 * that remainder exceeds seg_size.  When test_buf is non-NULL its
 * contents are copied into the chain (without a NUL terminator); when
 * NULL, space is only reserved (destination-buffer case).
 *
 * limit_segs_in_sgl caps the total number of segments (0 = no cap).
 * Returns 0 on success, -1 on allocation/append/chain failure.
 */
static int
prepare_sgl_bufs(const char *test_buf, struct rte_mbuf *head_buf,
		uint32_t total_data_size,
		struct rte_mempool *small_mbuf_pool,
		struct rte_mempool *large_mbuf_pool,
		uint8_t limit_segs_in_sgl,
		uint16_t seg_size)
{
	uint32_t remaining_data = total_data_size;
	/* Segments needed at seg_size bytes each, rounding up */
	uint16_t num_remaining_segs = DIV_CEIL(remaining_data, seg_size);
	struct rte_mempool *pool;
	struct rte_mbuf *next_seg;
	uint32_t data_size;
	char *buf_ptr;
	const char *data_ptr = test_buf;
	uint16_t i;
	int ret;

	/* NOTE(review): the cap uses limit - 1, so a capped chain ends up
	 * one segment below the limit; presumably intentional headroom —
	 * confirm against the callers' MAX_SEGS usage.
	 */
	if (limit_segs_in_sgl != 0 && num_remaining_segs > limit_segs_in_sgl)
		num_remaining_segs = limit_segs_in_sgl - 1;

	/*
	 * Allocate data in the first segment (header) and
	 * copy data if test buffer is provided
	 */
	if (remaining_data < seg_size)
		data_size = remaining_data;
	else
		data_size = seg_size;
	buf_ptr = rte_pktmbuf_append(head_buf, data_size);
	if (buf_ptr == NULL) {
		RTE_LOG(ERR, USER1,
			"Not enough space in the 1st buffer\n");
		return -1;
	}

	if (data_ptr != NULL) {
		/* Copy characters without NULL terminator */
		strncpy(buf_ptr, data_ptr, data_size);
		data_ptr += data_size;
	}
	remaining_data -= data_size;
	num_remaining_segs--;

	/*
	 * Allocate the rest of the segments,
	 * copy the rest of the data and chain the segments.
	 */
	for (i = 0; i < num_remaining_segs; i++) {

		if (i == (num_remaining_segs - 1)) {
			/* last segment: takes ALL remaining data, so it may
			 * need a buffer bigger than seg_size
			 */
			if (remaining_data > seg_size)
				pool = large_mbuf_pool;
			else
				pool = small_mbuf_pool;
			data_size = remaining_data;
		} else {
			data_size = seg_size;
			pool = small_mbuf_pool;
		}

		next_seg = rte_pktmbuf_alloc(pool);
		if (next_seg == NULL) {
			RTE_LOG(ERR, USER1,
				"New segment could not be allocated "
				"from the mempool\n");
			return -1;
		}
		buf_ptr = rte_pktmbuf_append(next_seg, data_size);
		if (buf_ptr == NULL) {
			RTE_LOG(ERR, USER1,
				"Not enough space in the buffer\n");
			rte_pktmbuf_free(next_seg);
			return -1;
		}
		if (data_ptr != NULL) {
			/* Copy characters without NULL terminator */
			strncpy(buf_ptr, data_ptr, data_size);
			data_ptr += data_size;
		}
		remaining_data -= data_size;

		ret = rte_pktmbuf_chain(head_buf, next_seg);
		if (ret != 0) {
			rte_pktmbuf_free(next_seg);
			RTE_LOG(ERR, USER1,
				"Segment could not chained\n");
			return -1;
		}
	}

	return 0;
}
712 
713 /*
714  * Compresses and decompresses buffer with compressdev API and Zlib API
715  */
716 static int
717 test_deflate_comp_decomp(const struct interim_data_params *int_data,
718 		const struct test_data_params *test_data)
719 {
720 	struct comp_testsuite_params *ts_params = &testsuite_params;
721 	const char * const *test_bufs = int_data->test_bufs;
722 	unsigned int num_bufs = int_data->num_bufs;
723 	uint16_t *buf_idx = int_data->buf_idx;
724 	struct rte_comp_xform **compress_xforms = int_data->compress_xforms;
725 	struct rte_comp_xform **decompress_xforms = int_data->decompress_xforms;
726 	unsigned int num_xforms = int_data->num_xforms;
727 	enum rte_comp_op_type state = test_data->state;
728 	unsigned int buff_type = test_data->buff_type;
729 	unsigned int out_of_space = test_data->out_of_space;
730 	unsigned int big_data = test_data->big_data;
731 	enum zlib_direction zlib_dir = test_data->zlib_dir;
732 	int ret_status = -1;
733 	int ret;
734 	struct rte_mbuf *uncomp_bufs[num_bufs];
735 	struct rte_mbuf *comp_bufs[num_bufs];
736 	struct rte_comp_op *ops[num_bufs];
737 	struct rte_comp_op *ops_processed[num_bufs];
738 	void *priv_xforms[num_bufs];
739 	uint16_t num_enqd, num_deqd, num_total_deqd;
740 	uint16_t num_priv_xforms = 0;
741 	unsigned int deqd_retries = 0;
742 	struct priv_op_data *priv_data;
743 	char *buf_ptr;
744 	unsigned int i;
745 	struct rte_mempool *buf_pool;
746 	uint32_t data_size;
747 	/* Compressing with CompressDev */
748 	unsigned int oos_zlib_decompress =
749 			(zlib_dir == ZLIB_NONE || zlib_dir == ZLIB_DECOMPRESS);
750 	/* Decompressing with CompressDev */
751 	unsigned int oos_zlib_compress =
752 			(zlib_dir == ZLIB_NONE || zlib_dir == ZLIB_COMPRESS);
753 	const struct rte_compressdev_capabilities *capa =
754 		rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
755 	char *contig_buf = NULL;
756 	uint64_t compress_checksum[num_bufs];
757 
758 	if (capa == NULL) {
759 		RTE_LOG(ERR, USER1,
760 			"Compress device does not support DEFLATE\n");
761 		return -1;
762 	}
763 
764 	/* Initialize all arrays to NULL */
765 	memset(uncomp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
766 	memset(comp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
767 	memset(ops, 0, sizeof(struct rte_comp_op *) * num_bufs);
768 	memset(ops_processed, 0, sizeof(struct rte_comp_op *) * num_bufs);
769 	memset(priv_xforms, 0, sizeof(void *) * num_bufs);
770 
771 	if (big_data)
772 		buf_pool = ts_params->big_mbuf_pool;
773 	else if (buff_type == SGL_BOTH)
774 		buf_pool = ts_params->small_mbuf_pool;
775 	else
776 		buf_pool = ts_params->large_mbuf_pool;
777 
778 	/* Prepare the source mbufs with the data */
779 	ret = rte_pktmbuf_alloc_bulk(buf_pool,
780 				uncomp_bufs, num_bufs);
781 	if (ret < 0) {
782 		RTE_LOG(ERR, USER1,
783 			"Source mbufs could not be allocated "
784 			"from the mempool\n");
785 		goto exit;
786 	}
787 
788 	if (buff_type == SGL_BOTH || buff_type == SGL_TO_LB) {
789 		for (i = 0; i < num_bufs; i++) {
790 			data_size = strlen(test_bufs[i]) + 1;
791 			if (prepare_sgl_bufs(test_bufs[i], uncomp_bufs[i],
792 			    data_size,
793 			    big_data ? buf_pool : ts_params->small_mbuf_pool,
794 			    big_data ? buf_pool : ts_params->large_mbuf_pool,
795 			    big_data ? 0 : MAX_SEGS,
796 			    big_data ? MAX_DATA_MBUF_SIZE : SMALL_SEG_SIZE) < 0)
797 				goto exit;
798 		}
799 	} else {
800 		for (i = 0; i < num_bufs; i++) {
801 			data_size = strlen(test_bufs[i]) + 1;
802 			buf_ptr = rte_pktmbuf_append(uncomp_bufs[i], data_size);
803 			strlcpy(buf_ptr, test_bufs[i], data_size);
804 		}
805 	}
806 
807 	/* Prepare the destination mbufs */
808 	ret = rte_pktmbuf_alloc_bulk(buf_pool, comp_bufs, num_bufs);
809 	if (ret < 0) {
810 		RTE_LOG(ERR, USER1,
811 			"Destination mbufs could not be allocated "
812 			"from the mempool\n");
813 		goto exit;
814 	}
815 
816 	if (buff_type == SGL_BOTH || buff_type == LB_TO_SGL) {
817 		for (i = 0; i < num_bufs; i++) {
818 			if (out_of_space == 1 && oos_zlib_decompress)
819 				data_size = OUT_OF_SPACE_BUF;
820 			else
821 				(data_size = strlen(test_bufs[i]) *
822 					COMPRESS_BUF_SIZE_RATIO);
823 
824 			if (prepare_sgl_bufs(NULL, comp_bufs[i],
825 			      data_size,
826 			      big_data ? buf_pool : ts_params->small_mbuf_pool,
827 			      big_data ? buf_pool : ts_params->large_mbuf_pool,
828 			      big_data ? 0 : MAX_SEGS,
829 			      big_data ? MAX_DATA_MBUF_SIZE : SMALL_SEG_SIZE)
830 					< 0)
831 				goto exit;
832 		}
833 
834 	} else {
835 		for (i = 0; i < num_bufs; i++) {
836 			if (out_of_space == 1 && oos_zlib_decompress)
837 				data_size = OUT_OF_SPACE_BUF;
838 			else
839 				(data_size = strlen(test_bufs[i]) *
840 					COMPRESS_BUF_SIZE_RATIO);
841 
842 			rte_pktmbuf_append(comp_bufs[i], data_size);
843 		}
844 	}
845 
846 	/* Build the compression operations */
847 	ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
848 	if (ret < 0) {
849 		RTE_LOG(ERR, USER1,
850 			"Compress operations could not be allocated "
851 			"from the mempool\n");
852 		goto exit;
853 	}
854 
855 
856 	for (i = 0; i < num_bufs; i++) {
857 		ops[i]->m_src = uncomp_bufs[i];
858 		ops[i]->m_dst = comp_bufs[i];
859 		ops[i]->src.offset = 0;
860 		ops[i]->src.length = rte_pktmbuf_pkt_len(uncomp_bufs[i]);
861 		ops[i]->dst.offset = 0;
862 		if (state == RTE_COMP_OP_STATELESS) {
863 			ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
864 		} else {
865 			RTE_LOG(ERR, USER1,
866 				"Stateful operations are not supported "
867 				"in these tests yet\n");
868 			goto exit;
869 		}
870 		ops[i]->input_chksum = 0;
871 		/*
872 		 * Store original operation index in private data,
873 		 * since ordering does not have to be maintained,
874 		 * when dequeueing from compressdev, so a comparison
875 		 * at the end of the test can be done.
876 		 */
877 		priv_data = (struct priv_op_data *) (ops[i] + 1);
878 		priv_data->orig_idx = i;
879 	}
880 
881 	/* Compress data (either with Zlib API or compressdev API */
882 	if (zlib_dir == ZLIB_COMPRESS || zlib_dir == ZLIB_ALL) {
883 		for (i = 0; i < num_bufs; i++) {
884 			const struct rte_comp_xform *compress_xform =
885 				compress_xforms[i % num_xforms];
886 			ret = compress_zlib(ops[i], compress_xform,
887 					DEFAULT_MEM_LEVEL);
888 			if (ret < 0)
889 				goto exit;
890 
891 			ops_processed[i] = ops[i];
892 		}
893 	} else {
894 		/* Create compress private xform data */
895 		for (i = 0; i < num_xforms; i++) {
896 			ret = rte_compressdev_private_xform_create(0,
897 				(const struct rte_comp_xform *)compress_xforms[i],
898 				&priv_xforms[i]);
899 			if (ret < 0) {
900 				RTE_LOG(ERR, USER1,
901 					"Compression private xform "
902 					"could not be created\n");
903 				goto exit;
904 			}
905 			num_priv_xforms++;
906 		}
907 
908 		if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
909 			/* Attach shareable private xform data to ops */
910 			for (i = 0; i < num_bufs; i++)
911 				ops[i]->private_xform = priv_xforms[i % num_xforms];
912 		} else {
913 			/* Create rest of the private xforms for the other ops */
914 			for (i = num_xforms; i < num_bufs; i++) {
915 				ret = rte_compressdev_private_xform_create(0,
916 					compress_xforms[i % num_xforms],
917 					&priv_xforms[i]);
918 				if (ret < 0) {
919 					RTE_LOG(ERR, USER1,
920 						"Compression private xform "
921 						"could not be created\n");
922 					goto exit;
923 				}
924 				num_priv_xforms++;
925 			}
926 
927 			/* Attach non shareable private xform data to ops */
928 			for (i = 0; i < num_bufs; i++)
929 				ops[i]->private_xform = priv_xforms[i];
930 		}
931 
932 		/* Enqueue and dequeue all operations */
933 		num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
934 		if (num_enqd < num_bufs) {
935 			RTE_LOG(ERR, USER1,
936 				"The operations could not be enqueued\n");
937 			goto exit;
938 		}
939 
940 		num_total_deqd = 0;
941 		do {
942 			/*
943 			 * If retrying a dequeue call, wait for 10 ms to allow
944 			 * enough time to the driver to process the operations
945 			 */
946 			if (deqd_retries != 0) {
947 				/*
948 				 * Avoid infinite loop if not all the
949 				 * operations get out of the device
950 				 */
951 				if (deqd_retries == MAX_DEQD_RETRIES) {
952 					RTE_LOG(ERR, USER1,
953 						"Not all operations could be "
954 						"dequeued\n");
955 					goto exit;
956 				}
957 				usleep(DEQUEUE_WAIT_TIME);
958 			}
959 			num_deqd = rte_compressdev_dequeue_burst(0, 0,
960 					&ops_processed[num_total_deqd], num_bufs);
961 			num_total_deqd += num_deqd;
962 			deqd_retries++;
963 
964 		} while (num_total_deqd < num_enqd);
965 
966 		deqd_retries = 0;
967 
968 		/* Free compress private xforms */
969 		for (i = 0; i < num_priv_xforms; i++) {
970 			rte_compressdev_private_xform_free(0, priv_xforms[i]);
971 			priv_xforms[i] = NULL;
972 		}
973 		num_priv_xforms = 0;
974 	}
975 
976 	for (i = 0; i < num_bufs; i++) {
977 		priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
978 		uint16_t xform_idx = priv_data->orig_idx % num_xforms;
979 		const struct rte_comp_compress_xform *compress_xform =
980 				&compress_xforms[xform_idx]->compress;
981 		enum rte_comp_huffman huffman_type =
982 			compress_xform->deflate.huffman;
983 		char engine[] = "zlib (directly, not PMD)";
984 		if (zlib_dir != ZLIB_COMPRESS && zlib_dir != ZLIB_ALL)
985 			strlcpy(engine, "PMD", sizeof(engine));
986 
987 		RTE_LOG(DEBUG, USER1, "Buffer %u compressed by %s from %u to"
988 			" %u bytes (level = %d, huffman = %s)\n",
989 			buf_idx[priv_data->orig_idx], engine,
990 			ops_processed[i]->consumed, ops_processed[i]->produced,
991 			compress_xform->level,
992 			huffman_type_strings[huffman_type]);
993 		RTE_LOG(DEBUG, USER1, "Compression ratio = %.2f\n",
994 			ops_processed[i]->consumed == 0 ? 0 :
995 			(float)ops_processed[i]->produced /
996 			ops_processed[i]->consumed * 100);
997 		if (compress_xform->chksum != RTE_COMP_CHECKSUM_NONE)
998 			compress_checksum[i] = ops_processed[i]->output_chksum;
999 		ops[i] = NULL;
1000 	}
1001 
1002 	/*
1003 	 * Check operation status and free source mbufs (destination mbuf and
1004 	 * compress operation information is needed for the decompression stage)
1005 	 */
1006 	for (i = 0; i < num_bufs; i++) {
1007 		if (out_of_space && oos_zlib_decompress) {
1008 			if (ops_processed[i]->status !=
1009 					RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
1010 				ret_status = -1;
1011 
1012 				RTE_LOG(ERR, USER1,
1013 					"Operation without expected out of "
1014 					"space status error\n");
1015 				goto exit;
1016 			} else
1017 				continue;
1018 		}
1019 
1020 		if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
1021 			RTE_LOG(ERR, USER1,
1022 				"Some operations were not successful\n");
1023 			goto exit;
1024 		}
1025 		priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1026 		rte_pktmbuf_free(uncomp_bufs[priv_data->orig_idx]);
1027 		uncomp_bufs[priv_data->orig_idx] = NULL;
1028 	}
1029 
1030 	if (out_of_space && oos_zlib_decompress) {
1031 		ret_status = 0;
1032 		goto exit;
1033 	}
1034 
1035 	/* Allocate buffers for decompressed data */
1036 	ret = rte_pktmbuf_alloc_bulk(buf_pool, uncomp_bufs, num_bufs);
1037 	if (ret < 0) {
1038 		RTE_LOG(ERR, USER1,
1039 			"Destination mbufs could not be allocated "
1040 			"from the mempool\n");
1041 		goto exit;
1042 	}
1043 
1044 	if (buff_type == SGL_BOTH || buff_type == LB_TO_SGL) {
1045 		for (i = 0; i < num_bufs; i++) {
1046 			priv_data = (struct priv_op_data *)
1047 					(ops_processed[i] + 1);
1048 			if (out_of_space == 1 && oos_zlib_compress)
1049 				data_size = OUT_OF_SPACE_BUF;
1050 			else
1051 				data_size =
1052 				strlen(test_bufs[priv_data->orig_idx]) + 1;
1053 
1054 			if (prepare_sgl_bufs(NULL, uncomp_bufs[i],
1055 			       data_size,
1056 			       big_data ? buf_pool : ts_params->small_mbuf_pool,
1057 			       big_data ? buf_pool : ts_params->large_mbuf_pool,
1058 			       big_data ? 0 : MAX_SEGS,
1059 			       big_data ? MAX_DATA_MBUF_SIZE : SMALL_SEG_SIZE)
1060 					< 0)
1061 				goto exit;
1062 		}
1063 
1064 	} else {
1065 		for (i = 0; i < num_bufs; i++) {
1066 			priv_data = (struct priv_op_data *)
1067 					(ops_processed[i] + 1);
1068 			if (out_of_space == 1 && oos_zlib_compress)
1069 				data_size = OUT_OF_SPACE_BUF;
1070 			else
1071 				data_size =
1072 				strlen(test_bufs[priv_data->orig_idx]) + 1;
1073 
1074 			rte_pktmbuf_append(uncomp_bufs[i], data_size);
1075 		}
1076 	}
1077 
1078 	/* Build the decompression operations */
1079 	ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
1080 	if (ret < 0) {
1081 		RTE_LOG(ERR, USER1,
1082 			"Decompress operations could not be allocated "
1083 			"from the mempool\n");
1084 		goto exit;
1085 	}
1086 
1087 	/* Source buffer is the compressed data from the previous operations */
1088 	for (i = 0; i < num_bufs; i++) {
1089 		ops[i]->m_src = ops_processed[i]->m_dst;
1090 		ops[i]->m_dst = uncomp_bufs[i];
1091 		ops[i]->src.offset = 0;
1092 		/*
1093 		 * Set the length of the compressed data to the
1094 		 * number of bytes that were produced in the previous stage
1095 		 */
1096 		ops[i]->src.length = ops_processed[i]->produced;
1097 		ops[i]->dst.offset = 0;
1098 		if (state == RTE_COMP_OP_STATELESS) {
1099 			ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
1100 		} else {
1101 			RTE_LOG(ERR, USER1,
1102 				"Stateful operations are not supported "
1103 				"in these tests yet\n");
1104 			goto exit;
1105 		}
1106 		ops[i]->input_chksum = 0;
1107 		/*
1108 		 * Copy private data from previous operations,
1109 		 * to keep the pointer to the original buffer
1110 		 */
1111 		memcpy(ops[i] + 1, ops_processed[i] + 1,
1112 				sizeof(struct priv_op_data));
1113 	}
1114 
1115 	/*
1116 	 * Free the previous compress operations,
1117 	 * as they are not needed anymore
1118 	 */
1119 	rte_comp_op_bulk_free(ops_processed, num_bufs);
1120 
1121 	/* Decompress data (either with Zlib API or compressdev API */
1122 	if (zlib_dir == ZLIB_DECOMPRESS || zlib_dir == ZLIB_ALL) {
1123 		for (i = 0; i < num_bufs; i++) {
1124 			priv_data = (struct priv_op_data *)(ops[i] + 1);
1125 			uint16_t xform_idx = priv_data->orig_idx % num_xforms;
1126 			const struct rte_comp_xform *decompress_xform =
1127 				decompress_xforms[xform_idx];
1128 
1129 			ret = decompress_zlib(ops[i], decompress_xform);
1130 			if (ret < 0)
1131 				goto exit;
1132 
1133 			ops_processed[i] = ops[i];
1134 		}
1135 	} else {
1136 		/* Create decompress private xform data */
1137 		for (i = 0; i < num_xforms; i++) {
1138 			ret = rte_compressdev_private_xform_create(0,
1139 				(const struct rte_comp_xform *)decompress_xforms[i],
1140 				&priv_xforms[i]);
1141 			if (ret < 0) {
1142 				RTE_LOG(ERR, USER1,
1143 					"Decompression private xform "
1144 					"could not be created\n");
1145 				goto exit;
1146 			}
1147 			num_priv_xforms++;
1148 		}
1149 
1150 		if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
1151 			/* Attach shareable private xform data to ops */
1152 			for (i = 0; i < num_bufs; i++) {
1153 				priv_data = (struct priv_op_data *)(ops[i] + 1);
1154 				uint16_t xform_idx = priv_data->orig_idx %
1155 								num_xforms;
1156 				ops[i]->private_xform = priv_xforms[xform_idx];
1157 			}
1158 		} else {
1159 			/* Create rest of the private xforms for the other ops */
1160 			for (i = num_xforms; i < num_bufs; i++) {
1161 				ret = rte_compressdev_private_xform_create(0,
1162 					decompress_xforms[i % num_xforms],
1163 					&priv_xforms[i]);
1164 				if (ret < 0) {
1165 					RTE_LOG(ERR, USER1,
1166 						"Decompression private xform "
1167 						"could not be created\n");
1168 					goto exit;
1169 				}
1170 				num_priv_xforms++;
1171 			}
1172 
1173 			/* Attach non shareable private xform data to ops */
1174 			for (i = 0; i < num_bufs; i++) {
1175 				priv_data = (struct priv_op_data *)(ops[i] + 1);
1176 				uint16_t xform_idx = priv_data->orig_idx;
1177 				ops[i]->private_xform = priv_xforms[xform_idx];
1178 			}
1179 		}
1180 
1181 		/* Enqueue and dequeue all operations */
1182 		num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
1183 		if (num_enqd < num_bufs) {
1184 			RTE_LOG(ERR, USER1,
1185 				"The operations could not be enqueued\n");
1186 			goto exit;
1187 		}
1188 
1189 		num_total_deqd = 0;
1190 		do {
1191 			/*
1192 			 * If retrying a dequeue call, wait for 10 ms to allow
1193 			 * enough time to the driver to process the operations
1194 			 */
1195 			if (deqd_retries != 0) {
1196 				/*
1197 				 * Avoid infinite loop if not all the
1198 				 * operations get out of the device
1199 				 */
1200 				if (deqd_retries == MAX_DEQD_RETRIES) {
1201 					RTE_LOG(ERR, USER1,
1202 						"Not all operations could be "
1203 						"dequeued\n");
1204 					goto exit;
1205 				}
1206 				usleep(DEQUEUE_WAIT_TIME);
1207 			}
1208 			num_deqd = rte_compressdev_dequeue_burst(0, 0,
1209 					&ops_processed[num_total_deqd], num_bufs);
1210 			num_total_deqd += num_deqd;
1211 			deqd_retries++;
1212 		} while (num_total_deqd < num_enqd);
1213 
1214 		deqd_retries = 0;
1215 	}
1216 
1217 	for (i = 0; i < num_bufs; i++) {
1218 		priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1219 		char engine[] = "zlib, (directly, no PMD)";
1220 		if (zlib_dir != ZLIB_DECOMPRESS && zlib_dir != ZLIB_ALL)
1221 			strlcpy(engine, "pmd", sizeof(engine));
1222 		RTE_LOG(DEBUG, USER1,
1223 			"Buffer %u decompressed by %s from %u to %u bytes\n",
1224 			buf_idx[priv_data->orig_idx], engine,
1225 			ops_processed[i]->consumed, ops_processed[i]->produced);
1226 		ops[i] = NULL;
1227 	}
1228 
1229 	/*
1230 	 * Check operation status and free source mbuf (destination mbuf and
1231 	 * compress operation information is still needed)
1232 	 */
1233 	for (i = 0; i < num_bufs; i++) {
1234 		if (out_of_space && oos_zlib_compress) {
1235 			if (ops_processed[i]->status !=
1236 					RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
1237 				ret_status = -1;
1238 
1239 				RTE_LOG(ERR, USER1,
1240 					"Operation without expected out of "
1241 					"space status error\n");
1242 				goto exit;
1243 			} else
1244 				continue;
1245 		}
1246 
1247 		if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
1248 			RTE_LOG(ERR, USER1,
1249 				"Some operations were not successful\n");
1250 			goto exit;
1251 		}
1252 		priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1253 		rte_pktmbuf_free(comp_bufs[priv_data->orig_idx]);
1254 		comp_bufs[priv_data->orig_idx] = NULL;
1255 	}
1256 
1257 	if (out_of_space && oos_zlib_compress) {
1258 		ret_status = 0;
1259 		goto exit;
1260 	}
1261 
1262 	/*
1263 	 * Compare the original stream with the decompressed stream
1264 	 * (in size and the data)
1265 	 */
1266 	for (i = 0; i < num_bufs; i++) {
1267 		priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1268 		const char *buf1 = test_bufs[priv_data->orig_idx];
1269 		const char *buf2;
1270 		contig_buf = rte_malloc(NULL, ops_processed[i]->produced, 0);
1271 		if (contig_buf == NULL) {
1272 			RTE_LOG(ERR, USER1, "Contiguous buffer could not "
1273 					"be allocated\n");
1274 			goto exit;
1275 		}
1276 
1277 		buf2 = rte_pktmbuf_read(ops_processed[i]->m_dst, 0,
1278 				ops_processed[i]->produced, contig_buf);
1279 		if (compare_buffers(buf1, strlen(buf1) + 1,
1280 				buf2, ops_processed[i]->produced) < 0)
1281 			goto exit;
1282 
1283 		/* Test checksums */
1284 		if (compress_xforms[0]->compress.chksum !=
1285 				RTE_COMP_CHECKSUM_NONE) {
1286 			if (ops_processed[i]->output_chksum !=
1287 					compress_checksum[i]) {
1288 				RTE_LOG(ERR, USER1, "The checksums differ\n"
1289 			"Compression Checksum: %" PRIu64 "\tDecompression "
1290 			"Checksum: %" PRIu64 "\n", compress_checksum[i],
1291 			ops_processed[i]->output_chksum);
1292 				goto exit;
1293 			}
1294 		}
1295 
1296 		rte_free(contig_buf);
1297 		contig_buf = NULL;
1298 	}
1299 
1300 	ret_status = 0;
1301 
1302 exit:
1303 	/* Free resources */
1304 	for (i = 0; i < num_bufs; i++) {
1305 		rte_pktmbuf_free(uncomp_bufs[i]);
1306 		rte_pktmbuf_free(comp_bufs[i]);
1307 		rte_comp_op_free(ops[i]);
1308 		rte_comp_op_free(ops_processed[i]);
1309 	}
1310 	for (i = 0; i < num_priv_xforms; i++) {
1311 		if (priv_xforms[i] != NULL)
1312 			rte_compressdev_private_xform_free(0, priv_xforms[i]);
1313 	}
1314 	rte_free(contig_buf);
1315 
1316 	return ret_status;
1317 }
1318 
1319 static int
1320 test_compressdev_deflate_stateless_fixed(void)
1321 {
1322 	struct comp_testsuite_params *ts_params = &testsuite_params;
1323 	uint16_t i;
1324 	int ret;
1325 	const struct rte_compressdev_capabilities *capab;
1326 
1327 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1328 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1329 
1330 	if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
1331 		return -ENOTSUP;
1332 
1333 	struct rte_comp_xform *compress_xform =
1334 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1335 
1336 	if (compress_xform == NULL) {
1337 		RTE_LOG(ERR, USER1,
1338 			"Compress xform could not be created\n");
1339 		ret = TEST_FAILED;
1340 		goto exit;
1341 	}
1342 
1343 	memcpy(compress_xform, ts_params->def_comp_xform,
1344 			sizeof(struct rte_comp_xform));
1345 	compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_FIXED;
1346 
1347 	struct interim_data_params int_data = {
1348 		NULL,
1349 		1,
1350 		NULL,
1351 		&compress_xform,
1352 		&ts_params->def_decomp_xform,
1353 		1
1354 	};
1355 
1356 	struct test_data_params test_data = {
1357 		RTE_COMP_OP_STATELESS,
1358 		LB_BOTH,
1359 		ZLIB_DECOMPRESS,
1360 		0,
1361 		0
1362 	};
1363 
1364 	for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1365 		int_data.test_bufs = &compress_test_bufs[i];
1366 		int_data.buf_idx = &i;
1367 
1368 		/* Compress with compressdev, decompress with Zlib */
1369 		test_data.zlib_dir = ZLIB_DECOMPRESS;
1370 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1371 			ret = TEST_FAILED;
1372 			goto exit;
1373 		}
1374 
1375 		/* Compress with Zlib, decompress with compressdev */
1376 		test_data.zlib_dir = ZLIB_COMPRESS;
1377 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1378 			ret = TEST_FAILED;
1379 			goto exit;
1380 		}
1381 	}
1382 
1383 	ret = TEST_SUCCESS;
1384 
1385 exit:
1386 	rte_free(compress_xform);
1387 	return ret;
1388 }
1389 
1390 static int
1391 test_compressdev_deflate_stateless_dynamic(void)
1392 {
1393 	struct comp_testsuite_params *ts_params = &testsuite_params;
1394 	uint16_t i;
1395 	int ret;
1396 	struct rte_comp_xform *compress_xform =
1397 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1398 
1399 	const struct rte_compressdev_capabilities *capab;
1400 
1401 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1402 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1403 
1404 	if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
1405 		return -ENOTSUP;
1406 
1407 	if (compress_xform == NULL) {
1408 		RTE_LOG(ERR, USER1,
1409 			"Compress xform could not be created\n");
1410 		ret = TEST_FAILED;
1411 		goto exit;
1412 	}
1413 
1414 	memcpy(compress_xform, ts_params->def_comp_xform,
1415 			sizeof(struct rte_comp_xform));
1416 	compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC;
1417 
1418 	struct interim_data_params int_data = {
1419 		NULL,
1420 		1,
1421 		NULL,
1422 		&compress_xform,
1423 		&ts_params->def_decomp_xform,
1424 		1
1425 	};
1426 
1427 	struct test_data_params test_data = {
1428 		RTE_COMP_OP_STATELESS,
1429 		LB_BOTH,
1430 		ZLIB_DECOMPRESS,
1431 		0,
1432 		0
1433 	};
1434 
1435 	for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1436 		int_data.test_bufs = &compress_test_bufs[i];
1437 		int_data.buf_idx = &i;
1438 
1439 		/* Compress with compressdev, decompress with Zlib */
1440 		test_data.zlib_dir = ZLIB_DECOMPRESS;
1441 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1442 			ret = TEST_FAILED;
1443 			goto exit;
1444 		}
1445 
1446 		/* Compress with Zlib, decompress with compressdev */
1447 		test_data.zlib_dir = ZLIB_COMPRESS;
1448 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1449 			ret = TEST_FAILED;
1450 			goto exit;
1451 		}
1452 	}
1453 
1454 	ret = TEST_SUCCESS;
1455 
1456 exit:
1457 	rte_free(compress_xform);
1458 	return ret;
1459 }
1460 
1461 static int
1462 test_compressdev_deflate_stateless_multi_op(void)
1463 {
1464 	struct comp_testsuite_params *ts_params = &testsuite_params;
1465 	uint16_t num_bufs = RTE_DIM(compress_test_bufs);
1466 	uint16_t buf_idx[num_bufs];
1467 	uint16_t i;
1468 
1469 	for (i = 0; i < num_bufs; i++)
1470 		buf_idx[i] = i;
1471 
1472 	struct interim_data_params int_data = {
1473 		compress_test_bufs,
1474 		num_bufs,
1475 		buf_idx,
1476 		&ts_params->def_comp_xform,
1477 		&ts_params->def_decomp_xform,
1478 		1
1479 	};
1480 
1481 	struct test_data_params test_data = {
1482 		RTE_COMP_OP_STATELESS,
1483 		LB_BOTH,
1484 		ZLIB_DECOMPRESS,
1485 		0,
1486 		0
1487 	};
1488 
1489 	/* Compress with compressdev, decompress with Zlib */
1490 	test_data.zlib_dir = ZLIB_DECOMPRESS;
1491 	if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1492 		return TEST_FAILED;
1493 
1494 	/* Compress with Zlib, decompress with compressdev */
1495 	test_data.zlib_dir = ZLIB_COMPRESS;
1496 	if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1497 		return TEST_FAILED;
1498 
1499 	return TEST_SUCCESS;
1500 }
1501 
1502 static int
1503 test_compressdev_deflate_stateless_multi_level(void)
1504 {
1505 	struct comp_testsuite_params *ts_params = &testsuite_params;
1506 	unsigned int level;
1507 	uint16_t i;
1508 	int ret;
1509 	struct rte_comp_xform *compress_xform =
1510 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1511 
1512 	if (compress_xform == NULL) {
1513 		RTE_LOG(ERR, USER1,
1514 			"Compress xform could not be created\n");
1515 		ret = TEST_FAILED;
1516 		goto exit;
1517 	}
1518 
1519 	memcpy(compress_xform, ts_params->def_comp_xform,
1520 			sizeof(struct rte_comp_xform));
1521 
1522 	struct interim_data_params int_data = {
1523 		NULL,
1524 		1,
1525 		NULL,
1526 		&compress_xform,
1527 		&ts_params->def_decomp_xform,
1528 		1
1529 	};
1530 
1531 	struct test_data_params test_data = {
1532 		RTE_COMP_OP_STATELESS,
1533 		LB_BOTH,
1534 		ZLIB_DECOMPRESS,
1535 		0,
1536 		0
1537 	};
1538 
1539 	for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1540 		int_data.test_bufs = &compress_test_bufs[i];
1541 		int_data.buf_idx = &i;
1542 
1543 		for (level = RTE_COMP_LEVEL_MIN; level <= RTE_COMP_LEVEL_MAX;
1544 				level++) {
1545 			compress_xform->compress.level = level;
1546 			/* Compress with compressdev, decompress with Zlib */
1547 			test_data.zlib_dir = ZLIB_DECOMPRESS;
1548 			if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1549 				ret = TEST_FAILED;
1550 				goto exit;
1551 			}
1552 		}
1553 	}
1554 
1555 	ret = TEST_SUCCESS;
1556 
1557 exit:
1558 	rte_free(compress_xform);
1559 	return ret;
1560 }
1561 
1562 #define NUM_XFORMS 3
1563 static int
1564 test_compressdev_deflate_stateless_multi_xform(void)
1565 {
1566 	struct comp_testsuite_params *ts_params = &testsuite_params;
1567 	uint16_t num_bufs = NUM_XFORMS;
1568 	struct rte_comp_xform *compress_xforms[NUM_XFORMS] = {NULL};
1569 	struct rte_comp_xform *decompress_xforms[NUM_XFORMS] = {NULL};
1570 	const char *test_buffers[NUM_XFORMS];
1571 	uint16_t i;
1572 	unsigned int level = RTE_COMP_LEVEL_MIN;
1573 	uint16_t buf_idx[num_bufs];
1574 
1575 	int ret;
1576 
1577 	/* Create multiple xforms with various levels */
1578 	for (i = 0; i < NUM_XFORMS; i++) {
1579 		compress_xforms[i] = rte_malloc(NULL,
1580 				sizeof(struct rte_comp_xform), 0);
1581 		if (compress_xforms[i] == NULL) {
1582 			RTE_LOG(ERR, USER1,
1583 				"Compress xform could not be created\n");
1584 			ret = TEST_FAILED;
1585 			goto exit;
1586 		}
1587 
1588 		memcpy(compress_xforms[i], ts_params->def_comp_xform,
1589 				sizeof(struct rte_comp_xform));
1590 		compress_xforms[i]->compress.level = level;
1591 		level++;
1592 
1593 		decompress_xforms[i] = rte_malloc(NULL,
1594 				sizeof(struct rte_comp_xform), 0);
1595 		if (decompress_xforms[i] == NULL) {
1596 			RTE_LOG(ERR, USER1,
1597 				"Decompress xform could not be created\n");
1598 			ret = TEST_FAILED;
1599 			goto exit;
1600 		}
1601 
1602 		memcpy(decompress_xforms[i], ts_params->def_decomp_xform,
1603 				sizeof(struct rte_comp_xform));
1604 	}
1605 
1606 	for (i = 0; i < NUM_XFORMS; i++) {
1607 		buf_idx[i] = 0;
1608 		/* Use the same buffer in all sessions */
1609 		test_buffers[i] = compress_test_bufs[0];
1610 	}
1611 
1612 	struct interim_data_params int_data = {
1613 		test_buffers,
1614 		num_bufs,
1615 		buf_idx,
1616 		compress_xforms,
1617 		decompress_xforms,
1618 		NUM_XFORMS
1619 	};
1620 
1621 	struct test_data_params test_data = {
1622 		RTE_COMP_OP_STATELESS,
1623 		LB_BOTH,
1624 		ZLIB_DECOMPRESS,
1625 		0,
1626 		0
1627 	};
1628 
1629 	/* Compress with compressdev, decompress with Zlib */
1630 	if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1631 		ret = TEST_FAILED;
1632 		goto exit;
1633 	}
1634 
1635 	ret = TEST_SUCCESS;
1636 exit:
1637 	for (i = 0; i < NUM_XFORMS; i++) {
1638 		rte_free(compress_xforms[i]);
1639 		rte_free(decompress_xforms[i]);
1640 	}
1641 
1642 	return ret;
1643 }
1644 
1645 static int
1646 test_compressdev_deflate_stateless_sgl(void)
1647 {
1648 	struct comp_testsuite_params *ts_params = &testsuite_params;
1649 	uint16_t i;
1650 	const struct rte_compressdev_capabilities *capab;
1651 
1652 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1653 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1654 
1655 	if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
1656 		return -ENOTSUP;
1657 
1658 	struct interim_data_params int_data = {
1659 		NULL,
1660 		1,
1661 		NULL,
1662 		&ts_params->def_comp_xform,
1663 		&ts_params->def_decomp_xform,
1664 		1
1665 	};
1666 
1667 	struct test_data_params test_data = {
1668 		RTE_COMP_OP_STATELESS,
1669 		SGL_BOTH,
1670 		ZLIB_DECOMPRESS,
1671 		0,
1672 		0
1673 	};
1674 
1675 	for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1676 		int_data.test_bufs = &compress_test_bufs[i];
1677 		int_data.buf_idx = &i;
1678 
1679 		/* Compress with compressdev, decompress with Zlib */
1680 		test_data.zlib_dir = ZLIB_DECOMPRESS;
1681 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1682 			return TEST_FAILED;
1683 
1684 		/* Compress with Zlib, decompress with compressdev */
1685 		test_data.zlib_dir = ZLIB_COMPRESS;
1686 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1687 			return TEST_FAILED;
1688 
1689 		if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_LB_OUT) {
1690 			/* Compress with compressdev, decompress with Zlib */
1691 			test_data.zlib_dir = ZLIB_DECOMPRESS;
1692 			test_data.buff_type = SGL_TO_LB;
1693 			if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1694 				return TEST_FAILED;
1695 
1696 			/* Compress with Zlib, decompress with compressdev */
1697 			test_data.zlib_dir = ZLIB_COMPRESS;
1698 			test_data.buff_type = SGL_TO_LB;
1699 			if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1700 				return TEST_FAILED;
1701 		}
1702 
1703 		if (capab->comp_feature_flags & RTE_COMP_FF_OOP_LB_IN_SGL_OUT) {
1704 			/* Compress with compressdev, decompress with Zlib */
1705 			test_data.zlib_dir = ZLIB_DECOMPRESS;
1706 			test_data.buff_type = LB_TO_SGL;
1707 			if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1708 				return TEST_FAILED;
1709 
1710 			/* Compress with Zlib, decompress with compressdev */
1711 			test_data.zlib_dir = ZLIB_COMPRESS;
1712 			test_data.buff_type = LB_TO_SGL;
1713 			if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1714 				return TEST_FAILED;
1715 		}
1716 
1717 
1718 	}
1719 
1720 	return TEST_SUCCESS;
1721 
1722 }
1723 
1724 static int
1725 test_compressdev_deflate_stateless_checksum(void)
1726 {
1727 	struct comp_testsuite_params *ts_params = &testsuite_params;
1728 	uint16_t i;
1729 	int ret;
1730 	const struct rte_compressdev_capabilities *capab;
1731 
1732 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1733 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1734 
1735 	/* Check if driver supports any checksum */
1736 	if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM) == 0 &&
1737 			(capab->comp_feature_flags &
1738 			RTE_COMP_FF_ADLER32_CHECKSUM) == 0 &&
1739 			(capab->comp_feature_flags &
1740 			RTE_COMP_FF_CRC32_ADLER32_CHECKSUM) == 0)
1741 		return -ENOTSUP;
1742 
1743 	struct rte_comp_xform *compress_xform =
1744 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1745 	if (compress_xform == NULL) {
1746 		RTE_LOG(ERR, USER1, "Compress xform could not be created\n");
1747 		ret = TEST_FAILED;
1748 		return ret;
1749 	}
1750 
1751 	memcpy(compress_xform, ts_params->def_comp_xform,
1752 			sizeof(struct rte_comp_xform));
1753 
1754 	struct rte_comp_xform *decompress_xform =
1755 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1756 	if (decompress_xform == NULL) {
1757 		RTE_LOG(ERR, USER1, "Decompress xform could not be created\n");
1758 		rte_free(compress_xform);
1759 		ret = TEST_FAILED;
1760 		return ret;
1761 	}
1762 
1763 	memcpy(decompress_xform, ts_params->def_decomp_xform,
1764 			sizeof(struct rte_comp_xform));
1765 
1766 	struct interim_data_params int_data = {
1767 		NULL,
1768 		1,
1769 		NULL,
1770 		&compress_xform,
1771 		&decompress_xform,
1772 		1
1773 	};
1774 
1775 	struct test_data_params test_data = {
1776 		RTE_COMP_OP_STATELESS,
1777 		LB_BOTH,
1778 		ZLIB_DECOMPRESS,
1779 		0,
1780 		0
1781 	};
1782 
1783 	/* Check if driver supports crc32 checksum and test */
1784 	if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM)) {
1785 		compress_xform->compress.chksum = RTE_COMP_CHECKSUM_CRC32;
1786 		decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_CRC32;
1787 
1788 		for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1789 			/* Compress with compressdev, decompress with Zlib */
1790 			int_data.test_bufs = &compress_test_bufs[i];
1791 			int_data.buf_idx = &i;
1792 
1793 			/* Generate zlib checksum and test against selected
1794 			 * drivers decompression checksum
1795 			 */
1796 			test_data.zlib_dir = ZLIB_COMPRESS;
1797 			if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1798 				ret = TEST_FAILED;
1799 				goto exit;
1800 			}
1801 
1802 			/* Generate compression and decompression
1803 			 * checksum of selected driver
1804 			 */
1805 			test_data.zlib_dir = ZLIB_NONE;
1806 			if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1807 				ret = TEST_FAILED;
1808 				goto exit;
1809 			}
1810 		}
1811 	}
1812 
1813 	/* Check if driver supports adler32 checksum and test */
1814 	if ((capab->comp_feature_flags & RTE_COMP_FF_ADLER32_CHECKSUM)) {
1815 		compress_xform->compress.chksum = RTE_COMP_CHECKSUM_ADLER32;
1816 		decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_ADLER32;
1817 
1818 		for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1819 			int_data.test_bufs = &compress_test_bufs[i];
1820 			int_data.buf_idx = &i;
1821 
1822 			/* Generate zlib checksum and test against selected
1823 			 * drivers decompression checksum
1824 			 */
1825 			test_data.zlib_dir = ZLIB_COMPRESS;
1826 			if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1827 				ret = TEST_FAILED;
1828 				goto exit;
1829 			}
1830 			/* Generate compression and decompression
1831 			 * checksum of selected driver
1832 			 */
1833 			test_data.zlib_dir = ZLIB_NONE;
1834 			if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1835 				ret = TEST_FAILED;
1836 				goto exit;
1837 			}
1838 		}
1839 	}
1840 
1841 	/* Check if driver supports combined crc and adler checksum and test */
1842 	if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_ADLER32_CHECKSUM)) {
1843 		compress_xform->compress.chksum =
1844 				RTE_COMP_CHECKSUM_CRC32_ADLER32;
1845 		decompress_xform->decompress.chksum =
1846 				RTE_COMP_CHECKSUM_CRC32_ADLER32;
1847 
1848 		for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1849 			int_data.test_bufs = &compress_test_bufs[i];
1850 			int_data.buf_idx = &i;
1851 
1852 			/* Generate compression and decompression
1853 			 * checksum of selected driver
1854 			 */
1855 			test_data.zlib_dir = ZLIB_NONE;
1856 			if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1857 				ret = TEST_FAILED;
1858 				goto exit;
1859 			}
1860 		}
1861 	}
1862 
1863 	ret = TEST_SUCCESS;
1864 
1865 exit:
1866 	rte_free(compress_xform);
1867 	rte_free(decompress_xform);
1868 	return ret;
1869 }
1870 
1871 static int
1872 test_compressdev_out_of_space_buffer(void)
1873 {
1874 	struct comp_testsuite_params *ts_params = &testsuite_params;
1875 	int ret;
1876 	uint16_t i;
1877 	const struct rte_compressdev_capabilities *capab;
1878 
1879 	RTE_LOG(ERR, USER1, "This is a negative test errors are expected\n");
1880 
1881 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1882 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1883 
1884 	if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
1885 		return -ENOTSUP;
1886 
1887 	struct rte_comp_xform *compress_xform =
1888 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1889 
1890 	if (compress_xform == NULL) {
1891 		RTE_LOG(ERR, USER1,
1892 			"Compress xform could not be created\n");
1893 		ret = TEST_FAILED;
1894 		goto exit;
1895 	}
1896 
1897 	struct interim_data_params int_data = {
1898 		&compress_test_bufs[0],
1899 		1,
1900 		&i,
1901 		&ts_params->def_comp_xform,
1902 		&ts_params->def_decomp_xform,
1903 		1
1904 	};
1905 
1906 	struct test_data_params test_data = {
1907 		RTE_COMP_OP_STATELESS,
1908 		LB_BOTH,
1909 		ZLIB_DECOMPRESS,
1910 		1,  /* run out-of-space test */
1911 		0
1912 	};
1913 	/* Compress with compressdev, decompress with Zlib */
1914 	test_data.zlib_dir = ZLIB_DECOMPRESS;
1915 	if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1916 		ret = TEST_FAILED;
1917 		goto exit;
1918 	}
1919 
1920 	/* Compress with Zlib, decompress with compressdev */
1921 	test_data.zlib_dir = ZLIB_COMPRESS;
1922 	if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1923 		ret = TEST_FAILED;
1924 		goto exit;
1925 	}
1926 
1927 	if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
1928 		/* Compress with compressdev, decompress with Zlib */
1929 		test_data.zlib_dir = ZLIB_DECOMPRESS;
1930 		test_data.buff_type = SGL_BOTH;
1931 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1932 			ret = TEST_FAILED;
1933 			goto exit;
1934 		}
1935 
1936 		/* Compress with Zlib, decompress with compressdev */
1937 		test_data.zlib_dir = ZLIB_COMPRESS;
1938 		test_data.buff_type = SGL_BOTH;
1939 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1940 			ret = TEST_FAILED;
1941 			goto exit;
1942 		}
1943 	}
1944 
1945 	ret  = TEST_SUCCESS;
1946 
1947 exit:
1948 	rte_free(compress_xform);
1949 	return ret;
1950 }
1951 
1952 static int
1953 test_compressdev_deflate_stateless_dynamic_big(void)
1954 {
1955 	struct comp_testsuite_params *ts_params = &testsuite_params;
1956 	uint16_t i = 0;
1957 	int ret = TEST_SUCCESS;
1958 	int j;
1959 	const struct rte_compressdev_capabilities *capab;
1960 	char *test_buffer = NULL;
1961 
1962 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1963 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1964 
1965 	if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
1966 		return -ENOTSUP;
1967 
1968 	if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
1969 		return -ENOTSUP;
1970 
1971 	test_buffer = rte_malloc(NULL, BIG_DATA_TEST_SIZE, 0);
1972 	if (test_buffer == NULL) {
1973 		RTE_LOG(ERR, USER1,
1974 			"Can't allocate buffer for big-data\n");
1975 		return TEST_FAILED;
1976 	}
1977 
1978 	struct interim_data_params int_data = {
1979 		(const char * const *)&test_buffer,
1980 		1,
1981 		&i,
1982 		&ts_params->def_comp_xform,
1983 		&ts_params->def_decomp_xform,
1984 		1
1985 	};
1986 
1987 	struct test_data_params test_data = {
1988 		RTE_COMP_OP_STATELESS,
1989 		SGL_BOTH,
1990 		ZLIB_DECOMPRESS,
1991 		0,
1992 		1
1993 	};
1994 
1995 	ts_params->def_comp_xform->compress.deflate.huffman =
1996 						RTE_COMP_HUFFMAN_DYNAMIC;
1997 
1998 	/* fill the buffer with data based on rand. data */
1999 	srand(BIG_DATA_TEST_SIZE);
2000 	for (j = 0; j < BIG_DATA_TEST_SIZE - 1; ++j)
2001 		test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1;
2002 	test_buffer[BIG_DATA_TEST_SIZE-1] = 0;
2003 
2004 	/* Compress with compressdev, decompress with Zlib */
2005 	test_data.zlib_dir = ZLIB_DECOMPRESS;
2006 	if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
2007 		ret = TEST_FAILED;
2008 		goto end;
2009 	}
2010 
2011 	/* Compress with Zlib, decompress with compressdev */
2012 	test_data.zlib_dir = ZLIB_COMPRESS;
2013 	if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
2014 		ret = TEST_FAILED;
2015 		goto end;
2016 	}
2017 
2018 end:
2019 	ts_params->def_comp_xform->compress.deflate.huffman =
2020 						RTE_COMP_HUFFMAN_DEFAULT;
2021 	rte_free(test_buffer);
2022 	return ret;
2023 }
2024 
2025 
/*
 * Registered test cases. Each case (except the invalid-configuration one)
 * runs with the generic per-test device setup/teardown.
 */
static struct unit_test_suite compressdev_testsuite  = {
	.suite_name = "compressdev unit test suite",
	.setup = testsuite_setup,
	.teardown = testsuite_teardown,
	.unit_test_cases = {
		TEST_CASE_ST(NULL, NULL,
			test_compressdev_invalid_configuration),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_fixed),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_dynamic),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_dynamic_big),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_multi_op),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_multi_level),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_multi_xform),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_sgl),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_checksum),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_out_of_space_buffer),
		TEST_CASES_END() /**< NULL terminate unit test array */
	}
};
2054 
/* Entry point invoked by the test framework: run the whole suite */
static int
test_compressdev(void)
{
	return unit_test_suite_runner(&compressdev_testsuite);
}
2060 
2061 REGISTER_TEST_COMMAND(compressdev_autotest, test_compressdev);
2062