xref: /dpdk/app/test/test_compressdev.c (revision 200bc52e5aa0d72e70464c9cd22b55cf536ed13c)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 - 2019 Intel Corporation
3  */
4 #include <string.h>
5 #include <zlib.h>
6 #include <math.h>
7 #include <stdlib.h>
8 #include <unistd.h>
9 
10 #include <rte_cycles.h>
11 #include <rte_malloc.h>
12 #include <rte_mempool.h>
13 #include <rte_mbuf.h>
14 #include <rte_compressdev.h>
15 #include <rte_string_fns.h>
16 
17 #include "test_compressdev_test_buffer.h"
18 #include "test.h"
19 
20 #define DIV_CEIL(a, b)  ((a) / (b) + ((a) % (b) != 0))
21 
22 #define DEFAULT_WINDOW_SIZE 15
23 #define DEFAULT_MEM_LEVEL 8
24 #define MAX_DEQD_RETRIES 10
25 #define DEQUEUE_WAIT_TIME 10000
26 
27 /*
28  * 30% extra size for compressed data compared to original data,
29  * in case data size cannot be reduced and it is actually bigger
30  * due to the compress block headers
31  */
32 #define COMPRESS_BUF_SIZE_RATIO 1.3
33 #define NUM_LARGE_MBUFS 16
34 #define SMALL_SEG_SIZE 256
35 #define MAX_SEGS 16
36 #define NUM_OPS 16
37 #define NUM_MAX_XFORMS 16
38 #define NUM_MAX_INFLIGHT_OPS 128
39 #define CACHE_SIZE 0
40 
41 #define ZLIB_CRC_CHECKSUM_WINDOW_BITS 31
42 #define ZLIB_HEADER_SIZE 2
43 #define ZLIB_TRAILER_SIZE 4
44 #define GZIP_HEADER_SIZE 10
45 #define GZIP_TRAILER_SIZE 8
46 
47 #define OUT_OF_SPACE_BUF 1
48 
49 #define MAX_MBUF_SEGMENT_SIZE 65535
50 #define MAX_DATA_MBUF_SIZE (MAX_MBUF_SEGMENT_SIZE - RTE_PKTMBUF_HEADROOM)
51 #define NUM_BIG_MBUFS 4
52 #define BIG_DATA_TEST_SIZE (MAX_DATA_MBUF_SIZE * NUM_BIG_MBUFS / 2)
53 
/* Human-readable names for enum rte_comp_huffman, indexed by enum value. */
const char *
huffman_type_strings[] = {
	[RTE_COMP_HUFFMAN_DEFAULT]	= "PMD default",
	[RTE_COMP_HUFFMAN_FIXED]	= "Fixed",
	[RTE_COMP_HUFFMAN_DYNAMIC]	= "Dynamic"
};
60 
/*
 * Which side(s) of a compress/decompress round trip are performed with the
 * zlib library directly instead of the compressdev PMD under test.
 */
enum zlib_direction {
	ZLIB_NONE,	/* both directions go through compressdev */
	ZLIB_COMPRESS,	/* zlib compresses, compressdev decompresses */
	ZLIB_DECOMPRESS,	/* compressdev compresses, zlib decompresses */
	ZLIB_ALL	/* both directions use zlib directly */
};
67 
/* Buffer layout combinations: linear buffer (LB) vs scatter-gather list (SGL). */
enum varied_buff {
	LB_BOTH = 0,	/* both input and output are linear*/
	SGL_BOTH,	/* both input and output are chained */
	SGL_TO_LB,	/* input buffer is chained */
	LB_TO_SGL	/* output buffer is chained */
};
74 
/*
 * Per-op private data stored after each rte_comp_op; remembers the original
 * submission index so results can be matched after out-of-order dequeue.
 */
struct priv_op_data {
	uint16_t orig_idx;	/* index of the op at enqueue time */
};
78 
/* Resources shared by the whole test suite, created once in testsuite_setup(). */
struct comp_testsuite_params {
	struct rte_mempool *large_mbuf_pool;	/* linear-buffer tests */
	struct rte_mempool *small_mbuf_pool;	/* SGL segments (SMALL_SEG_SIZE) */
	struct rte_mempool *big_mbuf_pool;	/* big-data tests (max-size segs) */
	struct rte_mempool *op_pool;		/* rte_comp_op allocations */
	struct rte_comp_xform *def_comp_xform;	/* default compress xform */
	struct rte_comp_xform *def_decomp_xform;	/* default decompress xform */
};
87 
/* Inputs describing WHAT to (de)compress: buffers and transform chains. */
struct interim_data_params {
	const char * const *test_bufs;	/* NUL-terminated test strings */
	unsigned int num_bufs;
	uint16_t *buf_idx;	/* indices used for log messages */
	struct rte_comp_xform **compress_xforms;
	struct rte_comp_xform **decompress_xforms;
	unsigned int num_xforms;	/* xforms are reused round-robin over bufs */
};
96 
/* Inputs describing HOW to run the test: op type, buffer layout, zlib usage. */
struct test_data_params {
	enum rte_comp_op_type state;	/* only RTE_COMP_OP_STATELESS supported */
	enum varied_buff buff_type;
	enum zlib_direction zlib_dir;
	unsigned int out_of_space;	/* non-zero: force undersized dst buffer */
	unsigned int big_data;	/* non-zero: use the big mbuf pool */
};
104 
105 static struct comp_testsuite_params testsuite_params = { 0 };
106 
107 static void
108 testsuite_teardown(void)
109 {
110 	struct comp_testsuite_params *ts_params = &testsuite_params;
111 
112 	if (rte_mempool_in_use_count(ts_params->large_mbuf_pool))
113 		RTE_LOG(ERR, USER1, "Large mbuf pool still has unfreed bufs\n");
114 	if (rte_mempool_in_use_count(ts_params->small_mbuf_pool))
115 		RTE_LOG(ERR, USER1, "Small mbuf pool still has unfreed bufs\n");
116 	if (rte_mempool_in_use_count(ts_params->big_mbuf_pool))
117 		RTE_LOG(ERR, USER1, "Big mbuf pool still has unfreed bufs\n");
118 	if (rte_mempool_in_use_count(ts_params->op_pool))
119 		RTE_LOG(ERR, USER1, "op pool still has unfreed ops\n");
120 
121 	rte_mempool_free(ts_params->large_mbuf_pool);
122 	rte_mempool_free(ts_params->small_mbuf_pool);
123 	rte_mempool_free(ts_params->big_mbuf_pool);
124 	rte_mempool_free(ts_params->op_pool);
125 	rte_free(ts_params->def_comp_xform);
126 	rte_free(ts_params->def_decomp_xform);
127 }
128 
129 static int
130 testsuite_setup(void)
131 {
132 	struct comp_testsuite_params *ts_params = &testsuite_params;
133 	uint32_t max_buf_size = 0;
134 	unsigned int i;
135 
136 	if (rte_compressdev_count() == 0) {
137 		RTE_LOG(ERR, USER1, "Need at least one compress device\n");
138 		return TEST_FAILED;
139 	}
140 
141 	RTE_LOG(NOTICE, USER1, "Running tests on device %s\n",
142 				rte_compressdev_name_get(0));
143 
144 	for (i = 0; i < RTE_DIM(compress_test_bufs); i++)
145 		max_buf_size = RTE_MAX(max_buf_size,
146 				strlen(compress_test_bufs[i]) + 1);
147 
148 	/*
149 	 * Buffers to be used in compression and decompression.
150 	 * Since decompressed data might be larger than
151 	 * compressed data (due to block header),
152 	 * buffers should be big enough for both cases.
153 	 */
154 	max_buf_size *= COMPRESS_BUF_SIZE_RATIO;
155 	ts_params->large_mbuf_pool = rte_pktmbuf_pool_create("large_mbuf_pool",
156 			NUM_LARGE_MBUFS,
157 			CACHE_SIZE, 0,
158 			max_buf_size + RTE_PKTMBUF_HEADROOM,
159 			rte_socket_id());
160 	if (ts_params->large_mbuf_pool == NULL) {
161 		RTE_LOG(ERR, USER1, "Large mbuf pool could not be created\n");
162 		return TEST_FAILED;
163 	}
164 
165 	/* Create mempool with smaller buffers for SGL testing */
166 	ts_params->small_mbuf_pool = rte_pktmbuf_pool_create("small_mbuf_pool",
167 			NUM_LARGE_MBUFS * MAX_SEGS,
168 			CACHE_SIZE, 0,
169 			SMALL_SEG_SIZE + RTE_PKTMBUF_HEADROOM,
170 			rte_socket_id());
171 	if (ts_params->small_mbuf_pool == NULL) {
172 		RTE_LOG(ERR, USER1, "Small mbuf pool could not be created\n");
173 		goto exit;
174 	}
175 
176 	/* Create mempool with big buffers for SGL testing */
177 	ts_params->big_mbuf_pool = rte_pktmbuf_pool_create("big_mbuf_pool",
178 			NUM_BIG_MBUFS + 1,
179 			CACHE_SIZE, 0,
180 			MAX_MBUF_SEGMENT_SIZE,
181 			rte_socket_id());
182 	if (ts_params->big_mbuf_pool == NULL) {
183 		RTE_LOG(ERR, USER1, "Big mbuf pool could not be created\n");
184 		goto exit;
185 	}
186 
187 	ts_params->op_pool = rte_comp_op_pool_create("op_pool", NUM_OPS,
188 				0, sizeof(struct priv_op_data),
189 				rte_socket_id());
190 	if (ts_params->op_pool == NULL) {
191 		RTE_LOG(ERR, USER1, "Operation pool could not be created\n");
192 		goto exit;
193 	}
194 
195 	ts_params->def_comp_xform =
196 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
197 	if (ts_params->def_comp_xform == NULL) {
198 		RTE_LOG(ERR, USER1,
199 			"Default compress xform could not be created\n");
200 		goto exit;
201 	}
202 	ts_params->def_decomp_xform =
203 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
204 	if (ts_params->def_decomp_xform == NULL) {
205 		RTE_LOG(ERR, USER1,
206 			"Default decompress xform could not be created\n");
207 		goto exit;
208 	}
209 
210 	/* Initializes default values for compress/decompress xforms */
211 	ts_params->def_comp_xform->type = RTE_COMP_COMPRESS;
212 	ts_params->def_comp_xform->compress.algo = RTE_COMP_ALGO_DEFLATE,
213 	ts_params->def_comp_xform->compress.deflate.huffman =
214 						RTE_COMP_HUFFMAN_DEFAULT;
215 	ts_params->def_comp_xform->compress.level = RTE_COMP_LEVEL_PMD_DEFAULT;
216 	ts_params->def_comp_xform->compress.chksum = RTE_COMP_CHECKSUM_NONE;
217 	ts_params->def_comp_xform->compress.window_size = DEFAULT_WINDOW_SIZE;
218 
219 	ts_params->def_decomp_xform->type = RTE_COMP_DECOMPRESS;
220 	ts_params->def_decomp_xform->decompress.algo = RTE_COMP_ALGO_DEFLATE,
221 	ts_params->def_decomp_xform->decompress.chksum = RTE_COMP_CHECKSUM_NONE;
222 	ts_params->def_decomp_xform->decompress.window_size = DEFAULT_WINDOW_SIZE;
223 
224 	return TEST_SUCCESS;
225 
226 exit:
227 	testsuite_teardown();
228 
229 	return TEST_FAILED;
230 }
231 
232 static int
233 generic_ut_setup(void)
234 {
235 	/* Configure compressdev (one device, one queue pair) */
236 	struct rte_compressdev_config config = {
237 		.socket_id = rte_socket_id(),
238 		.nb_queue_pairs = 1,
239 		.max_nb_priv_xforms = NUM_MAX_XFORMS,
240 		.max_nb_streams = 0
241 	};
242 
243 	if (rte_compressdev_configure(0, &config) < 0) {
244 		RTE_LOG(ERR, USER1, "Device configuration failed\n");
245 		return -1;
246 	}
247 
248 	if (rte_compressdev_queue_pair_setup(0, 0, NUM_MAX_INFLIGHT_OPS,
249 			rte_socket_id()) < 0) {
250 		RTE_LOG(ERR, USER1, "Queue pair setup failed\n");
251 		return -1;
252 	}
253 
254 	if (rte_compressdev_start(0) < 0) {
255 		RTE_LOG(ERR, USER1, "Device could not be started\n");
256 		return -1;
257 	}
258 
259 	return 0;
260 }
261 
262 static void
263 generic_ut_teardown(void)
264 {
265 	rte_compressdev_stop(0);
266 	if (rte_compressdev_close(0) < 0)
267 		RTE_LOG(ERR, USER1, "Device could not be closed\n");
268 }
269 
270 static int
271 test_compressdev_invalid_configuration(void)
272 {
273 	struct rte_compressdev_config invalid_config;
274 	struct rte_compressdev_config valid_config = {
275 		.socket_id = rte_socket_id(),
276 		.nb_queue_pairs = 1,
277 		.max_nb_priv_xforms = NUM_MAX_XFORMS,
278 		.max_nb_streams = 0
279 	};
280 	struct rte_compressdev_info dev_info;
281 
282 	/* Invalid configuration with 0 queue pairs */
283 	memcpy(&invalid_config, &valid_config,
284 			sizeof(struct rte_compressdev_config));
285 	invalid_config.nb_queue_pairs = 0;
286 
287 	TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
288 			"Device configuration was successful "
289 			"with no queue pairs (invalid)\n");
290 
291 	/*
292 	 * Invalid configuration with too many queue pairs
293 	 * (if there is an actual maximum number of queue pairs)
294 	 */
295 	rte_compressdev_info_get(0, &dev_info);
296 	if (dev_info.max_nb_queue_pairs != 0) {
297 		memcpy(&invalid_config, &valid_config,
298 			sizeof(struct rte_compressdev_config));
299 		invalid_config.nb_queue_pairs = dev_info.max_nb_queue_pairs + 1;
300 
301 		TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
302 				"Device configuration was successful "
303 				"with too many queue pairs (invalid)\n");
304 	}
305 
306 	/* Invalid queue pair setup, with no number of queue pairs set */
307 	TEST_ASSERT_FAIL(rte_compressdev_queue_pair_setup(0, 0,
308 				NUM_MAX_INFLIGHT_OPS, rte_socket_id()),
309 			"Queue pair setup was successful "
310 			"with no queue pairs set (invalid)\n");
311 
312 	return TEST_SUCCESS;
313 }
314 
315 static int
316 compare_buffers(const char *buffer1, uint32_t buffer1_len,
317 		const char *buffer2, uint32_t buffer2_len)
318 {
319 	if (buffer1_len != buffer2_len) {
320 		RTE_LOG(ERR, USER1, "Buffer lengths are different\n");
321 		return -1;
322 	}
323 
324 	if (memcmp(buffer1, buffer2, buffer1_len) != 0) {
325 		RTE_LOG(ERR, USER1, "Buffers are different\n");
326 		return -1;
327 	}
328 
329 	return 0;
330 }
331 
332 /*
333  * Maps compressdev and Zlib flush flags
334  */
335 static int
336 map_zlib_flush_flag(enum rte_comp_flush_flag flag)
337 {
338 	switch (flag) {
339 	case RTE_COMP_FLUSH_NONE:
340 		return Z_NO_FLUSH;
341 	case RTE_COMP_FLUSH_SYNC:
342 		return Z_SYNC_FLUSH;
343 	case RTE_COMP_FLUSH_FULL:
344 		return Z_FULL_FLUSH;
345 	case RTE_COMP_FLUSH_FINAL:
346 		return Z_FINISH;
347 	/*
348 	 * There should be only the values above,
349 	 * so this should never happen
350 	 */
351 	default:
352 		return -1;
353 	}
354 }
355 
/*
 * Compress op->m_src into op->m_dst using zlib directly, as a reference
 * implementation against which the compressdev PMD output is validated.
 *
 * Handles chained (SGL) source/destination mbufs by staging through
 * temporary linear buffers. On success fills in op->consumed, op->produced,
 * op->output_chksum and sets op->status, then returns 0; returns a non-zero
 * value (TEST_FAILED or a zlib error code) on failure.
 */
static int
compress_zlib(struct rte_comp_op *op,
		const struct rte_comp_xform *xform, int mem_level)
{
	z_stream stream;
	int zlib_flush;
	int strategy, window_bits, comp_level;
	int ret = TEST_FAILED;
	uint8_t *single_src_buf = NULL;
	uint8_t *single_dst_buf = NULL;

	/* initialize zlib stream */
	stream.zalloc = Z_NULL;
	stream.zfree = Z_NULL;
	stream.opaque = Z_NULL;

	/* Fixed Huffman maps to Z_FIXED; anything else uses zlib's default. */
	if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED)
		strategy = Z_FIXED;
	else
		strategy = Z_DEFAULT_STRATEGY;

	/*
	 * Window bits is the base two logarithm of the window size (in bytes).
	 * When doing raw DEFLATE, this number will be negative.
	 */
	window_bits = -(xform->compress.window_size);
	/* Positive windowBits selects zlib format, which uses Adler-32. */
	if (xform->compress.chksum == RTE_COMP_CHECKSUM_ADLER32)
		window_bits *= -1;
	/* windowBits of 31 (15 + 16) selects gzip format, which uses CRC-32. */
	else if (xform->compress.chksum == RTE_COMP_CHECKSUM_CRC32)
		window_bits = ZLIB_CRC_CHECKSUM_WINDOW_BITS;

	comp_level = xform->compress.level;

	if (comp_level != RTE_COMP_LEVEL_NONE)
		ret = deflateInit2(&stream, comp_level, Z_DEFLATED,
			window_bits, mem_level, strategy);
	else
		ret = deflateInit(&stream, Z_NO_COMPRESSION);

	if (ret != Z_OK) {
		printf("Zlib deflate could not be initialized\n");
		goto exit;
	}

	/* Assuming stateless operation */
	/* SGL Input */
	if (op->m_src->nb_segs > 1) {
		/* Flatten the chained source into one linear buffer. */
		single_src_buf = rte_malloc(NULL,
				rte_pktmbuf_pkt_len(op->m_src), 0);
		if (single_src_buf == NULL) {
			RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
			goto exit;
		}

		if (rte_pktmbuf_read(op->m_src, op->src.offset,
					rte_pktmbuf_pkt_len(op->m_src) -
					op->src.offset,
					single_src_buf) == NULL) {
			RTE_LOG(ERR, USER1,
				"Buffer could not be read entirely\n");
			goto exit;
		}

		stream.avail_in = op->src.length;
		stream.next_in = single_src_buf;

	} else {
		/* Linear source: feed the mbuf data area to zlib directly. */
		stream.avail_in = op->src.length;
		stream.next_in = rte_pktmbuf_mtod_offset(op->m_src, uint8_t *,
				op->src.offset);
	}
	/* SGL output */
	if (op->m_dst->nb_segs > 1) {

		/* Compress into a staging buffer; scattered to m_dst below. */
		single_dst_buf = rte_malloc(NULL,
				rte_pktmbuf_pkt_len(op->m_dst), 0);
			if (single_dst_buf == NULL) {
				RTE_LOG(ERR, USER1,
					"Buffer could not be allocated\n");
			goto exit;
		}

		stream.avail_out = op->m_dst->pkt_len;
		stream.next_out = single_dst_buf;

	} else {/* linear output */
		stream.avail_out = op->m_dst->data_len;
		stream.next_out = rte_pktmbuf_mtod_offset(op->m_dst, uint8_t *,
				op->dst.offset);
	}

	/* Stateless operation, all buffer will be compressed in one go */
	zlib_flush = map_zlib_flush_flag(op->flush_flag);
	ret = deflate(&stream, zlib_flush);

	if (stream.avail_in != 0) {
		RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
		goto exit;
	}

	/* With Z_FINISH, anything other than Z_STREAM_END means failure. */
	if (ret != Z_STREAM_END)
		goto exit;

	/* Copy data to destination SGL */
	if (op->m_dst->nb_segs > 1) {
		uint32_t remaining_data = stream.total_out;
		uint8_t *src_data = single_dst_buf;
		struct rte_mbuf *dst_buf = op->m_dst;

		while (remaining_data > 0) {
			/*
			 * NOTE(review): op->dst.offset is applied to every
			 * segment, not only the first one - this assumes
			 * dst.offset is 0 for SGL outputs; verify callers.
			 */
			uint8_t *dst_data = rte_pktmbuf_mtod_offset(dst_buf,
						uint8_t *, op->dst.offset);
			/* Last segment */
			if (remaining_data < dst_buf->data_len) {
				memcpy(dst_data, src_data, remaining_data);
				remaining_data = 0;
			} else {
				memcpy(dst_data, src_data, dst_buf->data_len);
				remaining_data -= dst_buf->data_len;
				src_data += dst_buf->data_len;
				dst_buf = dst_buf->next;
			}
		}
	}

	op->consumed = stream.total_in;
	/*
	 * Strip the format header/trailer from the mbuf so op->produced
	 * reflects only the raw deflate payload, matching PMD behavior.
	 */
	if (xform->compress.chksum == RTE_COMP_CHECKSUM_ADLER32) {
		rte_pktmbuf_adj(op->m_dst, ZLIB_HEADER_SIZE);
		rte_pktmbuf_trim(op->m_dst, ZLIB_TRAILER_SIZE);
		op->produced = stream.total_out - (ZLIB_HEADER_SIZE +
				ZLIB_TRAILER_SIZE);
	} else if (xform->compress.chksum == RTE_COMP_CHECKSUM_CRC32) {
		rte_pktmbuf_adj(op->m_dst, GZIP_HEADER_SIZE);
		rte_pktmbuf_trim(op->m_dst, GZIP_TRAILER_SIZE);
		op->produced = stream.total_out - (GZIP_HEADER_SIZE +
				GZIP_TRAILER_SIZE);
	} else
		op->produced = stream.total_out;

	op->status = RTE_COMP_OP_STATUS_SUCCESS;
	/* stream.adler holds Adler-32 or CRC-32 depending on windowBits. */
	op->output_chksum = stream.adler;

	deflateReset(&stream);

	ret = 0;
exit:
	deflateEnd(&stream);
	rte_free(single_src_buf);
	rte_free(single_dst_buf);

	return ret;
}
508 
509 static int
510 decompress_zlib(struct rte_comp_op *op,
511 		const struct rte_comp_xform *xform)
512 {
513 	z_stream stream;
514 	int window_bits;
515 	int zlib_flush;
516 	int ret = TEST_FAILED;
517 	uint8_t *single_src_buf = NULL;
518 	uint8_t *single_dst_buf = NULL;
519 
520 	/* initialize zlib stream */
521 	stream.zalloc = Z_NULL;
522 	stream.zfree = Z_NULL;
523 	stream.opaque = Z_NULL;
524 
525 	/*
526 	 * Window bits is the base two logarithm of the window size (in bytes).
527 	 * When doing raw DEFLATE, this number will be negative.
528 	 */
529 	window_bits = -(xform->decompress.window_size);
530 	ret = inflateInit2(&stream, window_bits);
531 
532 	if (ret != Z_OK) {
533 		printf("Zlib deflate could not be initialized\n");
534 		goto exit;
535 	}
536 
537 	/* Assuming stateless operation */
538 	/* SGL */
539 	if (op->m_src->nb_segs > 1) {
540 		single_src_buf = rte_malloc(NULL,
541 				rte_pktmbuf_pkt_len(op->m_src), 0);
542 		if (single_src_buf == NULL) {
543 			RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
544 			goto exit;
545 		}
546 		single_dst_buf = rte_malloc(NULL,
547 				rte_pktmbuf_pkt_len(op->m_dst), 0);
548 		if (single_dst_buf == NULL) {
549 			RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
550 			goto exit;
551 		}
552 		if (rte_pktmbuf_read(op->m_src, 0,
553 					rte_pktmbuf_pkt_len(op->m_src),
554 					single_src_buf) == NULL) {
555 			RTE_LOG(ERR, USER1,
556 				"Buffer could not be read entirely\n");
557 			goto exit;
558 		}
559 
560 		stream.avail_in = op->src.length;
561 		stream.next_in = single_src_buf;
562 		stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
563 		stream.next_out = single_dst_buf;
564 
565 	} else {
566 		stream.avail_in = op->src.length;
567 		stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
568 		stream.avail_out = op->m_dst->data_len;
569 		stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
570 	}
571 
572 	/* Stateless operation, all buffer will be compressed in one go */
573 	zlib_flush = map_zlib_flush_flag(op->flush_flag);
574 	ret = inflate(&stream, zlib_flush);
575 
576 	if (stream.avail_in != 0) {
577 		RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
578 		goto exit;
579 	}
580 
581 	if (ret != Z_STREAM_END)
582 		goto exit;
583 
584 	if (op->m_src->nb_segs > 1) {
585 		uint32_t remaining_data = stream.total_out;
586 		uint8_t *src_data = single_dst_buf;
587 		struct rte_mbuf *dst_buf = op->m_dst;
588 
589 		while (remaining_data > 0) {
590 			uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
591 					uint8_t *);
592 			/* Last segment */
593 			if (remaining_data < dst_buf->data_len) {
594 				memcpy(dst_data, src_data, remaining_data);
595 				remaining_data = 0;
596 			} else {
597 				memcpy(dst_data, src_data, dst_buf->data_len);
598 				remaining_data -= dst_buf->data_len;
599 				src_data += dst_buf->data_len;
600 				dst_buf = dst_buf->next;
601 			}
602 		}
603 	}
604 
605 	op->consumed = stream.total_in;
606 	op->produced = stream.total_out;
607 	op->status = RTE_COMP_OP_STATUS_SUCCESS;
608 
609 	inflateReset(&stream);
610 
611 	ret = 0;
612 exit:
613 	inflateEnd(&stream);
614 
615 	return ret;
616 }
617 
/*
 * Build a scatter-gather mbuf chain of total_data_size bytes headed by
 * head_buf, optionally filling it with the bytes of test_buf.
 *
 * Segments of seg_size bytes are appended from small_mbuf_pool; the final
 * segment may instead come from large_mbuf_pool when the remaining data
 * exceeds seg_size (which happens when the segment count was clamped by
 * limit_segs_in_sgl). Pass test_buf == NULL to only size the chain without
 * copying data (e.g. for destination buffers).
 *
 * Returns 0 on success, -1 on allocation/append/chain failure. On failure
 * segments already chained to head_buf are left attached (freed with it).
 */
static int
prepare_sgl_bufs(const char *test_buf, struct rte_mbuf *head_buf,
		uint32_t total_data_size,
		struct rte_mempool *small_mbuf_pool,
		struct rte_mempool *large_mbuf_pool,
		uint8_t limit_segs_in_sgl,
		uint16_t seg_size)
{
	uint32_t remaining_data = total_data_size;
	uint16_t num_remaining_segs = DIV_CEIL(remaining_data, seg_size);
	struct rte_mempool *pool;
	struct rte_mbuf *next_seg;
	uint32_t data_size;
	char *buf_ptr;
	const char *data_ptr = test_buf;
	uint16_t i;
	int ret;

	/*
	 * Clamp the chain length; limit_segs_in_sgl == 0 means unlimited.
	 * NOTE(review): the clamp is limit - 1, presumably to keep the total
	 * (head + appended segments) within the limit - confirm intent.
	 */
	if (limit_segs_in_sgl != 0 && num_remaining_segs > limit_segs_in_sgl)
		num_remaining_segs = limit_segs_in_sgl - 1;

	/*
	 * Allocate data in the first segment (header) and
	 * copy data if test buffer is provided
	 */
	if (remaining_data < seg_size)
		data_size = remaining_data;
	else
		data_size = seg_size;
	buf_ptr = rte_pktmbuf_append(head_buf, data_size);
	if (buf_ptr == NULL) {
		RTE_LOG(ERR, USER1,
			"Not enough space in the 1st buffer\n");
		return -1;
	}

	if (data_ptr != NULL) {
		/* Copy characters without NULL terminator */
		strncpy(buf_ptr, data_ptr, data_size);
		data_ptr += data_size;
	}
	remaining_data -= data_size;
	num_remaining_segs--;

	/*
	 * Allocate the rest of the segments,
	 * copy the rest of the data and chain the segments.
	 */
	for (i = 0; i < num_remaining_segs; i++) {

		if (i == (num_remaining_segs - 1)) {
			/* last segment */
			if (remaining_data > seg_size)
				pool = large_mbuf_pool;
			else
				pool = small_mbuf_pool;
			data_size = remaining_data;
		} else {
			data_size = seg_size;
			pool = small_mbuf_pool;
		}

		next_seg = rte_pktmbuf_alloc(pool);
		if (next_seg == NULL) {
			RTE_LOG(ERR, USER1,
				"New segment could not be allocated "
				"from the mempool\n");
			return -1;
		}
		buf_ptr = rte_pktmbuf_append(next_seg, data_size);
		if (buf_ptr == NULL) {
			RTE_LOG(ERR, USER1,
				"Not enough space in the buffer\n");
			rte_pktmbuf_free(next_seg);
			return -1;
		}
		if (data_ptr != NULL) {
			/* Copy characters without NULL terminator */
			strncpy(buf_ptr, data_ptr, data_size);
			data_ptr += data_size;
		}
		remaining_data -= data_size;

		ret = rte_pktmbuf_chain(head_buf, next_seg);
		if (ret != 0) {
			rte_pktmbuf_free(next_seg);
			RTE_LOG(ERR, USER1,
				"Segment could not chained\n");
			return -1;
		}
	}

	return 0;
}
712 
713 /*
714  * Compresses and decompresses buffer with compressdev API and Zlib API
715  */
716 static int
717 test_deflate_comp_decomp(const struct interim_data_params *int_data,
718 		const struct test_data_params *test_data)
719 {
720 	struct comp_testsuite_params *ts_params = &testsuite_params;
721 	const char * const *test_bufs = int_data->test_bufs;
722 	unsigned int num_bufs = int_data->num_bufs;
723 	uint16_t *buf_idx = int_data->buf_idx;
724 	struct rte_comp_xform **compress_xforms = int_data->compress_xforms;
725 	struct rte_comp_xform **decompress_xforms = int_data->decompress_xforms;
726 	unsigned int num_xforms = int_data->num_xforms;
727 	enum rte_comp_op_type state = test_data->state;
728 	unsigned int buff_type = test_data->buff_type;
729 	unsigned int out_of_space = test_data->out_of_space;
730 	unsigned int big_data = test_data->big_data;
731 	enum zlib_direction zlib_dir = test_data->zlib_dir;
732 	int ret_status = -1;
733 	int ret;
734 	struct rte_mbuf *uncomp_bufs[num_bufs];
735 	struct rte_mbuf *comp_bufs[num_bufs];
736 	struct rte_comp_op *ops[num_bufs];
737 	struct rte_comp_op *ops_processed[num_bufs];
738 	void *priv_xforms[num_bufs];
739 	uint16_t num_enqd, num_deqd, num_total_deqd;
740 	uint16_t num_priv_xforms = 0;
741 	unsigned int deqd_retries = 0;
742 	struct priv_op_data *priv_data;
743 	char *buf_ptr;
744 	unsigned int i;
745 	struct rte_mempool *buf_pool;
746 	uint32_t data_size;
747 	/* Compressing with CompressDev */
748 	unsigned int oos_zlib_decompress =
749 			(zlib_dir == ZLIB_NONE || zlib_dir == ZLIB_DECOMPRESS);
750 	/* Decompressing with CompressDev */
751 	unsigned int oos_zlib_compress =
752 			(zlib_dir == ZLIB_NONE || zlib_dir == ZLIB_COMPRESS);
753 	const struct rte_compressdev_capabilities *capa =
754 		rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
755 	char *contig_buf = NULL;
756 	uint64_t compress_checksum[num_bufs];
757 
758 	/* Initialize all arrays to NULL */
759 	memset(uncomp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
760 	memset(comp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
761 	memset(ops, 0, sizeof(struct rte_comp_op *) * num_bufs);
762 	memset(ops_processed, 0, sizeof(struct rte_comp_op *) * num_bufs);
763 	memset(priv_xforms, 0, sizeof(void *) * num_bufs);
764 
765 	if (big_data)
766 		buf_pool = ts_params->big_mbuf_pool;
767 	else if (buff_type == SGL_BOTH)
768 		buf_pool = ts_params->small_mbuf_pool;
769 	else
770 		buf_pool = ts_params->large_mbuf_pool;
771 
772 	/* Prepare the source mbufs with the data */
773 	ret = rte_pktmbuf_alloc_bulk(buf_pool,
774 				uncomp_bufs, num_bufs);
775 	if (ret < 0) {
776 		RTE_LOG(ERR, USER1,
777 			"Source mbufs could not be allocated "
778 			"from the mempool\n");
779 		goto exit;
780 	}
781 
782 	if (buff_type == SGL_BOTH || buff_type == SGL_TO_LB) {
783 		for (i = 0; i < num_bufs; i++) {
784 			data_size = strlen(test_bufs[i]) + 1;
785 			if (prepare_sgl_bufs(test_bufs[i], uncomp_bufs[i],
786 			    data_size,
787 			    big_data ? buf_pool : ts_params->small_mbuf_pool,
788 			    big_data ? buf_pool : ts_params->large_mbuf_pool,
789 			    big_data ? 0 : MAX_SEGS,
790 			    big_data ? MAX_DATA_MBUF_SIZE : SMALL_SEG_SIZE) < 0)
791 				goto exit;
792 		}
793 	} else {
794 		for (i = 0; i < num_bufs; i++) {
795 			data_size = strlen(test_bufs[i]) + 1;
796 			buf_ptr = rte_pktmbuf_append(uncomp_bufs[i], data_size);
797 			strlcpy(buf_ptr, test_bufs[i], data_size);
798 		}
799 	}
800 
801 	/* Prepare the destination mbufs */
802 	ret = rte_pktmbuf_alloc_bulk(buf_pool, comp_bufs, num_bufs);
803 	if (ret < 0) {
804 		RTE_LOG(ERR, USER1,
805 			"Destination mbufs could not be allocated "
806 			"from the mempool\n");
807 		goto exit;
808 	}
809 
810 	if (buff_type == SGL_BOTH || buff_type == LB_TO_SGL) {
811 		for (i = 0; i < num_bufs; i++) {
812 			if (out_of_space == 1 && oos_zlib_decompress)
813 				data_size = OUT_OF_SPACE_BUF;
814 			else
815 				(data_size = strlen(test_bufs[i]) *
816 					COMPRESS_BUF_SIZE_RATIO);
817 
818 			if (prepare_sgl_bufs(NULL, comp_bufs[i],
819 			      data_size,
820 			      big_data ? buf_pool : ts_params->small_mbuf_pool,
821 			      big_data ? buf_pool : ts_params->large_mbuf_pool,
822 			      big_data ? 0 : MAX_SEGS,
823 			      big_data ? MAX_DATA_MBUF_SIZE : SMALL_SEG_SIZE)
824 					< 0)
825 				goto exit;
826 		}
827 
828 	} else {
829 		for (i = 0; i < num_bufs; i++) {
830 			if (out_of_space == 1 && oos_zlib_decompress)
831 				data_size = OUT_OF_SPACE_BUF;
832 			else
833 				(data_size = strlen(test_bufs[i]) *
834 					COMPRESS_BUF_SIZE_RATIO);
835 
836 			rte_pktmbuf_append(comp_bufs[i], data_size);
837 		}
838 	}
839 
840 	/* Build the compression operations */
841 	ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
842 	if (ret < 0) {
843 		RTE_LOG(ERR, USER1,
844 			"Compress operations could not be allocated "
845 			"from the mempool\n");
846 		goto exit;
847 	}
848 
849 
850 	for (i = 0; i < num_bufs; i++) {
851 		ops[i]->m_src = uncomp_bufs[i];
852 		ops[i]->m_dst = comp_bufs[i];
853 		ops[i]->src.offset = 0;
854 		ops[i]->src.length = rte_pktmbuf_pkt_len(uncomp_bufs[i]);
855 		ops[i]->dst.offset = 0;
856 		if (state == RTE_COMP_OP_STATELESS) {
857 			ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
858 		} else {
859 			RTE_LOG(ERR, USER1,
860 				"Stateful operations are not supported "
861 				"in these tests yet\n");
862 			goto exit;
863 		}
864 		ops[i]->input_chksum = 0;
865 		/*
866 		 * Store original operation index in private data,
867 		 * since ordering does not have to be maintained,
868 		 * when dequeueing from compressdev, so a comparison
869 		 * at the end of the test can be done.
870 		 */
871 		priv_data = (struct priv_op_data *) (ops[i] + 1);
872 		priv_data->orig_idx = i;
873 	}
874 
875 	/* Compress data (either with Zlib API or compressdev API */
876 	if (zlib_dir == ZLIB_COMPRESS || zlib_dir == ZLIB_ALL) {
877 		for (i = 0; i < num_bufs; i++) {
878 			const struct rte_comp_xform *compress_xform =
879 				compress_xforms[i % num_xforms];
880 			ret = compress_zlib(ops[i], compress_xform,
881 					DEFAULT_MEM_LEVEL);
882 			if (ret < 0)
883 				goto exit;
884 
885 			ops_processed[i] = ops[i];
886 		}
887 	} else {
888 		/* Create compress private xform data */
889 		for (i = 0; i < num_xforms; i++) {
890 			ret = rte_compressdev_private_xform_create(0,
891 				(const struct rte_comp_xform *)compress_xforms[i],
892 				&priv_xforms[i]);
893 			if (ret < 0) {
894 				RTE_LOG(ERR, USER1,
895 					"Compression private xform "
896 					"could not be created\n");
897 				goto exit;
898 			}
899 			num_priv_xforms++;
900 		}
901 
902 		if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
903 			/* Attach shareable private xform data to ops */
904 			for (i = 0; i < num_bufs; i++)
905 				ops[i]->private_xform = priv_xforms[i % num_xforms];
906 		} else {
907 			/* Create rest of the private xforms for the other ops */
908 			for (i = num_xforms; i < num_bufs; i++) {
909 				ret = rte_compressdev_private_xform_create(0,
910 					compress_xforms[i % num_xforms],
911 					&priv_xforms[i]);
912 				if (ret < 0) {
913 					RTE_LOG(ERR, USER1,
914 						"Compression private xform "
915 						"could not be created\n");
916 					goto exit;
917 				}
918 				num_priv_xforms++;
919 			}
920 
921 			/* Attach non shareable private xform data to ops */
922 			for (i = 0; i < num_bufs; i++)
923 				ops[i]->private_xform = priv_xforms[i];
924 		}
925 
926 		/* Enqueue and dequeue all operations */
927 		num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
928 		if (num_enqd < num_bufs) {
929 			RTE_LOG(ERR, USER1,
930 				"The operations could not be enqueued\n");
931 			goto exit;
932 		}
933 
934 		num_total_deqd = 0;
935 		do {
936 			/*
937 			 * If retrying a dequeue call, wait for 10 ms to allow
938 			 * enough time to the driver to process the operations
939 			 */
940 			if (deqd_retries != 0) {
941 				/*
942 				 * Avoid infinite loop if not all the
943 				 * operations get out of the device
944 				 */
945 				if (deqd_retries == MAX_DEQD_RETRIES) {
946 					RTE_LOG(ERR, USER1,
947 						"Not all operations could be "
948 						"dequeued\n");
949 					goto exit;
950 				}
951 				usleep(DEQUEUE_WAIT_TIME);
952 			}
953 			num_deqd = rte_compressdev_dequeue_burst(0, 0,
954 					&ops_processed[num_total_deqd], num_bufs);
955 			num_total_deqd += num_deqd;
956 			deqd_retries++;
957 
958 		} while (num_total_deqd < num_enqd);
959 
960 		deqd_retries = 0;
961 
962 		/* Free compress private xforms */
963 		for (i = 0; i < num_priv_xforms; i++) {
964 			rte_compressdev_private_xform_free(0, priv_xforms[i]);
965 			priv_xforms[i] = NULL;
966 		}
967 		num_priv_xforms = 0;
968 	}
969 
970 	for (i = 0; i < num_bufs; i++) {
971 		priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
972 		uint16_t xform_idx = priv_data->orig_idx % num_xforms;
973 		const struct rte_comp_compress_xform *compress_xform =
974 				&compress_xforms[xform_idx]->compress;
975 		enum rte_comp_huffman huffman_type =
976 			compress_xform->deflate.huffman;
977 		char engine[] = "zlib (directly, not PMD)";
978 		if (zlib_dir != ZLIB_COMPRESS || zlib_dir != ZLIB_ALL)
979 			strlcpy(engine, "PMD", sizeof(engine));
980 
981 		RTE_LOG(DEBUG, USER1, "Buffer %u compressed by %s from %u to"
982 			" %u bytes (level = %d, huffman = %s)\n",
983 			buf_idx[priv_data->orig_idx], engine,
984 			ops_processed[i]->consumed, ops_processed[i]->produced,
985 			compress_xform->level,
986 			huffman_type_strings[huffman_type]);
987 		RTE_LOG(DEBUG, USER1, "Compression ratio = %.2f\n",
988 			ops_processed[i]->consumed == 0 ? 0 :
989 			(float)ops_processed[i]->produced /
990 			ops_processed[i]->consumed * 100);
991 		if (compress_xform->chksum != RTE_COMP_CHECKSUM_NONE)
992 			compress_checksum[i] = ops_processed[i]->output_chksum;
993 		ops[i] = NULL;
994 	}
995 
996 	/*
997 	 * Check operation status and free source mbufs (destination mbuf and
998 	 * compress operation information is needed for the decompression stage)
999 	 */
1000 	for (i = 0; i < num_bufs; i++) {
1001 		if (out_of_space && oos_zlib_decompress) {
1002 			if (ops_processed[i]->status !=
1003 					RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
1004 				ret_status = -1;
1005 
1006 				RTE_LOG(ERR, USER1,
1007 					"Operation without expected out of "
1008 					"space status error\n");
1009 				goto exit;
1010 			} else
1011 				continue;
1012 		}
1013 
1014 		if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
1015 			RTE_LOG(ERR, USER1,
1016 				"Some operations were not successful\n");
1017 			goto exit;
1018 		}
1019 		priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1020 		rte_pktmbuf_free(uncomp_bufs[priv_data->orig_idx]);
1021 		uncomp_bufs[priv_data->orig_idx] = NULL;
1022 	}
1023 
1024 	if (out_of_space && oos_zlib_decompress) {
1025 		ret_status = 0;
1026 		goto exit;
1027 	}
1028 
1029 	/* Allocate buffers for decompressed data */
1030 	ret = rte_pktmbuf_alloc_bulk(buf_pool, uncomp_bufs, num_bufs);
1031 	if (ret < 0) {
1032 		RTE_LOG(ERR, USER1,
1033 			"Destination mbufs could not be allocated "
1034 			"from the mempool\n");
1035 		goto exit;
1036 	}
1037 
1038 	if (buff_type == SGL_BOTH || buff_type == LB_TO_SGL) {
1039 		for (i = 0; i < num_bufs; i++) {
1040 			priv_data = (struct priv_op_data *)
1041 					(ops_processed[i] + 1);
1042 			if (out_of_space == 1 && oos_zlib_compress)
1043 				data_size = OUT_OF_SPACE_BUF;
1044 			else
1045 				data_size =
1046 				strlen(test_bufs[priv_data->orig_idx]) + 1;
1047 
1048 			if (prepare_sgl_bufs(NULL, uncomp_bufs[i],
1049 			       data_size,
1050 			       big_data ? buf_pool : ts_params->small_mbuf_pool,
1051 			       big_data ? buf_pool : ts_params->large_mbuf_pool,
1052 			       big_data ? 0 : MAX_SEGS,
1053 			       big_data ? MAX_DATA_MBUF_SIZE : SMALL_SEG_SIZE)
1054 					< 0)
1055 				goto exit;
1056 		}
1057 
1058 	} else {
1059 		for (i = 0; i < num_bufs; i++) {
1060 			priv_data = (struct priv_op_data *)
1061 					(ops_processed[i] + 1);
1062 			if (out_of_space == 1 && oos_zlib_compress)
1063 				data_size = OUT_OF_SPACE_BUF;
1064 			else
1065 				data_size =
1066 				strlen(test_bufs[priv_data->orig_idx]) + 1;
1067 
1068 			rte_pktmbuf_append(uncomp_bufs[i], data_size);
1069 		}
1070 	}
1071 
1072 	/* Build the decompression operations */
1073 	ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
1074 	if (ret < 0) {
1075 		RTE_LOG(ERR, USER1,
1076 			"Decompress operations could not be allocated "
1077 			"from the mempool\n");
1078 		goto exit;
1079 	}
1080 
1081 	/* Source buffer is the compressed data from the previous operations */
1082 	for (i = 0; i < num_bufs; i++) {
1083 		ops[i]->m_src = ops_processed[i]->m_dst;
1084 		ops[i]->m_dst = uncomp_bufs[i];
1085 		ops[i]->src.offset = 0;
1086 		/*
1087 		 * Set the length of the compressed data to the
1088 		 * number of bytes that were produced in the previous stage
1089 		 */
1090 		ops[i]->src.length = ops_processed[i]->produced;
1091 		ops[i]->dst.offset = 0;
1092 		if (state == RTE_COMP_OP_STATELESS) {
1093 			ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
1094 		} else {
1095 			RTE_LOG(ERR, USER1,
1096 				"Stateful operations are not supported "
1097 				"in these tests yet\n");
1098 			goto exit;
1099 		}
1100 		ops[i]->input_chksum = 0;
1101 		/*
1102 		 * Copy private data from previous operations,
1103 		 * to keep the pointer to the original buffer
1104 		 */
1105 		memcpy(ops[i] + 1, ops_processed[i] + 1,
1106 				sizeof(struct priv_op_data));
1107 	}
1108 
1109 	/*
1110 	 * Free the previous compress operations,
1111 	 * as they are not needed anymore
1112 	 */
1113 	rte_comp_op_bulk_free(ops_processed, num_bufs);
1114 
1115 	/* Decompress data (either with Zlib API or compressdev API */
1116 	if (zlib_dir == ZLIB_DECOMPRESS || zlib_dir == ZLIB_ALL) {
1117 		for (i = 0; i < num_bufs; i++) {
1118 			priv_data = (struct priv_op_data *)(ops[i] + 1);
1119 			uint16_t xform_idx = priv_data->orig_idx % num_xforms;
1120 			const struct rte_comp_xform *decompress_xform =
1121 				decompress_xforms[xform_idx];
1122 
1123 			ret = decompress_zlib(ops[i], decompress_xform);
1124 			if (ret < 0)
1125 				goto exit;
1126 
1127 			ops_processed[i] = ops[i];
1128 		}
1129 	} else {
1130 		/* Create decompress private xform data */
1131 		for (i = 0; i < num_xforms; i++) {
1132 			ret = rte_compressdev_private_xform_create(0,
1133 				(const struct rte_comp_xform *)decompress_xforms[i],
1134 				&priv_xforms[i]);
1135 			if (ret < 0) {
1136 				RTE_LOG(ERR, USER1,
1137 					"Decompression private xform "
1138 					"could not be created\n");
1139 				goto exit;
1140 			}
1141 			num_priv_xforms++;
1142 		}
1143 
1144 		if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
1145 			/* Attach shareable private xform data to ops */
1146 			for (i = 0; i < num_bufs; i++) {
1147 				priv_data = (struct priv_op_data *)(ops[i] + 1);
1148 				uint16_t xform_idx = priv_data->orig_idx %
1149 								num_xforms;
1150 				ops[i]->private_xform = priv_xforms[xform_idx];
1151 			}
1152 		} else {
1153 			/* Create rest of the private xforms for the other ops */
1154 			for (i = num_xforms; i < num_bufs; i++) {
1155 				ret = rte_compressdev_private_xform_create(0,
1156 					decompress_xforms[i % num_xforms],
1157 					&priv_xforms[i]);
1158 				if (ret < 0) {
1159 					RTE_LOG(ERR, USER1,
1160 						"Decompression private xform "
1161 						"could not be created\n");
1162 					goto exit;
1163 				}
1164 				num_priv_xforms++;
1165 			}
1166 
1167 			/* Attach non shareable private xform data to ops */
1168 			for (i = 0; i < num_bufs; i++) {
1169 				priv_data = (struct priv_op_data *)(ops[i] + 1);
1170 				uint16_t xform_idx = priv_data->orig_idx;
1171 				ops[i]->private_xform = priv_xforms[xform_idx];
1172 			}
1173 		}
1174 
1175 		/* Enqueue and dequeue all operations */
1176 		num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
1177 		if (num_enqd < num_bufs) {
1178 			RTE_LOG(ERR, USER1,
1179 				"The operations could not be enqueued\n");
1180 			goto exit;
1181 		}
1182 
1183 		num_total_deqd = 0;
1184 		do {
1185 			/*
1186 			 * If retrying a dequeue call, wait for 10 ms to allow
1187 			 * enough time to the driver to process the operations
1188 			 */
1189 			if (deqd_retries != 0) {
1190 				/*
1191 				 * Avoid infinite loop if not all the
1192 				 * operations get out of the device
1193 				 */
1194 				if (deqd_retries == MAX_DEQD_RETRIES) {
1195 					RTE_LOG(ERR, USER1,
1196 						"Not all operations could be "
1197 						"dequeued\n");
1198 					goto exit;
1199 				}
1200 				usleep(DEQUEUE_WAIT_TIME);
1201 			}
1202 			num_deqd = rte_compressdev_dequeue_burst(0, 0,
1203 					&ops_processed[num_total_deqd], num_bufs);
1204 			num_total_deqd += num_deqd;
1205 			deqd_retries++;
1206 		} while (num_total_deqd < num_enqd);
1207 
1208 		deqd_retries = 0;
1209 	}
1210 
1211 	for (i = 0; i < num_bufs; i++) {
1212 		priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1213 		char engine[] = "zlib, (directly, no PMD)";
1214 		if (zlib_dir != ZLIB_DECOMPRESS || zlib_dir != ZLIB_ALL)
1215 			strlcpy(engine, "pmd", sizeof(engine));
1216 		RTE_LOG(DEBUG, USER1,
1217 			"Buffer %u decompressed by %s from %u to %u bytes\n",
1218 			buf_idx[priv_data->orig_idx], engine,
1219 			ops_processed[i]->consumed, ops_processed[i]->produced);
1220 		ops[i] = NULL;
1221 	}
1222 
1223 	/*
1224 	 * Check operation status and free source mbuf (destination mbuf and
1225 	 * compress operation information is still needed)
1226 	 */
1227 	for (i = 0; i < num_bufs; i++) {
1228 		if (out_of_space && oos_zlib_compress) {
1229 			if (ops_processed[i]->status !=
1230 					RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
1231 				ret_status = -1;
1232 
1233 				RTE_LOG(ERR, USER1,
1234 					"Operation without expected out of "
1235 					"space status error\n");
1236 				goto exit;
1237 			} else
1238 				continue;
1239 		}
1240 
1241 		if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
1242 			RTE_LOG(ERR, USER1,
1243 				"Some operations were not successful\n");
1244 			goto exit;
1245 		}
1246 		priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1247 		rte_pktmbuf_free(comp_bufs[priv_data->orig_idx]);
1248 		comp_bufs[priv_data->orig_idx] = NULL;
1249 	}
1250 
1251 	if (out_of_space && oos_zlib_compress) {
1252 		ret_status = 0;
1253 		goto exit;
1254 	}
1255 
1256 	/*
1257 	 * Compare the original stream with the decompressed stream
1258 	 * (in size and the data)
1259 	 */
1260 	for (i = 0; i < num_bufs; i++) {
1261 		priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1262 		const char *buf1 = test_bufs[priv_data->orig_idx];
1263 		const char *buf2;
1264 		contig_buf = rte_malloc(NULL, ops_processed[i]->produced, 0);
1265 		if (contig_buf == NULL) {
1266 			RTE_LOG(ERR, USER1, "Contiguous buffer could not "
1267 					"be allocated\n");
1268 			goto exit;
1269 		}
1270 
1271 		buf2 = rte_pktmbuf_read(ops_processed[i]->m_dst, 0,
1272 				ops_processed[i]->produced, contig_buf);
1273 		if (compare_buffers(buf1, strlen(buf1) + 1,
1274 				buf2, ops_processed[i]->produced) < 0)
1275 			goto exit;
1276 
1277 		/* Test checksums */
1278 		if (compress_xforms[0]->compress.chksum !=
1279 				RTE_COMP_CHECKSUM_NONE) {
1280 			if (ops_processed[i]->output_chksum !=
1281 					compress_checksum[i]) {
1282 				RTE_LOG(ERR, USER1, "The checksums differ\n"
1283 			"Compression Checksum: %" PRIu64 "\tDecompression "
1284 			"Checksum: %" PRIu64 "\n", compress_checksum[i],
1285 			ops_processed[i]->output_chksum);
1286 				goto exit;
1287 			}
1288 		}
1289 
1290 		rte_free(contig_buf);
1291 		contig_buf = NULL;
1292 	}
1293 
1294 	ret_status = 0;
1295 
1296 exit:
1297 	/* Free resources */
1298 	for (i = 0; i < num_bufs; i++) {
1299 		rte_pktmbuf_free(uncomp_bufs[i]);
1300 		rte_pktmbuf_free(comp_bufs[i]);
1301 		rte_comp_op_free(ops[i]);
1302 		rte_comp_op_free(ops_processed[i]);
1303 	}
1304 	for (i = 0; i < num_priv_xforms; i++) {
1305 		if (priv_xforms[i] != NULL)
1306 			rte_compressdev_private_xform_free(0, priv_xforms[i]);
1307 	}
1308 	rte_free(contig_buf);
1309 
1310 	return ret_status;
1311 }
1312 
1313 static int
1314 test_compressdev_deflate_stateless_fixed(void)
1315 {
1316 	struct comp_testsuite_params *ts_params = &testsuite_params;
1317 	uint16_t i;
1318 	int ret;
1319 	const struct rte_compressdev_capabilities *capab;
1320 
1321 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1322 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1323 
1324 	if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
1325 		return -ENOTSUP;
1326 
1327 	struct rte_comp_xform *compress_xform =
1328 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1329 
1330 	if (compress_xform == NULL) {
1331 		RTE_LOG(ERR, USER1,
1332 			"Compress xform could not be created\n");
1333 		ret = TEST_FAILED;
1334 		goto exit;
1335 	}
1336 
1337 	memcpy(compress_xform, ts_params->def_comp_xform,
1338 			sizeof(struct rte_comp_xform));
1339 	compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_FIXED;
1340 
1341 	struct interim_data_params int_data = {
1342 		NULL,
1343 		1,
1344 		NULL,
1345 		&compress_xform,
1346 		&ts_params->def_decomp_xform,
1347 		1
1348 	};
1349 
1350 	struct test_data_params test_data = {
1351 		RTE_COMP_OP_STATELESS,
1352 		LB_BOTH,
1353 		ZLIB_DECOMPRESS,
1354 		0,
1355 		0
1356 	};
1357 
1358 	for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1359 		int_data.test_bufs = &compress_test_bufs[i];
1360 		int_data.buf_idx = &i;
1361 
1362 		/* Compress with compressdev, decompress with Zlib */
1363 		test_data.zlib_dir = ZLIB_DECOMPRESS;
1364 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1365 			ret = TEST_FAILED;
1366 			goto exit;
1367 		}
1368 
1369 		/* Compress with Zlib, decompress with compressdev */
1370 		test_data.zlib_dir = ZLIB_COMPRESS;
1371 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1372 			ret = TEST_FAILED;
1373 			goto exit;
1374 		}
1375 	}
1376 
1377 	ret = TEST_SUCCESS;
1378 
1379 exit:
1380 	rte_free(compress_xform);
1381 	return ret;
1382 }
1383 
1384 static int
1385 test_compressdev_deflate_stateless_dynamic(void)
1386 {
1387 	struct comp_testsuite_params *ts_params = &testsuite_params;
1388 	uint16_t i;
1389 	int ret;
1390 	struct rte_comp_xform *compress_xform =
1391 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1392 
1393 	const struct rte_compressdev_capabilities *capab;
1394 
1395 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1396 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1397 
1398 	if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
1399 		return -ENOTSUP;
1400 
1401 	if (compress_xform == NULL) {
1402 		RTE_LOG(ERR, USER1,
1403 			"Compress xform could not be created\n");
1404 		ret = TEST_FAILED;
1405 		goto exit;
1406 	}
1407 
1408 	memcpy(compress_xform, ts_params->def_comp_xform,
1409 			sizeof(struct rte_comp_xform));
1410 	compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC;
1411 
1412 	struct interim_data_params int_data = {
1413 		NULL,
1414 		1,
1415 		NULL,
1416 		&compress_xform,
1417 		&ts_params->def_decomp_xform,
1418 		1
1419 	};
1420 
1421 	struct test_data_params test_data = {
1422 		RTE_COMP_OP_STATELESS,
1423 		LB_BOTH,
1424 		ZLIB_DECOMPRESS,
1425 		0,
1426 		0
1427 	};
1428 
1429 	for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1430 		int_data.test_bufs = &compress_test_bufs[i];
1431 		int_data.buf_idx = &i;
1432 
1433 		/* Compress with compressdev, decompress with Zlib */
1434 		test_data.zlib_dir = ZLIB_DECOMPRESS;
1435 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1436 			ret = TEST_FAILED;
1437 			goto exit;
1438 		}
1439 
1440 		/* Compress with Zlib, decompress with compressdev */
1441 		test_data.zlib_dir = ZLIB_COMPRESS;
1442 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1443 			ret = TEST_FAILED;
1444 			goto exit;
1445 		}
1446 	}
1447 
1448 	ret = TEST_SUCCESS;
1449 
1450 exit:
1451 	rte_free(compress_xform);
1452 	return ret;
1453 }
1454 
1455 static int
1456 test_compressdev_deflate_stateless_multi_op(void)
1457 {
1458 	struct comp_testsuite_params *ts_params = &testsuite_params;
1459 	uint16_t num_bufs = RTE_DIM(compress_test_bufs);
1460 	uint16_t buf_idx[num_bufs];
1461 	uint16_t i;
1462 
1463 	for (i = 0; i < num_bufs; i++)
1464 		buf_idx[i] = i;
1465 
1466 	struct interim_data_params int_data = {
1467 		compress_test_bufs,
1468 		num_bufs,
1469 		buf_idx,
1470 		&ts_params->def_comp_xform,
1471 		&ts_params->def_decomp_xform,
1472 		1
1473 	};
1474 
1475 	struct test_data_params test_data = {
1476 		RTE_COMP_OP_STATELESS,
1477 		LB_BOTH,
1478 		ZLIB_DECOMPRESS,
1479 		0,
1480 		0
1481 	};
1482 
1483 	/* Compress with compressdev, decompress with Zlib */
1484 	test_data.zlib_dir = ZLIB_DECOMPRESS;
1485 	if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1486 		return TEST_FAILED;
1487 
1488 	/* Compress with Zlib, decompress with compressdev */
1489 	test_data.zlib_dir = ZLIB_COMPRESS;
1490 	if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1491 		return TEST_FAILED;
1492 
1493 	return TEST_SUCCESS;
1494 }
1495 
1496 static int
1497 test_compressdev_deflate_stateless_multi_level(void)
1498 {
1499 	struct comp_testsuite_params *ts_params = &testsuite_params;
1500 	unsigned int level;
1501 	uint16_t i;
1502 	int ret;
1503 	struct rte_comp_xform *compress_xform =
1504 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1505 
1506 	if (compress_xform == NULL) {
1507 		RTE_LOG(ERR, USER1,
1508 			"Compress xform could not be created\n");
1509 		ret = TEST_FAILED;
1510 		goto exit;
1511 	}
1512 
1513 	memcpy(compress_xform, ts_params->def_comp_xform,
1514 			sizeof(struct rte_comp_xform));
1515 
1516 	struct interim_data_params int_data = {
1517 		NULL,
1518 		1,
1519 		NULL,
1520 		&compress_xform,
1521 		&ts_params->def_decomp_xform,
1522 		1
1523 	};
1524 
1525 	struct test_data_params test_data = {
1526 		RTE_COMP_OP_STATELESS,
1527 		LB_BOTH,
1528 		ZLIB_DECOMPRESS,
1529 		0,
1530 		0
1531 	};
1532 
1533 	for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1534 		int_data.test_bufs = &compress_test_bufs[i];
1535 		int_data.buf_idx = &i;
1536 
1537 		for (level = RTE_COMP_LEVEL_MIN; level <= RTE_COMP_LEVEL_MAX;
1538 				level++) {
1539 			compress_xform->compress.level = level;
1540 			/* Compress with compressdev, decompress with Zlib */
1541 			test_data.zlib_dir = ZLIB_DECOMPRESS;
1542 			if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1543 				ret = TEST_FAILED;
1544 				goto exit;
1545 			}
1546 		}
1547 	}
1548 
1549 	ret = TEST_SUCCESS;
1550 
1551 exit:
1552 	rte_free(compress_xform);
1553 	return ret;
1554 }
1555 
1556 #define NUM_XFORMS 3
1557 static int
1558 test_compressdev_deflate_stateless_multi_xform(void)
1559 {
1560 	struct comp_testsuite_params *ts_params = &testsuite_params;
1561 	uint16_t num_bufs = NUM_XFORMS;
1562 	struct rte_comp_xform *compress_xforms[NUM_XFORMS] = {NULL};
1563 	struct rte_comp_xform *decompress_xforms[NUM_XFORMS] = {NULL};
1564 	const char *test_buffers[NUM_XFORMS];
1565 	uint16_t i;
1566 	unsigned int level = RTE_COMP_LEVEL_MIN;
1567 	uint16_t buf_idx[num_bufs];
1568 
1569 	int ret;
1570 
1571 	/* Create multiple xforms with various levels */
1572 	for (i = 0; i < NUM_XFORMS; i++) {
1573 		compress_xforms[i] = rte_malloc(NULL,
1574 				sizeof(struct rte_comp_xform), 0);
1575 		if (compress_xforms[i] == NULL) {
1576 			RTE_LOG(ERR, USER1,
1577 				"Compress xform could not be created\n");
1578 			ret = TEST_FAILED;
1579 			goto exit;
1580 		}
1581 
1582 		memcpy(compress_xforms[i], ts_params->def_comp_xform,
1583 				sizeof(struct rte_comp_xform));
1584 		compress_xforms[i]->compress.level = level;
1585 		level++;
1586 
1587 		decompress_xforms[i] = rte_malloc(NULL,
1588 				sizeof(struct rte_comp_xform), 0);
1589 		if (decompress_xforms[i] == NULL) {
1590 			RTE_LOG(ERR, USER1,
1591 				"Decompress xform could not be created\n");
1592 			ret = TEST_FAILED;
1593 			goto exit;
1594 		}
1595 
1596 		memcpy(decompress_xforms[i], ts_params->def_decomp_xform,
1597 				sizeof(struct rte_comp_xform));
1598 	}
1599 
1600 	for (i = 0; i < NUM_XFORMS; i++) {
1601 		buf_idx[i] = 0;
1602 		/* Use the same buffer in all sessions */
1603 		test_buffers[i] = compress_test_bufs[0];
1604 	}
1605 
1606 	struct interim_data_params int_data = {
1607 		test_buffers,
1608 		num_bufs,
1609 		buf_idx,
1610 		compress_xforms,
1611 		decompress_xforms,
1612 		NUM_XFORMS
1613 	};
1614 
1615 	struct test_data_params test_data = {
1616 		RTE_COMP_OP_STATELESS,
1617 		LB_BOTH,
1618 		ZLIB_DECOMPRESS,
1619 		0,
1620 		0
1621 	};
1622 
1623 	/* Compress with compressdev, decompress with Zlib */
1624 	if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1625 		ret = TEST_FAILED;
1626 		goto exit;
1627 	}
1628 
1629 	ret = TEST_SUCCESS;
1630 exit:
1631 	for (i = 0; i < NUM_XFORMS; i++) {
1632 		rte_free(compress_xforms[i]);
1633 		rte_free(decompress_xforms[i]);
1634 	}
1635 
1636 	return ret;
1637 }
1638 
1639 static int
1640 test_compressdev_deflate_stateless_sgl(void)
1641 {
1642 	struct comp_testsuite_params *ts_params = &testsuite_params;
1643 	uint16_t i;
1644 	const struct rte_compressdev_capabilities *capab;
1645 
1646 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1647 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1648 
1649 	if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
1650 		return -ENOTSUP;
1651 
1652 	struct interim_data_params int_data = {
1653 		NULL,
1654 		1,
1655 		NULL,
1656 		&ts_params->def_comp_xform,
1657 		&ts_params->def_decomp_xform,
1658 		1
1659 	};
1660 
1661 	struct test_data_params test_data = {
1662 		RTE_COMP_OP_STATELESS,
1663 		SGL_BOTH,
1664 		ZLIB_DECOMPRESS,
1665 		0,
1666 		0
1667 	};
1668 
1669 	for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1670 		int_data.test_bufs = &compress_test_bufs[i];
1671 		int_data.buf_idx = &i;
1672 
1673 		/* Compress with compressdev, decompress with Zlib */
1674 		test_data.zlib_dir = ZLIB_DECOMPRESS;
1675 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1676 			return TEST_FAILED;
1677 
1678 		/* Compress with Zlib, decompress with compressdev */
1679 		test_data.zlib_dir = ZLIB_COMPRESS;
1680 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1681 			return TEST_FAILED;
1682 
1683 		if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_LB_OUT) {
1684 			/* Compress with compressdev, decompress with Zlib */
1685 			test_data.zlib_dir = ZLIB_DECOMPRESS;
1686 			test_data.buff_type = SGL_TO_LB;
1687 			if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1688 				return TEST_FAILED;
1689 
1690 			/* Compress with Zlib, decompress with compressdev */
1691 			test_data.zlib_dir = ZLIB_COMPRESS;
1692 			test_data.buff_type = SGL_TO_LB;
1693 			if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1694 				return TEST_FAILED;
1695 		}
1696 
1697 		if (capab->comp_feature_flags & RTE_COMP_FF_OOP_LB_IN_SGL_OUT) {
1698 			/* Compress with compressdev, decompress with Zlib */
1699 			test_data.zlib_dir = ZLIB_DECOMPRESS;
1700 			test_data.buff_type = LB_TO_SGL;
1701 			if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1702 				return TEST_FAILED;
1703 
1704 			/* Compress with Zlib, decompress with compressdev */
1705 			test_data.zlib_dir = ZLIB_COMPRESS;
1706 			test_data.buff_type = LB_TO_SGL;
1707 			if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1708 				return TEST_FAILED;
1709 		}
1710 
1711 
1712 	}
1713 
1714 	return TEST_SUCCESS;
1715 
1716 }
1717 
/*
 * Verify checksum generation: for each checksum type the device
 * advertises (CRC32, Adler32, combined CRC32+Adler32), compress and
 * decompress every test buffer and let the helper compare the checksums
 * produced on each side. Returns TEST_SUCCESS/TEST_FAILED, or -ENOTSUP
 * when the device supports no checksum at all.
 */
static int
test_compressdev_deflate_stateless_checksum(void)
{
	struct comp_testsuite_params *ts_params = &testsuite_params;
	uint16_t i;
	int ret;
	const struct rte_compressdev_capabilities *capab;

	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");

	/* Check if driver supports any checksum */
	if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM) == 0 &&
			(capab->comp_feature_flags &
			RTE_COMP_FF_ADLER32_CHECKSUM) == 0 &&
			(capab->comp_feature_flags &
			RTE_COMP_FF_CRC32_ADLER32_CHECKSUM) == 0)
		return -ENOTSUP;

	/* Private copies of the default xforms so chksum can be varied */
	struct rte_comp_xform *compress_xform =
			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
	if (compress_xform == NULL) {
		RTE_LOG(ERR, USER1, "Compress xform could not be created\n");
		ret = TEST_FAILED;
		return ret;
	}

	memcpy(compress_xform, ts_params->def_comp_xform,
			sizeof(struct rte_comp_xform));

	struct rte_comp_xform *decompress_xform =
			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
	if (decompress_xform == NULL) {
		RTE_LOG(ERR, USER1, "Decompress xform could not be created\n");
		rte_free(compress_xform);
		ret = TEST_FAILED;
		return ret;
	}

	memcpy(decompress_xform, ts_params->def_decomp_xform,
			sizeof(struct rte_comp_xform));

	struct interim_data_params int_data = {
		NULL,
		1,
		NULL,
		&compress_xform,
		&decompress_xform,
		1
	};

	struct test_data_params test_data = {
		RTE_COMP_OP_STATELESS,
		LB_BOTH,
		ZLIB_DECOMPRESS,
		0,
		0
	};

	/* Check if driver supports crc32 checksum and test */
	if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM)) {
		compress_xform->compress.chksum = RTE_COMP_CHECKSUM_CRC32;
		decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_CRC32;

		for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
			/* Compress with compressdev, decompress with Zlib */
			int_data.test_bufs = &compress_test_bufs[i];
			int_data.buf_idx = &i;

			/* Generate zlib checksum and test against selected
			 * drivers decompression checksum
			 */
			test_data.zlib_dir = ZLIB_COMPRESS;
			if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
				ret = TEST_FAILED;
				goto exit;
			}

			/* Generate compression and decompression
			 * checksum of selected driver
			 */
			test_data.zlib_dir = ZLIB_NONE;
			if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
				ret = TEST_FAILED;
				goto exit;
			}
		}
	}

	/* Check if driver supports adler32 checksum and test */
	if ((capab->comp_feature_flags & RTE_COMP_FF_ADLER32_CHECKSUM)) {
		compress_xform->compress.chksum = RTE_COMP_CHECKSUM_ADLER32;
		decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_ADLER32;

		for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
			int_data.test_bufs = &compress_test_bufs[i];
			int_data.buf_idx = &i;

			/* Generate zlib checksum and test against selected
			 * drivers decompression checksum
			 */
			test_data.zlib_dir = ZLIB_COMPRESS;
			if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
				ret = TEST_FAILED;
				goto exit;
			}
			/* Generate compression and decompression
			 * checksum of selected driver
			 */
			test_data.zlib_dir = ZLIB_NONE;
			if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
				ret = TEST_FAILED;
				goto exit;
			}
		}
	}

	/* Check if driver supports combined crc and adler checksum and test */
	if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_ADLER32_CHECKSUM)) {
		compress_xform->compress.chksum =
				RTE_COMP_CHECKSUM_CRC32_ADLER32;
		decompress_xform->decompress.chksum =
				RTE_COMP_CHECKSUM_CRC32_ADLER32;

		for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
			int_data.test_bufs = &compress_test_bufs[i];
			int_data.buf_idx = &i;

			/* Generate compression and decompression
			 * checksum of selected driver
			 * (only ZLIB_NONE here — presumably because zlib
			 * cannot produce the combined checksum; confirm)
			 */
			test_data.zlib_dir = ZLIB_NONE;
			if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
				ret = TEST_FAILED;
				goto exit;
			}
		}
	}

	ret = TEST_SUCCESS;

exit:
	rte_free(compress_xform);
	rte_free(decompress_xform);
	return ret;
}
1864 
1865 static int
1866 test_compressdev_out_of_space_buffer(void)
1867 {
1868 	struct comp_testsuite_params *ts_params = &testsuite_params;
1869 	int ret;
1870 	uint16_t i;
1871 	const struct rte_compressdev_capabilities *capab;
1872 
1873 	RTE_LOG(INFO, USER1, "This is a negative test errors are expected\n");
1874 
1875 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1876 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1877 
1878 	if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
1879 		return -ENOTSUP;
1880 
1881 	struct rte_comp_xform *compress_xform =
1882 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1883 
1884 	if (compress_xform == NULL) {
1885 		RTE_LOG(ERR, USER1,
1886 			"Compress xform could not be created\n");
1887 		ret = TEST_FAILED;
1888 		goto exit;
1889 	}
1890 
1891 	struct interim_data_params int_data = {
1892 		&compress_test_bufs[0],
1893 		1,
1894 		&i,
1895 		&ts_params->def_comp_xform,
1896 		&ts_params->def_decomp_xform,
1897 		1
1898 	};
1899 
1900 	struct test_data_params test_data = {
1901 		RTE_COMP_OP_STATELESS,
1902 		LB_BOTH,
1903 		ZLIB_DECOMPRESS,
1904 		1,
1905 		0
1906 	};
1907 	/* Compress with compressdev, decompress with Zlib */
1908 	test_data.zlib_dir = ZLIB_DECOMPRESS;
1909 	if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1910 		ret = TEST_FAILED;
1911 		goto exit;
1912 	}
1913 
1914 	/* Compress with Zlib, decompress with compressdev */
1915 	test_data.zlib_dir = ZLIB_COMPRESS;
1916 	if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1917 		ret = TEST_FAILED;
1918 		goto exit;
1919 	}
1920 
1921 	if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
1922 		/* Compress with compressdev, decompress with Zlib */
1923 		test_data.zlib_dir = ZLIB_DECOMPRESS;
1924 		test_data.buff_type = SGL_BOTH;
1925 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1926 			ret = TEST_FAILED;
1927 			goto exit;
1928 		}
1929 
1930 		/* Compress with Zlib, decompress with compressdev */
1931 		test_data.zlib_dir = ZLIB_COMPRESS;
1932 		test_data.buff_type = SGL_BOTH;
1933 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1934 			ret = TEST_FAILED;
1935 			goto exit;
1936 		}
1937 	}
1938 
1939 	ret  = TEST_SUCCESS;
1940 
1941 exit:
1942 	rte_free(compress_xform);
1943 	return ret;
1944 }
1945 
1946 static int
1947 test_compressdev_deflate_stateless_dynamic_big(void)
1948 {
1949 	struct comp_testsuite_params *ts_params = &testsuite_params;
1950 	uint16_t i = 0;
1951 	int ret = TEST_SUCCESS;
1952 	int j;
1953 	const struct rte_compressdev_capabilities *capab;
1954 	char *test_buffer = NULL;
1955 
1956 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1957 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1958 
1959 	if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
1960 		return -ENOTSUP;
1961 
1962 	if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
1963 		return -ENOTSUP;
1964 
1965 	test_buffer = rte_malloc(NULL, BIG_DATA_TEST_SIZE, 0);
1966 	if (test_buffer == NULL) {
1967 		RTE_LOG(ERR, USER1,
1968 			"Can't allocate buffer for big-data\n");
1969 		return TEST_FAILED;
1970 	}
1971 
1972 	struct interim_data_params int_data = {
1973 		(const char * const *)&test_buffer,
1974 		1,
1975 		&i,
1976 		&ts_params->def_comp_xform,
1977 		&ts_params->def_decomp_xform,
1978 		1
1979 	};
1980 
1981 	struct test_data_params test_data = {
1982 		RTE_COMP_OP_STATELESS,
1983 		SGL_BOTH,
1984 		ZLIB_DECOMPRESS,
1985 		0,
1986 		1
1987 	};
1988 
1989 	ts_params->def_comp_xform->compress.deflate.huffman =
1990 						RTE_COMP_HUFFMAN_DYNAMIC;
1991 
1992 	/* fill the buffer with data based on rand. data */
1993 	srand(BIG_DATA_TEST_SIZE);
1994 	for (j = 0; j < BIG_DATA_TEST_SIZE - 1; ++j)
1995 		test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1;
1996 	test_buffer[BIG_DATA_TEST_SIZE-1] = 0;
1997 
1998 	/* Compress with compressdev, decompress with Zlib */
1999 	test_data.zlib_dir = ZLIB_DECOMPRESS;
2000 	if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
2001 		ret = TEST_FAILED;
2002 		goto end;
2003 	}
2004 
2005 	/* Compress with Zlib, decompress with compressdev */
2006 	test_data.zlib_dir = ZLIB_COMPRESS;
2007 	if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
2008 		ret = TEST_FAILED;
2009 		goto end;
2010 	}
2011 
2012 end:
2013 	ts_params->def_comp_xform->compress.deflate.huffman =
2014 						RTE_COMP_HUFFMAN_DEFAULT;
2015 	rte_free(test_buffer);
2016 	return ret;
2017 }
2018 
2019 
/*
 * Test suite registration: suite-wide setup/teardown plus the list of
 * deflate test cases. All cases except the invalid-configuration one
 * run under generic_ut_setup()/generic_ut_teardown().
 */
static struct unit_test_suite compressdev_testsuite  = {
	.suite_name = "compressdev unit test suite",
	.setup = testsuite_setup,
	.teardown = testsuite_teardown,
	.unit_test_cases = {
		TEST_CASE_ST(NULL, NULL,
			test_compressdev_invalid_configuration),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_fixed),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_dynamic),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_dynamic_big),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_multi_op),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_multi_level),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_multi_xform),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_sgl),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_checksum),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_out_of_space_buffer),
		TEST_CASES_END() /**< NULL terminate unit test array */
	}
};
2048 
/* Entry point invoked by the test framework: run the whole suite. */
static int
test_compressdev(void)
{
	return unit_test_suite_runner(&compressdev_testsuite);
}

/* Expose the suite as the "compressdev_autotest" test command */
REGISTER_TEST_COMMAND(compressdev_autotest, test_compressdev);
2056