xref: /dpdk/app/test/test_compressdev.c (revision 52d719d86e7b2005987cc8e4b0ff538e19fa57d2)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 - 2019 Intel Corporation
3  */
4 #include <string.h>
5 #include <zlib.h>
6 #include <math.h>
7 #include <stdlib.h>
8 #include <unistd.h>
9 
10 #include <rte_cycles.h>
11 #include <rte_malloc.h>
12 #include <rte_mempool.h>
13 #include <rte_mbuf.h>
14 #include <rte_compressdev.h>
15 #include <rte_string_fns.h>
16 
17 #include "test_compressdev_test_buffer.h"
18 #include "test.h"
19 
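/* Integer ceiling division: rounds a/b up, e.g. DIV_CEIL(10, 4) == 3. */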
20 #define DIV_CEIL(a, b)  ((a) / (b) + ((a) % (b) != 0))
21 
22 #define DEFAULT_WINDOW_SIZE 15
23 #define DEFAULT_MEM_LEVEL 8
24 #define MAX_DEQD_RETRIES 10
25 #define DEQUEUE_WAIT_TIME 10000
26 
27 /*
28  * 30% extra size for the compressed data compared to the original data,
29  * in case the data cannot be reduced and is actually bigger
30  * due to the DEFLATE block headers
31  */
32 #define COMPRESS_BUF_SIZE_RATIO 1.3
33 #define NUM_LARGE_MBUFS 16
34 #define SMALL_SEG_SIZE 256
35 #define MAX_SEGS 16
36 #define NUM_OPS 16
37 #define NUM_MAX_XFORMS 16
38 #define NUM_MAX_INFLIGHT_OPS 128
39 #define CACHE_SIZE 0
40 
41 #define ZLIB_CRC_CHECKSUM_WINDOW_BITS 31
42 #define ZLIB_HEADER_SIZE 2
43 #define ZLIB_TRAILER_SIZE 4
44 #define GZIP_HEADER_SIZE 10
45 #define GZIP_TRAILER_SIZE 8
46 
47 #define OUT_OF_SPACE_BUF 1
48 
49 #define MAX_MBUF_SEGMENT_SIZE 65535
50 #define MAX_DATA_MBUF_SIZE (MAX_MBUF_SEGMENT_SIZE - RTE_PKTMBUF_HEADROOM)
51 #define NUM_BIG_MBUFS 4
52 #define BIG_DATA_TEST_SIZE (MAX_DATA_MBUF_SIZE * NUM_BIG_MBUFS / 2)
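/*
 * Half of the combined data capacity of NUM_BIG_MBUFS mbufs, presumably so
 * that a source chain and a destination chain can both be built from the
 * big mbuf pool at the same time.
 */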
53 
54 const char *
55 huffman_type_strings[] = {
56 	[RTE_COMP_HUFFMAN_DEFAULT]	= "PMD default",
57 	[RTE_COMP_HUFFMAN_FIXED]	= "Fixed",
58 	[RTE_COMP_HUFFMAN_DYNAMIC]	= "Dynamic"
59 };
60 
61 enum zlib_direction {
62 	ZLIB_NONE,
63 	ZLIB_COMPRESS,
64 	ZLIB_DECOMPRESS,
65 	ZLIB_ALL
66 };
67 
68 enum varied_buff {
69 	LB_BOTH = 0,	/* both input and output are linear */
70 	SGL_BOTH,	/* both input and output are chained */
71 	SGL_TO_LB,	/* input buffer is chained */
72 	LB_TO_SGL	/* output buffer is chained */
73 };
74 
75 struct priv_op_data {
76 	uint16_t orig_idx;
77 };
78 
79 struct comp_testsuite_params {
80 	struct rte_mempool *large_mbuf_pool;
81 	struct rte_mempool *small_mbuf_pool;
82 	struct rte_mempool *big_mbuf_pool;
83 	struct rte_mempool *op_pool;
84 	struct rte_comp_xform *def_comp_xform;
85 	struct rte_comp_xform *def_decomp_xform;
86 };
87 
88 struct interim_data_params {
89 	const char * const *test_bufs;
90 	unsigned int num_bufs;
91 	uint16_t *buf_idx;
92 	struct rte_comp_xform **compress_xforms;
93 	struct rte_comp_xform **decompress_xforms;
94 	unsigned int num_xforms;
95 };
96 
97 struct test_data_params {
98 	enum rte_comp_op_type state;
99 	enum varied_buff buff_type;
100 	enum zlib_direction zlib_dir;
101 	unsigned int out_of_space;
102 	unsigned int big_data;
103 };
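/*
 * In test_data_params, a non-zero out_of_space shrinks the destination
 * buffers to OUT_OF_SPACE_BUF bytes and expects the operation to end with
 * RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED, while a non-zero big_data
 * makes the test draw its mbufs from the big mbuf pool.
 */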
104 
105 static struct comp_testsuite_params testsuite_params = { 0 };
106 
107 static void
108 testsuite_teardown(void)
109 {
110 	struct comp_testsuite_params *ts_params = &testsuite_params;
111 
112 	if (rte_mempool_in_use_count(ts_params->large_mbuf_pool))
113 		RTE_LOG(ERR, USER1, "Large mbuf pool still has unfreed bufs\n");
114 	if (rte_mempool_in_use_count(ts_params->small_mbuf_pool))
115 		RTE_LOG(ERR, USER1, "Small mbuf pool still has unfreed bufs\n");
116 	if (rte_mempool_in_use_count(ts_params->big_mbuf_pool))
117 		RTE_LOG(ERR, USER1, "Big mbuf pool still has unfreed bufs\n");
118 	if (rte_mempool_in_use_count(ts_params->op_pool))
119 		RTE_LOG(ERR, USER1, "op pool still has unfreed ops\n");
120 
121 	rte_mempool_free(ts_params->large_mbuf_pool);
122 	rte_mempool_free(ts_params->small_mbuf_pool);
123 	rte_mempool_free(ts_params->big_mbuf_pool);
124 	rte_mempool_free(ts_params->op_pool);
125 	rte_free(ts_params->def_comp_xform);
126 	rte_free(ts_params->def_decomp_xform);
127 }
128 
129 static int
130 testsuite_setup(void)
131 {
132 	struct comp_testsuite_params *ts_params = &testsuite_params;
133 	uint32_t max_buf_size = 0;
134 	unsigned int i;
135 
136 	if (rte_compressdev_count() == 0) {
137 		RTE_LOG(WARNING, USER1, "Need at least one compress device\n");
138 		return TEST_SKIPPED;
139 	}
140 
141 	RTE_LOG(NOTICE, USER1, "Running tests on device %s\n",
142 				rte_compressdev_name_get(0));
143 
144 	for (i = 0; i < RTE_DIM(compress_test_bufs); i++)
145 		max_buf_size = RTE_MAX(max_buf_size,
146 				strlen(compress_test_bufs[i]) + 1);
147 
148 	/*
149 	 * Buffers to be used in compression and decompression.
150 	 * Since decompressed data might be larger than
151 	 * compressed data (due to block header),
152 	 * buffers should be big enough for both cases.
153 	 */
154 	max_buf_size *= COMPRESS_BUF_SIZE_RATIO;
155 	ts_params->large_mbuf_pool = rte_pktmbuf_pool_create("large_mbuf_pool",
156 			NUM_LARGE_MBUFS,
157 			CACHE_SIZE, 0,
158 			max_buf_size + RTE_PKTMBUF_HEADROOM,
159 			rte_socket_id());
160 	if (ts_params->large_mbuf_pool == NULL) {
161 		RTE_LOG(ERR, USER1, "Large mbuf pool could not be created\n");
162 		return TEST_FAILED;
163 	}
164 
165 	/* Create mempool with smaller buffers for SGL testing */
166 	ts_params->small_mbuf_pool = rte_pktmbuf_pool_create("small_mbuf_pool",
167 			NUM_LARGE_MBUFS * MAX_SEGS,
168 			CACHE_SIZE, 0,
169 			SMALL_SEG_SIZE + RTE_PKTMBUF_HEADROOM,
170 			rte_socket_id());
171 	if (ts_params->small_mbuf_pool == NULL) {
172 		RTE_LOG(ERR, USER1, "Small mbuf pool could not be created\n");
173 		goto exit;
174 	}
175 
176 	/* Create mempool with big buffers for SGL testing */
177 	ts_params->big_mbuf_pool = rte_pktmbuf_pool_create("big_mbuf_pool",
178 			NUM_BIG_MBUFS + 1,
179 			CACHE_SIZE, 0,
180 			MAX_MBUF_SEGMENT_SIZE,
181 			rte_socket_id());
182 	if (ts_params->big_mbuf_pool == NULL) {
183 		RTE_LOG(ERR, USER1, "Big mbuf pool could not be created\n");
184 		goto exit;
185 	}
186 
187 	ts_params->op_pool = rte_comp_op_pool_create("op_pool", NUM_OPS,
188 				0, sizeof(struct priv_op_data),
189 				rte_socket_id());
190 	if (ts_params->op_pool == NULL) {
191 		RTE_LOG(ERR, USER1, "Operation pool could not be created\n");
192 		goto exit;
193 	}
194 
195 	ts_params->def_comp_xform =
196 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
197 	if (ts_params->def_comp_xform == NULL) {
198 		RTE_LOG(ERR, USER1,
199 			"Default compress xform could not be created\n");
200 		goto exit;
201 	}
202 	ts_params->def_decomp_xform =
203 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
204 	if (ts_params->def_decomp_xform == NULL) {
205 		RTE_LOG(ERR, USER1,
206 			"Default decompress xform could not be created\n");
207 		goto exit;
208 	}
209 
210 	/* Initializes default values for compress/decompress xforms */
211 	ts_params->def_comp_xform->type = RTE_COMP_COMPRESS;
212 	ts_params->def_comp_xform->compress.algo = RTE_COMP_ALGO_DEFLATE;
213 	ts_params->def_comp_xform->compress.deflate.huffman =
214 						RTE_COMP_HUFFMAN_DEFAULT;
215 	ts_params->def_comp_xform->compress.level = RTE_COMP_LEVEL_PMD_DEFAULT;
216 	ts_params->def_comp_xform->compress.chksum = RTE_COMP_CHECKSUM_NONE;
217 	ts_params->def_comp_xform->compress.window_size = DEFAULT_WINDOW_SIZE;
218 
219 	ts_params->def_decomp_xform->type = RTE_COMP_DECOMPRESS;
220 	ts_params->def_decomp_xform->decompress.algo = RTE_COMP_ALGO_DEFLATE;
221 	ts_params->def_decomp_xform->decompress.chksum = RTE_COMP_CHECKSUM_NONE;
222 	ts_params->def_decomp_xform->decompress.window_size = DEFAULT_WINDOW_SIZE;
223 
224 	return TEST_SUCCESS;
225 
226 exit:
227 	testsuite_teardown();
228 
229 	return TEST_FAILED;
230 }
231 
232 static int
233 generic_ut_setup(void)
234 {
235 	/* Configure compressdev (one device, one queue pair) */
236 	struct rte_compressdev_config config = {
237 		.socket_id = rte_socket_id(),
238 		.nb_queue_pairs = 1,
239 		.max_nb_priv_xforms = NUM_MAX_XFORMS,
240 		.max_nb_streams = 0
241 	};
242 
243 	if (rte_compressdev_configure(0, &config) < 0) {
244 		RTE_LOG(ERR, USER1, "Device configuration failed\n");
245 		return -1;
246 	}
247 
248 	if (rte_compressdev_queue_pair_setup(0, 0, NUM_MAX_INFLIGHT_OPS,
249 			rte_socket_id()) < 0) {
250 		RTE_LOG(ERR, USER1, "Queue pair setup failed\n");
251 		return -1;
252 	}
253 
254 	if (rte_compressdev_start(0) < 0) {
255 		RTE_LOG(ERR, USER1, "Device could not be started\n");
256 		return -1;
257 	}
258 
259 	return 0;
260 }
261 
262 static void
263 generic_ut_teardown(void)
264 {
265 	rte_compressdev_stop(0);
266 	if (rte_compressdev_close(0) < 0)
267 		RTE_LOG(ERR, USER1, "Device could not be closed\n");
268 }
269 
270 static int
271 test_compressdev_invalid_configuration(void)
272 {
273 	struct rte_compressdev_config invalid_config;
274 	struct rte_compressdev_config valid_config = {
275 		.socket_id = rte_socket_id(),
276 		.nb_queue_pairs = 1,
277 		.max_nb_priv_xforms = NUM_MAX_XFORMS,
278 		.max_nb_streams = 0
279 	};
280 	struct rte_compressdev_info dev_info;
281 
282 	/* Invalid configuration with 0 queue pairs */
283 	memcpy(&invalid_config, &valid_config,
284 			sizeof(struct rte_compressdev_config));
285 	invalid_config.nb_queue_pairs = 0;
286 
287 	TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
288 			"Device configuration was successful "
289 			"with no queue pairs (invalid)\n");
290 
291 	/*
292 	 * Invalid configuration with too many queue pairs
293 	 * (if there is an actual maximum number of queue pairs)
294 	 */
295 	rte_compressdev_info_get(0, &dev_info);
296 	if (dev_info.max_nb_queue_pairs != 0) {
297 		memcpy(&invalid_config, &valid_config,
298 			sizeof(struct rte_compressdev_config));
299 		invalid_config.nb_queue_pairs = dev_info.max_nb_queue_pairs + 1;
300 
301 		TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
302 				"Device configuration was successful "
303 				"with too many queue pairs (invalid)\n");
304 	}
305 
306 	/* Invalid queue pair setup, with no number of queue pairs set */
307 	TEST_ASSERT_FAIL(rte_compressdev_queue_pair_setup(0, 0,
308 				NUM_MAX_INFLIGHT_OPS, rte_socket_id()),
309 			"Queue pair setup was successful "
310 			"with no queue pairs set (invalid)\n");
311 
312 	return TEST_SUCCESS;
313 }
314 
315 static int
316 compare_buffers(const char *buffer1, uint32_t buffer1_len,
317 		const char *buffer2, uint32_t buffer2_len)
318 {
319 	if (buffer1_len != buffer2_len) {
320 		RTE_LOG(ERR, USER1, "Buffer lengths are different\n");
321 		return -1;
322 	}
323 
324 	if (memcmp(buffer1, buffer2, buffer1_len) != 0) {
325 		RTE_LOG(ERR, USER1, "Buffers are different\n");
326 		return -1;
327 	}
328 
329 	return 0;
330 }
331 
332 /*
333  * Maps compressdev and Zlib flush flags
334  */
335 static int
336 map_zlib_flush_flag(enum rte_comp_flush_flag flag)
337 {
338 	switch (flag) {
339 	case RTE_COMP_FLUSH_NONE:
340 		return Z_NO_FLUSH;
341 	case RTE_COMP_FLUSH_SYNC:
342 		return Z_SYNC_FLUSH;
343 	case RTE_COMP_FLUSH_FULL:
344 		return Z_FULL_FLUSH;
345 	case RTE_COMP_FLUSH_FINAL:
346 		return Z_FINISH;
347 	/*
348 	 * There should be only the values above,
349 	 * so this should never happen
350 	 */
351 	default:
352 		return -1;
353 	}
354 }
355 
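/*
 * Compress op->m_src into op->m_dst with zlib's deflate(), standing in for a
 * compressdev PMD, so that data compressed here can be decompressed by the
 * device under test and verified against it.
 */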
356 static int
357 compress_zlib(struct rte_comp_op *op,
358 		const struct rte_comp_xform *xform, int mem_level)
359 {
360 	z_stream stream;
361 	int zlib_flush;
362 	int strategy, window_bits, comp_level;
363 	int ret = TEST_FAILED;
364 	uint8_t *single_src_buf = NULL;
365 	uint8_t *single_dst_buf = NULL;
366 
367 	/* initialize zlib stream */
368 	stream.zalloc = Z_NULL;
369 	stream.zfree = Z_NULL;
370 	stream.opaque = Z_NULL;
371 
372 	if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED)
373 		strategy = Z_FIXED;
374 	else
375 		strategy = Z_DEFAULT_STRATEGY;
376 
377 	/*
378 	 * Window bits is the base two logarithm of the window size (in bytes).
379 	 * When doing raw DEFLATE, this number will be negative.
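	 * deflateInit2() also accepts 8..15 for zlib framing (with an Adler-32
	 * checksum) and windowBits + 16 (here 31 = 15 + 16) for gzip framing
	 * (with a CRC-32 checksum), which is what the checksum selection
	 * below relies on.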
380 	 */
381 	window_bits = -(xform->compress.window_size);
382 	if (xform->compress.chksum == RTE_COMP_CHECKSUM_ADLER32)
383 		window_bits *= -1;
384 	else if (xform->compress.chksum == RTE_COMP_CHECKSUM_CRC32)
385 		window_bits = ZLIB_CRC_CHECKSUM_WINDOW_BITS;
386 
387 	comp_level = xform->compress.level;
388 
389 	if (comp_level != RTE_COMP_LEVEL_NONE)
390 		ret = deflateInit2(&stream, comp_level, Z_DEFLATED,
391 			window_bits, mem_level, strategy);
392 	else
393 		ret = deflateInit(&stream, Z_NO_COMPRESSION);
394 
395 	if (ret != Z_OK) {
396 		printf("Zlib deflate could not be initialized\n");
397 		goto exit;
398 	}
399 
400 	/* Assuming stateless operation */
401 	/* SGL Input */
402 	if (op->m_src->nb_segs > 1) {
403 		single_src_buf = rte_malloc(NULL,
404 				rte_pktmbuf_pkt_len(op->m_src), 0);
405 		if (single_src_buf == NULL) {
406 			RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
407 			goto exit;
408 		}
409 
410 		if (rte_pktmbuf_read(op->m_src, op->src.offset,
411 					rte_pktmbuf_pkt_len(op->m_src) -
412 					op->src.offset,
413 					single_src_buf) == NULL) {
414 			RTE_LOG(ERR, USER1,
415 				"Buffer could not be read entirely\n");
416 			goto exit;
417 		}
418 
419 		stream.avail_in = op->src.length;
420 		stream.next_in = single_src_buf;
421 
422 	} else {
423 		stream.avail_in = op->src.length;
424 		stream.next_in = rte_pktmbuf_mtod_offset(op->m_src, uint8_t *,
425 				op->src.offset);
426 	}
427 	/* SGL output */
428 	if (op->m_dst->nb_segs > 1) {
429 
430 		single_dst_buf = rte_malloc(NULL,
431 				rte_pktmbuf_pkt_len(op->m_dst), 0);
432 			if (single_dst_buf == NULL) {
433 				RTE_LOG(ERR, USER1,
434 					"Buffer could not be allocated\n");
435 			goto exit;
436 		}
437 
438 		stream.avail_out = op->m_dst->pkt_len;
439 		stream.next_out = single_dst_buf;
440 
441 	} else { /* linear output */
442 		stream.avail_out = op->m_dst->data_len;
443 		stream.next_out = rte_pktmbuf_mtod_offset(op->m_dst, uint8_t *,
444 				op->dst.offset);
445 	}
446 
447 	/* Stateless operation, the whole buffer is compressed in one go */
448 	zlib_flush = map_zlib_flush_flag(op->flush_flag);
449 	ret = deflate(&stream, zlib_flush);
450 
451 	if (stream.avail_in != 0) {
452 		RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
453 		goto exit;
454 	}
455 
456 	if (ret != Z_STREAM_END)
457 		goto exit;
458 
459 	/* Copy data to destination SGL */
460 	if (op->m_dst->nb_segs > 1) {
461 		uint32_t remaining_data = stream.total_out;
462 		uint8_t *src_data = single_dst_buf;
463 		struct rte_mbuf *dst_buf = op->m_dst;
464 
465 		while (remaining_data > 0) {
466 			uint8_t *dst_data = rte_pktmbuf_mtod_offset(dst_buf,
467 						uint8_t *, op->dst.offset);
468 			/* Last segment */
469 			if (remaining_data < dst_buf->data_len) {
470 				memcpy(dst_data, src_data, remaining_data);
471 				remaining_data = 0;
472 			} else {
473 				memcpy(dst_data, src_data, dst_buf->data_len);
474 				remaining_data -= dst_buf->data_len;
475 				src_data += dst_buf->data_len;
476 				dst_buf = dst_buf->next;
477 			}
478 		}
479 	}
480 
481 	op->consumed = stream.total_in;
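	/*
	 * Strip the zlib/gzip header and trailer that deflate() added, so the
	 * destination mbuf holds only the raw DEFLATE payload; the checksum
	 * computed by zlib is still reported below through stream.adler.
	 */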
482 	if (xform->compress.chksum == RTE_COMP_CHECKSUM_ADLER32) {
483 		rte_pktmbuf_adj(op->m_dst, ZLIB_HEADER_SIZE);
484 		rte_pktmbuf_trim(op->m_dst, ZLIB_TRAILER_SIZE);
485 		op->produced = stream.total_out - (ZLIB_HEADER_SIZE +
486 				ZLIB_TRAILER_SIZE);
487 	} else if (xform->compress.chksum == RTE_COMP_CHECKSUM_CRC32) {
488 		rte_pktmbuf_adj(op->m_dst, GZIP_HEADER_SIZE);
489 		rte_pktmbuf_trim(op->m_dst, GZIP_TRAILER_SIZE);
490 		op->produced = stream.total_out - (GZIP_HEADER_SIZE +
491 				GZIP_TRAILER_SIZE);
492 	} else
493 		op->produced = stream.total_out;
494 
495 	op->status = RTE_COMP_OP_STATUS_SUCCESS;
496 	op->output_chksum = stream.adler;
497 
498 	deflateReset(&stream);
499 
500 	ret = 0;
501 exit:
502 	deflateEnd(&stream);
503 	rte_free(single_src_buf);
504 	rte_free(single_dst_buf);
505 
506 	return ret;
507 }
508 
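/*
 * Decompress op->m_src into op->m_dst with zlib's inflate(), mirroring
 * compress_zlib() above, so that output produced by the device under test
 * can be verified independently.
 */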
509 static int
510 decompress_zlib(struct rte_comp_op *op,
511 		const struct rte_comp_xform *xform)
512 {
513 	z_stream stream;
514 	int window_bits;
515 	int zlib_flush;
516 	int ret = TEST_FAILED;
517 	uint8_t *single_src_buf = NULL;
518 	uint8_t *single_dst_buf = NULL;
519 
520 	/* initialize zlib stream */
521 	stream.zalloc = Z_NULL;
522 	stream.zfree = Z_NULL;
523 	stream.opaque = Z_NULL;
524 
525 	/*
526 	 * Window bits is the base two logarithm of the window size (in bytes).
527 	 * When doing raw DEFLATE, this number will be negative.
528 	 */
529 	window_bits = -(xform->decompress.window_size);
530 	ret = inflateInit2(&stream, window_bits);
531 
532 	if (ret != Z_OK) {
533 		printf("Zlib deflate could not be initialized\n");
534 		goto exit;
535 	}
536 
537 	/* Assuming stateless operation */
538 	/* SGL */
539 	if (op->m_src->nb_segs > 1) {
540 		single_src_buf = rte_malloc(NULL,
541 				rte_pktmbuf_pkt_len(op->m_src), 0);
542 		if (single_src_buf == NULL) {
543 			RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
544 			goto exit;
545 		}
546 		single_dst_buf = rte_malloc(NULL,
547 				rte_pktmbuf_pkt_len(op->m_dst), 0);
548 		if (single_dst_buf == NULL) {
549 			RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
550 			goto exit;
551 		}
552 		if (rte_pktmbuf_read(op->m_src, 0,
553 					rte_pktmbuf_pkt_len(op->m_src),
554 					single_src_buf) == NULL) {
555 			RTE_LOG(ERR, USER1,
556 				"Buffer could not be read entirely\n");
557 			goto exit;
558 		}
559 
560 		stream.avail_in = op->src.length;
561 		stream.next_in = single_src_buf;
562 		stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
563 		stream.next_out = single_dst_buf;
564 
565 	} else {
566 		stream.avail_in = op->src.length;
567 		stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
568 		stream.avail_out = op->m_dst->data_len;
569 		stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
570 	}
571 
572 	/* Stateless operation, the whole buffer is decompressed in one go */
573 	zlib_flush = map_zlib_flush_flag(op->flush_flag);
574 	ret = inflate(&stream, zlib_flush);
575 
576 	if (stream.avail_in != 0) {
577 		RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
578 		goto exit;
579 	}
580 
581 	if (ret != Z_STREAM_END)
582 		goto exit;
583 
584 	if (op->m_src->nb_segs > 1) {
585 		uint32_t remaining_data = stream.total_out;
586 		uint8_t *src_data = single_dst_buf;
587 		struct rte_mbuf *dst_buf = op->m_dst;
588 
589 		while (remaining_data > 0) {
590 			uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
591 					uint8_t *);
592 			/* Last segment */
593 			if (remaining_data < dst_buf->data_len) {
594 				memcpy(dst_data, src_data, remaining_data);
595 				remaining_data = 0;
596 			} else {
597 				memcpy(dst_data, src_data, dst_buf->data_len);
598 				remaining_data -= dst_buf->data_len;
599 				src_data += dst_buf->data_len;
600 				dst_buf = dst_buf->next;
601 			}
602 		}
603 	}
604 
605 	op->consumed = stream.total_in;
606 	op->produced = stream.total_out;
607 	op->status = RTE_COMP_OP_STATUS_SUCCESS;
608 
609 	inflateReset(&stream);
610 
611 	ret = 0;
612 exit:
613 	inflateEnd(&stream);
614 
615 	return ret;
616 }
617 
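/*
 * Turn head_buf into a chained (SGL) mbuf: append segments of up to seg_size
 * bytes until total_data_size is reached (capped by limit_segs_in_sgl when it
 * is non-zero), copying test_buf into the chain when one is provided.
 */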
618 static int
619 prepare_sgl_bufs(const char *test_buf, struct rte_mbuf *head_buf,
620 		uint32_t total_data_size,
621 		struct rte_mempool *small_mbuf_pool,
622 		struct rte_mempool *large_mbuf_pool,
623 		uint8_t limit_segs_in_sgl,
624 		uint16_t seg_size)
625 {
626 	uint32_t remaining_data = total_data_size;
627 	uint16_t num_remaining_segs = DIV_CEIL(remaining_data, seg_size);
628 	struct rte_mempool *pool;
629 	struct rte_mbuf *next_seg;
630 	uint32_t data_size;
631 	char *buf_ptr;
632 	const char *data_ptr = test_buf;
633 	uint16_t i;
634 	int ret;
635 
636 	if (limit_segs_in_sgl != 0 && num_remaining_segs > limit_segs_in_sgl)
637 		num_remaining_segs = limit_segs_in_sgl - 1;
638 
639 	/*
640 	 * Allocate data in the first segment (header) and
641 	 * copy data if test buffer is provided
642 	 */
643 	if (remaining_data < seg_size)
644 		data_size = remaining_data;
645 	else
646 		data_size = seg_size;
647 	buf_ptr = rte_pktmbuf_append(head_buf, data_size);
648 	if (buf_ptr == NULL) {
649 		RTE_LOG(ERR, USER1,
650 			"Not enough space in the 1st buffer\n");
651 		return -1;
652 	}
653 
654 	if (data_ptr != NULL) {
655 		/* Copy characters without NULL terminator */
656 		strncpy(buf_ptr, data_ptr, data_size);
657 		data_ptr += data_size;
658 	}
659 	remaining_data -= data_size;
660 	num_remaining_segs--;
661 
662 	/*
663 	 * Allocate the rest of the segments,
664 	 * copy the rest of the data and chain the segments.
665 	 */
666 	for (i = 0; i < num_remaining_segs; i++) {
667 
668 		if (i == (num_remaining_segs - 1)) {
669 			/* last segment */
670 			if (remaining_data > seg_size)
671 				pool = large_mbuf_pool;
672 			else
673 				pool = small_mbuf_pool;
674 			data_size = remaining_data;
675 		} else {
676 			data_size = seg_size;
677 			pool = small_mbuf_pool;
678 		}
679 
680 		next_seg = rte_pktmbuf_alloc(pool);
681 		if (next_seg == NULL) {
682 			RTE_LOG(ERR, USER1,
683 				"New segment could not be allocated "
684 				"from the mempool\n");
685 			return -1;
686 		}
687 		buf_ptr = rte_pktmbuf_append(next_seg, data_size);
688 		if (buf_ptr == NULL) {
689 			RTE_LOG(ERR, USER1,
690 				"Not enough space in the buffer\n");
691 			rte_pktmbuf_free(next_seg);
692 			return -1;
693 		}
694 		if (data_ptr != NULL) {
695 			/* Copy characters without NULL terminator */
696 			strncpy(buf_ptr, data_ptr, data_size);
697 			data_ptr += data_size;
698 		}
699 		remaining_data -= data_size;
700 
701 		ret = rte_pktmbuf_chain(head_buf, next_seg);
702 		if (ret != 0) {
703 			rte_pktmbuf_free(next_seg);
704 			RTE_LOG(ERR, USER1,
705 				"Segment could not chained\n");
706 			return -1;
707 		}
708 	}
709 
710 	return 0;
711 }
712 
713 /*
714  * Compresses and decompresses buffer with compressdev API and Zlib API
715  */
716 static int
717 test_deflate_comp_decomp(const struct interim_data_params *int_data,
718 		const struct test_data_params *test_data)
719 {
720 	struct comp_testsuite_params *ts_params = &testsuite_params;
721 	const char * const *test_bufs = int_data->test_bufs;
722 	unsigned int num_bufs = int_data->num_bufs;
723 	uint16_t *buf_idx = int_data->buf_idx;
724 	struct rte_comp_xform **compress_xforms = int_data->compress_xforms;
725 	struct rte_comp_xform **decompress_xforms = int_data->decompress_xforms;
726 	unsigned int num_xforms = int_data->num_xforms;
727 	enum rte_comp_op_type state = test_data->state;
728 	unsigned int buff_type = test_data->buff_type;
729 	unsigned int out_of_space = test_data->out_of_space;
730 	unsigned int big_data = test_data->big_data;
731 	enum zlib_direction zlib_dir = test_data->zlib_dir;
732 	int ret_status = TEST_FAILED;
733 	int ret;
734 	struct rte_mbuf *uncomp_bufs[num_bufs];
735 	struct rte_mbuf *comp_bufs[num_bufs];
736 	struct rte_comp_op *ops[num_bufs];
737 	struct rte_comp_op *ops_processed[num_bufs];
738 	void *priv_xforms[num_bufs];
739 	uint16_t num_enqd, num_deqd, num_total_deqd;
740 	uint16_t num_priv_xforms = 0;
741 	unsigned int deqd_retries = 0;
742 	struct priv_op_data *priv_data;
743 	char *buf_ptr;
744 	unsigned int i;
745 	struct rte_mempool *buf_pool;
746 	uint32_t data_size;
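	/*
	 * oos_zlib_decompress is set when compressdev performs the compression
	 * (zlib, if used at all, only decompresses), so the out-of-space check
	 * applies to the compression stage; oos_zlib_compress is the converse
	 * for the decompression stage.
	 */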
747 	/* Compressing with CompressDev */
748 	unsigned int oos_zlib_decompress =
749 			(zlib_dir == ZLIB_NONE || zlib_dir == ZLIB_DECOMPRESS);
750 	/* Decompressing with CompressDev */
751 	unsigned int oos_zlib_compress =
752 			(zlib_dir == ZLIB_NONE || zlib_dir == ZLIB_COMPRESS);
753 	const struct rte_compressdev_capabilities *capa =
754 		rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
755 	char *contig_buf = NULL;
756 	uint64_t compress_checksum[num_bufs];
757 
758 	if (capa == NULL) {
759 		RTE_LOG(ERR, USER1,
760 			"Compress device does not support DEFLATE\n");
761 		return -ENOTSUP;
762 	}
763 
764 	/* Initialize all arrays to NULL */
765 	memset(uncomp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
766 	memset(comp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
767 	memset(ops, 0, sizeof(struct rte_comp_op *) * num_bufs);
768 	memset(ops_processed, 0, sizeof(struct rte_comp_op *) * num_bufs);
769 	memset(priv_xforms, 0, sizeof(void *) * num_bufs);
770 
771 	if (big_data)
772 		buf_pool = ts_params->big_mbuf_pool;
773 	else if (buff_type == SGL_BOTH)
774 		buf_pool = ts_params->small_mbuf_pool;
775 	else
776 		buf_pool = ts_params->large_mbuf_pool;
777 
778 	/* Prepare the source mbufs with the data */
779 	ret = rte_pktmbuf_alloc_bulk(buf_pool,
780 				uncomp_bufs, num_bufs);
781 	if (ret < 0) {
782 		RTE_LOG(ERR, USER1,
783 			"Source mbufs could not be allocated "
784 			"from the mempool\n");
785 		goto exit;
786 	}
787 
788 	if (buff_type == SGL_BOTH || buff_type == SGL_TO_LB) {
789 		for (i = 0; i < num_bufs; i++) {
790 			data_size = strlen(test_bufs[i]) + 1;
791 			if (prepare_sgl_bufs(test_bufs[i], uncomp_bufs[i],
792 			    data_size,
793 			    big_data ? buf_pool : ts_params->small_mbuf_pool,
794 			    big_data ? buf_pool : ts_params->large_mbuf_pool,
795 			    big_data ? 0 : MAX_SEGS,
796 			    big_data ? MAX_DATA_MBUF_SIZE : SMALL_SEG_SIZE) < 0)
797 				goto exit;
798 		}
799 	} else {
800 		for (i = 0; i < num_bufs; i++) {
801 			data_size = strlen(test_bufs[i]) + 1;
802 			buf_ptr = rte_pktmbuf_append(uncomp_bufs[i], data_size);
803 			strlcpy(buf_ptr, test_bufs[i], data_size);
804 		}
805 	}
806 
807 	/* Prepare the destination mbufs */
808 	ret = rte_pktmbuf_alloc_bulk(buf_pool, comp_bufs, num_bufs);
809 	if (ret < 0) {
810 		RTE_LOG(ERR, USER1,
811 			"Destination mbufs could not be allocated "
812 			"from the mempool\n");
813 		goto exit;
814 	}
815 
816 	if (buff_type == SGL_BOTH || buff_type == LB_TO_SGL) {
817 		for (i = 0; i < num_bufs; i++) {
818 			if (out_of_space == 1 && oos_zlib_decompress)
819 				data_size = OUT_OF_SPACE_BUF;
820 			else
821 				data_size = strlen(test_bufs[i]) *
822 					COMPRESS_BUF_SIZE_RATIO;
823 
824 			if (prepare_sgl_bufs(NULL, comp_bufs[i],
825 			      data_size,
826 			      big_data ? buf_pool : ts_params->small_mbuf_pool,
827 			      big_data ? buf_pool : ts_params->large_mbuf_pool,
828 			      big_data ? 0 : MAX_SEGS,
829 			      big_data ? MAX_DATA_MBUF_SIZE : SMALL_SEG_SIZE)
830 					< 0)
831 				goto exit;
832 		}
833 
834 	} else {
835 		for (i = 0; i < num_bufs; i++) {
836 			if (out_of_space == 1 && oos_zlib_decompress)
837 				data_size = OUT_OF_SPACE_BUF;
838 			else
839 				data_size = strlen(test_bufs[i]) *
840 					COMPRESS_BUF_SIZE_RATIO;
841 
842 			rte_pktmbuf_append(comp_bufs[i], data_size);
843 		}
844 	}
845 
846 	/* Build the compression operations */
847 	ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
848 	if (ret < 0) {
849 		RTE_LOG(ERR, USER1,
850 			"Compress operations could not be allocated "
851 			"from the mempool\n");
852 		goto exit;
853 	}
854 
855 
856 	for (i = 0; i < num_bufs; i++) {
857 		ops[i]->m_src = uncomp_bufs[i];
858 		ops[i]->m_dst = comp_bufs[i];
859 		ops[i]->src.offset = 0;
860 		ops[i]->src.length = rte_pktmbuf_pkt_len(uncomp_bufs[i]);
861 		ops[i]->dst.offset = 0;
862 		if (state == RTE_COMP_OP_STATELESS) {
863 			ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
864 		} else {
865 			RTE_LOG(ERR, USER1,
866 				"Stateful operations are not supported "
867 				"in these tests yet\n");
868 			goto exit;
869 		}
870 		ops[i]->input_chksum = 0;
871 		/*
872 		 * Store the original operation index in the private data,
873 		 * since ordering does not have to be maintained
874 		 * when dequeueing from compressdev, so the results can be
875 		 * matched back to the right buffer at the end of the test.
876 		 */
877 		priv_data = (struct priv_op_data *) (ops[i] + 1);
878 		priv_data->orig_idx = i;
879 	}
880 
881 	/* Compress data (either with Zlib API or compressdev API) */
882 	if (zlib_dir == ZLIB_COMPRESS || zlib_dir == ZLIB_ALL) {
883 		for (i = 0; i < num_bufs; i++) {
884 			const struct rte_comp_xform *compress_xform =
885 				compress_xforms[i % num_xforms];
886 			ret = compress_zlib(ops[i], compress_xform,
887 					DEFAULT_MEM_LEVEL);
888 			if (ret < 0)
889 				goto exit;
890 
891 			ops_processed[i] = ops[i];
892 		}
893 	} else {
894 		/* Create compress private xform data */
895 		for (i = 0; i < num_xforms; i++) {
896 			ret = rte_compressdev_private_xform_create(0,
897 				(const struct rte_comp_xform *)compress_xforms[i],
898 				&priv_xforms[i]);
899 			if (ret < 0) {
900 				RTE_LOG(ERR, USER1,
901 					"Compression private xform "
902 					"could not be created\n");
903 				goto exit;
904 			}
905 			num_priv_xforms++;
906 		}
907 
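		/*
		 * When the PMD advertises RTE_COMP_FF_SHAREABLE_PRIV_XFORM, one
		 * private xform can be attached to any number of operations;
		 * otherwise every in-flight operation needs its own copy.
		 */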
908 		if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
909 			/* Attach shareable private xform data to ops */
910 			for (i = 0; i < num_bufs; i++)
911 				ops[i]->private_xform = priv_xforms[i % num_xforms];
912 		} else {
913 			/* Create rest of the private xforms for the other ops */
914 			for (i = num_xforms; i < num_bufs; i++) {
915 				ret = rte_compressdev_private_xform_create(0,
916 					compress_xforms[i % num_xforms],
917 					&priv_xforms[i]);
918 				if (ret < 0) {
919 					RTE_LOG(ERR, USER1,
920 						"Compression private xform "
921 						"could not be created\n");
922 					goto exit;
923 				}
924 				num_priv_xforms++;
925 			}
926 
927 			/* Attach non shareable private xform data to ops */
928 			for (i = 0; i < num_bufs; i++)
929 				ops[i]->private_xform = priv_xforms[i];
930 		}
931 
932 		/* Enqueue and dequeue all operations */
933 		num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
934 		if (num_enqd < num_bufs) {
935 			RTE_LOG(ERR, USER1,
936 				"The operations could not be enqueued\n");
937 			goto exit;
938 		}
939 
940 		num_total_deqd = 0;
941 		do {
942 			/*
943 			 * If retrying a dequeue call, wait for 10 ms to give
944 			 * the driver enough time to process the operations
945 			 */
946 			if (deqd_retries != 0) {
947 				/*
948 				 * Avoid infinite loop if not all the
949 				 * operations get out of the device
950 				 */
951 				if (deqd_retries == MAX_DEQD_RETRIES) {
952 					RTE_LOG(ERR, USER1,
953 						"Not all operations could be "
954 						"dequeued\n");
955 					goto exit;
956 				}
957 				usleep(DEQUEUE_WAIT_TIME);
958 			}
959 			num_deqd = rte_compressdev_dequeue_burst(0, 0,
960 					&ops_processed[num_total_deqd], num_bufs);
961 			num_total_deqd += num_deqd;
962 			deqd_retries++;
963 
964 		} while (num_total_deqd < num_enqd);
965 
966 		deqd_retries = 0;
967 
968 		/* Free compress private xforms */
969 		for (i = 0; i < num_priv_xforms; i++) {
970 			rte_compressdev_private_xform_free(0, priv_xforms[i]);
971 			priv_xforms[i] = NULL;
972 		}
973 		num_priv_xforms = 0;
974 	}
975 
976 	for (i = 0; i < num_bufs; i++) {
977 		priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
978 		uint16_t xform_idx = priv_data->orig_idx % num_xforms;
979 		const struct rte_comp_compress_xform *compress_xform =
980 				&compress_xforms[xform_idx]->compress;
981 		enum rte_comp_huffman huffman_type =
982 			compress_xform->deflate.huffman;
983 		char engine[] = "zlib (directly, not PMD)";
984 		if (zlib_dir != ZLIB_COMPRESS && zlib_dir != ZLIB_ALL)
985 			strlcpy(engine, "PMD", sizeof(engine));
986 
987 		RTE_LOG(DEBUG, USER1, "Buffer %u compressed by %s from %u to"
988 			" %u bytes (level = %d, huffman = %s)\n",
989 			buf_idx[priv_data->orig_idx], engine,
990 			ops_processed[i]->consumed, ops_processed[i]->produced,
991 			compress_xform->level,
992 			huffman_type_strings[huffman_type]);
993 		RTE_LOG(DEBUG, USER1, "Compression ratio = %.2f\n",
994 			ops_processed[i]->consumed == 0 ? 0 :
995 			(float)ops_processed[i]->produced /
996 			ops_processed[i]->consumed * 100);
997 		if (compress_xform->chksum != RTE_COMP_CHECKSUM_NONE)
998 			compress_checksum[i] = ops_processed[i]->output_chksum;
999 		ops[i] = NULL;
1000 	}
1001 
1002 	/*
1003 	 * Check operation status and free source mbufs (destination mbuf and
1004 	 * compress operation information is needed for the decompression stage)
1005 	 */
1006 	for (i = 0; i < num_bufs; i++) {
1007 		if (out_of_space && oos_zlib_decompress) {
1008 			if (ops_processed[i]->status !=
1009 					RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
1010 				ret_status = TEST_FAILED;
1011 				RTE_LOG(ERR, USER1,
1012 					"Operation without expected out of "
1013 					"space status error\n");
1014 				goto exit;
1015 			} else
1016 				continue;
1017 		}
1018 
1019 		if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
1020 			RTE_LOG(ERR, USER1,
1021 				"Some operations were not successful\n");
1022 			goto exit;
1023 		}
1024 		priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1025 		rte_pktmbuf_free(uncomp_bufs[priv_data->orig_idx]);
1026 		uncomp_bufs[priv_data->orig_idx] = NULL;
1027 	}
1028 
1029 	if (out_of_space && oos_zlib_decompress) {
1030 		ret_status = TEST_SUCCESS;
1031 		goto exit;
1032 	}
1033 
1034 	/* Allocate buffers for decompressed data */
1035 	ret = rte_pktmbuf_alloc_bulk(buf_pool, uncomp_bufs, num_bufs);
1036 	if (ret < 0) {
1037 		RTE_LOG(ERR, USER1,
1038 			"Destination mbufs could not be allocated "
1039 			"from the mempool\n");
1040 		goto exit;
1041 	}
1042 
1043 	if (buff_type == SGL_BOTH || buff_type == LB_TO_SGL) {
1044 		for (i = 0; i < num_bufs; i++) {
1045 			priv_data = (struct priv_op_data *)
1046 					(ops_processed[i] + 1);
1047 			if (out_of_space == 1 && oos_zlib_compress)
1048 				data_size = OUT_OF_SPACE_BUF;
1049 			else
1050 				data_size =
1051 				strlen(test_bufs[priv_data->orig_idx]) + 1;
1052 
1053 			if (prepare_sgl_bufs(NULL, uncomp_bufs[i],
1054 			       data_size,
1055 			       big_data ? buf_pool : ts_params->small_mbuf_pool,
1056 			       big_data ? buf_pool : ts_params->large_mbuf_pool,
1057 			       big_data ? 0 : MAX_SEGS,
1058 			       big_data ? MAX_DATA_MBUF_SIZE : SMALL_SEG_SIZE)
1059 					< 0)
1060 				goto exit;
1061 		}
1062 
1063 	} else {
1064 		for (i = 0; i < num_bufs; i++) {
1065 			priv_data = (struct priv_op_data *)
1066 					(ops_processed[i] + 1);
1067 			if (out_of_space == 1 && oos_zlib_compress)
1068 				data_size = OUT_OF_SPACE_BUF;
1069 			else
1070 				data_size =
1071 				strlen(test_bufs[priv_data->orig_idx]) + 1;
1072 
1073 			rte_pktmbuf_append(uncomp_bufs[i], data_size);
1074 		}
1075 	}
1076 
1077 	/* Build the decompression operations */
1078 	ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
1079 	if (ret < 0) {
1080 		RTE_LOG(ERR, USER1,
1081 			"Decompress operations could not be allocated "
1082 			"from the mempool\n");
1083 		goto exit;
1084 	}
1085 
1086 	/* Source buffer is the compressed data from the previous operations */
1087 	for (i = 0; i < num_bufs; i++) {
1088 		ops[i]->m_src = ops_processed[i]->m_dst;
1089 		ops[i]->m_dst = uncomp_bufs[i];
1090 		ops[i]->src.offset = 0;
1091 		/*
1092 		 * Set the length of the compressed data to the
1093 		 * number of bytes that were produced in the previous stage
1094 		 */
1095 		ops[i]->src.length = ops_processed[i]->produced;
1096 		ops[i]->dst.offset = 0;
1097 		if (state == RTE_COMP_OP_STATELESS) {
1098 			ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
1099 		} else {
1100 			RTE_LOG(ERR, USER1,
1101 				"Stateful operations are not supported "
1102 				"in these tests yet\n");
1103 			goto exit;
1104 		}
1105 		ops[i]->input_chksum = 0;
1106 		/*
1107 		 * Copy the private data from the previous operation,
1108 		 * to keep the index of the original buffer
1109 		 */
1110 		memcpy(ops[i] + 1, ops_processed[i] + 1,
1111 				sizeof(struct priv_op_data));
1112 	}
1113 
1114 	/*
1115 	 * Free the previous compress operations,
1116 	 * as they are not needed anymore
1117 	 */
1118 	rte_comp_op_bulk_free(ops_processed, num_bufs);
1119 
1120 	/* Decompress data (either with Zlib API or compressdev API) */
1121 	if (zlib_dir == ZLIB_DECOMPRESS || zlib_dir == ZLIB_ALL) {
1122 		for (i = 0; i < num_bufs; i++) {
1123 			priv_data = (struct priv_op_data *)(ops[i] + 1);
1124 			uint16_t xform_idx = priv_data->orig_idx % num_xforms;
1125 			const struct rte_comp_xform *decompress_xform =
1126 				decompress_xforms[xform_idx];
1127 
1128 			ret = decompress_zlib(ops[i], decompress_xform);
1129 			if (ret < 0)
1130 				goto exit;
1131 
1132 			ops_processed[i] = ops[i];
1133 		}
1134 	} else {
1135 		/* Create decompress private xform data */
1136 		for (i = 0; i < num_xforms; i++) {
1137 			ret = rte_compressdev_private_xform_create(0,
1138 				(const struct rte_comp_xform *)decompress_xforms[i],
1139 				&priv_xforms[i]);
1140 			if (ret < 0) {
1141 				RTE_LOG(ERR, USER1,
1142 					"Decompression private xform "
1143 					"could not be created\n");
1144 				goto exit;
1145 			}
1146 			num_priv_xforms++;
1147 		}
1148 
1149 		if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
1150 			/* Attach shareable private xform data to ops */
1151 			for (i = 0; i < num_bufs; i++) {
1152 				priv_data = (struct priv_op_data *)(ops[i] + 1);
1153 				uint16_t xform_idx = priv_data->orig_idx %
1154 								num_xforms;
1155 				ops[i]->private_xform = priv_xforms[xform_idx];
1156 			}
1157 		} else {
1158 			/* Create rest of the private xforms for the other ops */
1159 			for (i = num_xforms; i < num_bufs; i++) {
1160 				ret = rte_compressdev_private_xform_create(0,
1161 					decompress_xforms[i % num_xforms],
1162 					&priv_xforms[i]);
1163 				if (ret < 0) {
1164 					RTE_LOG(ERR, USER1,
1165 						"Decompression private xform "
1166 						"could not be created\n");
1167 					goto exit;
1168 				}
1169 				num_priv_xforms++;
1170 			}
1171 
1172 			/* Attach non shareable private xform data to ops */
1173 			for (i = 0; i < num_bufs; i++) {
1174 				priv_data = (struct priv_op_data *)(ops[i] + 1);
1175 				uint16_t xform_idx = priv_data->orig_idx;
1176 				ops[i]->private_xform = priv_xforms[xform_idx];
1177 			}
1178 		}
1179 
1180 		/* Enqueue and dequeue all operations */
1181 		num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
1182 		if (num_enqd < num_bufs) {
1183 			RTE_LOG(ERR, USER1,
1184 				"The operations could not be enqueued\n");
1185 			goto exit;
1186 		}
1187 
1188 		num_total_deqd = 0;
1189 		do {
1190 			/*
1191 			 * If retrying a dequeue call, wait for 10 ms to give
1192 			 * the driver enough time to process the operations
1193 			 */
1194 			if (deqd_retries != 0) {
1195 				/*
1196 				 * Avoid infinite loop if not all the
1197 				 * operations get out of the device
1198 				 */
1199 				if (deqd_retries == MAX_DEQD_RETRIES) {
1200 					RTE_LOG(ERR, USER1,
1201 						"Not all operations could be "
1202 						"dequeued\n");
1203 					goto exit;
1204 				}
1205 				usleep(DEQUEUE_WAIT_TIME);
1206 			}
1207 			num_deqd = rte_compressdev_dequeue_burst(0, 0,
1208 					&ops_processed[num_total_deqd], num_bufs);
1209 			num_total_deqd += num_deqd;
1210 			deqd_retries++;
1211 		} while (num_total_deqd < num_enqd);
1212 
1213 		deqd_retries = 0;
1214 	}
1215 
1216 	for (i = 0; i < num_bufs; i++) {
1217 		priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1218 		char engine[] = "zlib, (directly, no PMD)";
1219 		if (zlib_dir != ZLIB_DECOMPRESS && zlib_dir != ZLIB_ALL)
1220 			strlcpy(engine, "pmd", sizeof(engine));
1221 		RTE_LOG(DEBUG, USER1,
1222 			"Buffer %u decompressed by %s from %u to %u bytes\n",
1223 			buf_idx[priv_data->orig_idx], engine,
1224 			ops_processed[i]->consumed, ops_processed[i]->produced);
1225 		ops[i] = NULL;
1226 	}
1227 
1228 	/*
1229 	 * Check operation status and free the compressed source mbufs (the
1230 	 * destination mbufs and the decompress operation information are still needed)
1231 	 */
1232 	for (i = 0; i < num_bufs; i++) {
1233 		if (out_of_space && oos_zlib_compress) {
1234 			if (ops_processed[i]->status !=
1235 					RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
1236 				ret_status = TEST_FAILED;
1237 				RTE_LOG(ERR, USER1,
1238 					"Operation without expected out of "
1239 					"space status error\n");
1240 				goto exit;
1241 			} else
1242 				continue;
1243 		}
1244 
1245 		if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
1246 			RTE_LOG(ERR, USER1,
1247 				"Some operations were not successful\n");
1248 			goto exit;
1249 		}
1250 		priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1251 		rte_pktmbuf_free(comp_bufs[priv_data->orig_idx]);
1252 		comp_bufs[priv_data->orig_idx] = NULL;
1253 	}
1254 
1255 	if (out_of_space && oos_zlib_compress) {
1256 		ret_status = TEST_SUCCESS;
1257 		goto exit;
1258 	}
1259 
1260 	/*
1261 	 * Compare the original stream with the decompressed stream
1262 	 * (in size and the data)
1263 	 */
1264 	for (i = 0; i < num_bufs; i++) {
1265 		priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1266 		const char *buf1 = test_bufs[priv_data->orig_idx];
1267 		const char *buf2;
1268 		contig_buf = rte_malloc(NULL, ops_processed[i]->produced, 0);
1269 		if (contig_buf == NULL) {
1270 			RTE_LOG(ERR, USER1, "Contiguous buffer could not "
1271 					"be allocated\n");
1272 			goto exit;
1273 		}
1274 
1275 		buf2 = rte_pktmbuf_read(ops_processed[i]->m_dst, 0,
1276 				ops_processed[i]->produced, contig_buf);
1277 		if (compare_buffers(buf1, strlen(buf1) + 1,
1278 				buf2, ops_processed[i]->produced) < 0)
1279 			goto exit;
1280 
1281 		/* Test checksums */
1282 		if (compress_xforms[0]->compress.chksum !=
1283 				RTE_COMP_CHECKSUM_NONE) {
1284 			if (ops_processed[i]->output_chksum !=
1285 					compress_checksum[i]) {
1286 				RTE_LOG(ERR, USER1, "The checksums differ\n"
1287 			"Compression Checksum: %" PRIu64 "\tDecompression "
1288 			"Checksum: %" PRIu64 "\n", compress_checksum[i],
1289 			ops_processed[i]->output_chksum);
1290 				goto exit;
1291 			}
1292 		}
1293 
1294 		rte_free(contig_buf);
1295 		contig_buf = NULL;
1296 	}
1297 
1298 	ret_status = TEST_SUCCESS;
1299 
1300 exit:
1301 	/* Free resources */
1302 	for (i = 0; i < num_bufs; i++) {
1303 		rte_pktmbuf_free(uncomp_bufs[i]);
1304 		rte_pktmbuf_free(comp_bufs[i]);
1305 		rte_comp_op_free(ops[i]);
1306 		rte_comp_op_free(ops_processed[i]);
1307 	}
1308 	for (i = 0; i < num_priv_xforms; i++) {
1309 		if (priv_xforms[i] != NULL)
1310 			rte_compressdev_private_xform_free(0, priv_xforms[i]);
1311 	}
1312 	rte_free(contig_buf);
1313 
1314 	return ret_status;
1315 }
1316 
1317 static int
1318 test_compressdev_deflate_stateless_fixed(void)
1319 {
1320 	struct comp_testsuite_params *ts_params = &testsuite_params;
1321 	uint16_t i;
1322 	int ret;
1323 	const struct rte_compressdev_capabilities *capab;
1324 
1325 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1326 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1327 
1328 	if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
1329 		return -ENOTSUP;
1330 
1331 	struct rte_comp_xform *compress_xform =
1332 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1333 
1334 	if (compress_xform == NULL) {
1335 		RTE_LOG(ERR, USER1,
1336 			"Compress xform could not be created\n");
1337 		ret = TEST_FAILED;
1338 		goto exit;
1339 	}
1340 
1341 	memcpy(compress_xform, ts_params->def_comp_xform,
1342 			sizeof(struct rte_comp_xform));
1343 	compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_FIXED;
1344 
1345 	struct interim_data_params int_data = {
1346 		NULL,
1347 		1,
1348 		NULL,
1349 		&compress_xform,
1350 		&ts_params->def_decomp_xform,
1351 		1
1352 	};
1353 
1354 	struct test_data_params test_data = {
1355 		RTE_COMP_OP_STATELESS,
1356 		LB_BOTH,
1357 		ZLIB_DECOMPRESS,
1358 		0,
1359 		0
1360 	};
1361 
1362 	for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1363 		int_data.test_bufs = &compress_test_bufs[i];
1364 		int_data.buf_idx = &i;
1365 
1366 		/* Compress with compressdev, decompress with Zlib */
1367 		test_data.zlib_dir = ZLIB_DECOMPRESS;
1368 		ret = test_deflate_comp_decomp(&int_data, &test_data);
1369 		if (ret < 0)
1370 			goto exit;
1371 
1372 		/* Compress with Zlib, decompress with compressdev */
1373 		test_data.zlib_dir = ZLIB_COMPRESS;
1374 		ret = test_deflate_comp_decomp(&int_data, &test_data);
1375 		if (ret < 0)
1376 			goto exit;
1377 	}
1378 
1379 	ret = TEST_SUCCESS;
1380 
1381 exit:
1382 	rte_free(compress_xform);
1383 	return ret;
1384 }
1385 
1386 static int
1387 test_compressdev_deflate_stateless_dynamic(void)
1388 {
1389 	struct comp_testsuite_params *ts_params = &testsuite_params;
1390 	uint16_t i;
1391 	int ret;
1392 	struct rte_comp_xform *compress_xform =
1393 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1394 
1395 	const struct rte_compressdev_capabilities *capab;
1396 
1397 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1398 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1399 
1400 	if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
1401 		return -ENOTSUP;
1402 
1403 	if (compress_xform == NULL) {
1404 		RTE_LOG(ERR, USER1,
1405 			"Compress xform could not be created\n");
1406 		ret = TEST_FAILED;
1407 		goto exit;
1408 	}
1409 
1410 	memcpy(compress_xform, ts_params->def_comp_xform,
1411 			sizeof(struct rte_comp_xform));
1412 	compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC;
1413 
1414 	struct interim_data_params int_data = {
1415 		NULL,
1416 		1,
1417 		NULL,
1418 		&compress_xform,
1419 		&ts_params->def_decomp_xform,
1420 		1
1421 	};
1422 
1423 	struct test_data_params test_data = {
1424 		RTE_COMP_OP_STATELESS,
1425 		LB_BOTH,
1426 		ZLIB_DECOMPRESS,
1427 		0,
1428 		0
1429 	};
1430 
1431 	for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1432 		int_data.test_bufs = &compress_test_bufs[i];
1433 		int_data.buf_idx = &i;
1434 
1435 		/* Compress with compressdev, decompress with Zlib */
1436 		test_data.zlib_dir = ZLIB_DECOMPRESS;
1437 		ret = test_deflate_comp_decomp(&int_data, &test_data);
1438 		if (ret < 0)
1439 			goto exit;
1440 
1441 		/* Compress with Zlib, decompress with compressdev */
1442 		test_data.zlib_dir = ZLIB_COMPRESS;
1443 		ret = test_deflate_comp_decomp(&int_data, &test_data);
1444 		if (ret < 0)
1445 			goto exit;
1446 	}
1447 
1448 	ret = TEST_SUCCESS;
1449 
1450 exit:
1451 	rte_free(compress_xform);
1452 	return ret;
1453 }
1454 
1455 static int
1456 test_compressdev_deflate_stateless_multi_op(void)
1457 {
1458 	struct comp_testsuite_params *ts_params = &testsuite_params;
1459 	uint16_t num_bufs = RTE_DIM(compress_test_bufs);
1460 	uint16_t buf_idx[num_bufs];
1461 	uint16_t i;
1462 	int ret;
1463 
1464 	for (i = 0; i < num_bufs; i++)
1465 		buf_idx[i] = i;
1466 
1467 	struct interim_data_params int_data = {
1468 		compress_test_bufs,
1469 		num_bufs,
1470 		buf_idx,
1471 		&ts_params->def_comp_xform,
1472 		&ts_params->def_decomp_xform,
1473 		1
1474 	};
1475 
1476 	struct test_data_params test_data = {
1477 		RTE_COMP_OP_STATELESS,
1478 		LB_BOTH,
1479 		ZLIB_DECOMPRESS,
1480 		0,
1481 		0
1482 	};
1483 
1484 	/* Compress with compressdev, decompress with Zlib */
1485 	test_data.zlib_dir = ZLIB_DECOMPRESS;
1486 	ret = test_deflate_comp_decomp(&int_data, &test_data);
1487 	if (ret < 0)
1488 		return ret;
1489 
1490 	/* Compress with Zlib, decompress with compressdev */
1491 	test_data.zlib_dir = ZLIB_COMPRESS;
1492 	ret = test_deflate_comp_decomp(&int_data, &test_data);
1493 	if (ret < 0)
1494 		return ret;
1495 
1496 	return TEST_SUCCESS;
1497 }
1498 
1499 static int
1500 test_compressdev_deflate_stateless_multi_level(void)
1501 {
1502 	struct comp_testsuite_params *ts_params = &testsuite_params;
1503 	unsigned int level;
1504 	uint16_t i;
1505 	int ret;
1506 	struct rte_comp_xform *compress_xform =
1507 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1508 
1509 	if (compress_xform == NULL) {
1510 		RTE_LOG(ERR, USER1,
1511 			"Compress xform could not be created\n");
1512 		ret = TEST_FAILED;
1513 		goto exit;
1514 	}
1515 
1516 	memcpy(compress_xform, ts_params->def_comp_xform,
1517 			sizeof(struct rte_comp_xform));
1518 
1519 	struct interim_data_params int_data = {
1520 		NULL,
1521 		1,
1522 		NULL,
1523 		&compress_xform,
1524 		&ts_params->def_decomp_xform,
1525 		1
1526 	};
1527 
1528 	struct test_data_params test_data = {
1529 		RTE_COMP_OP_STATELESS,
1530 		LB_BOTH,
1531 		ZLIB_DECOMPRESS,
1532 		0,
1533 		0
1534 	};
1535 
1536 	for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1537 		int_data.test_bufs = &compress_test_bufs[i];
1538 		int_data.buf_idx = &i;
1539 
1540 		for (level = RTE_COMP_LEVEL_MIN; level <= RTE_COMP_LEVEL_MAX;
1541 				level++) {
1542 			compress_xform->compress.level = level;
1543 			/* Compress with compressdev, decompress with Zlib */
1544 			test_data.zlib_dir = ZLIB_DECOMPRESS;
1545 			ret = test_deflate_comp_decomp(&int_data, &test_data);
1546 			if (ret < 0)
1547 				goto exit;
1548 		}
1549 	}
1550 
1551 	ret = TEST_SUCCESS;
1552 
1553 exit:
1554 	rte_free(compress_xform);
1555 	return ret;
1556 }
1557 
1558 #define NUM_XFORMS 3
1559 static int
1560 test_compressdev_deflate_stateless_multi_xform(void)
1561 {
1562 	struct comp_testsuite_params *ts_params = &testsuite_params;
1563 	uint16_t num_bufs = NUM_XFORMS;
1564 	struct rte_comp_xform *compress_xforms[NUM_XFORMS] = {NULL};
1565 	struct rte_comp_xform *decompress_xforms[NUM_XFORMS] = {NULL};
1566 	const char *test_buffers[NUM_XFORMS];
1567 	uint16_t i;
1568 	unsigned int level = RTE_COMP_LEVEL_MIN;
1569 	uint16_t buf_idx[num_bufs];
1570 	int ret;
1571 
1572 	/* Create multiple xforms with various levels */
1573 	for (i = 0; i < NUM_XFORMS; i++) {
1574 		compress_xforms[i] = rte_malloc(NULL,
1575 				sizeof(struct rte_comp_xform), 0);
1576 		if (compress_xforms[i] == NULL) {
1577 			RTE_LOG(ERR, USER1,
1578 				"Compress xform could not be created\n");
1579 			ret = TEST_FAILED;
1580 			goto exit;
1581 		}
1582 
1583 		memcpy(compress_xforms[i], ts_params->def_comp_xform,
1584 				sizeof(struct rte_comp_xform));
1585 		compress_xforms[i]->compress.level = level;
1586 		level++;
1587 
1588 		decompress_xforms[i] = rte_malloc(NULL,
1589 				sizeof(struct rte_comp_xform), 0);
1590 		if (decompress_xforms[i] == NULL) {
1591 			RTE_LOG(ERR, USER1,
1592 				"Decompress xform could not be created\n");
1593 			ret = TEST_FAILED;
1594 			goto exit;
1595 		}
1596 
1597 		memcpy(decompress_xforms[i], ts_params->def_decomp_xform,
1598 				sizeof(struct rte_comp_xform));
1599 	}
1600 
1601 	for (i = 0; i < NUM_XFORMS; i++) {
1602 		buf_idx[i] = 0;
1603 		/* Use the same buffer in all sessions */
1604 		test_buffers[i] = compress_test_bufs[0];
1605 	}
1606 
1607 	struct interim_data_params int_data = {
1608 		test_buffers,
1609 		num_bufs,
1610 		buf_idx,
1611 		compress_xforms,
1612 		decompress_xforms,
1613 		NUM_XFORMS
1614 	};
1615 
1616 	struct test_data_params test_data = {
1617 		RTE_COMP_OP_STATELESS,
1618 		LB_BOTH,
1619 		ZLIB_DECOMPRESS,
1620 		0,
1621 		0
1622 	};
1623 
1624 	/* Compress with compressdev, decompress with Zlib */
1625 	ret = test_deflate_comp_decomp(&int_data, &test_data);
1626 	if (ret < 0)
1627 		goto exit;
1628 
1629 	ret = TEST_SUCCESS;
1630 
1631 exit:
1632 	for (i = 0; i < NUM_XFORMS; i++) {
1633 		rte_free(compress_xforms[i]);
1634 		rte_free(decompress_xforms[i]);
1635 	}
1636 
1637 	return ret;
1638 }
1639 
1640 static int
1641 test_compressdev_deflate_stateless_sgl(void)
1642 {
1643 	struct comp_testsuite_params *ts_params = &testsuite_params;
1644 	uint16_t i;
1645 	int ret;
1646 	const struct rte_compressdev_capabilities *capab;
1647 
1648 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1649 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1650 
1651 	if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
1652 		return -ENOTSUP;
1653 
1654 	struct interim_data_params int_data = {
1655 		NULL,
1656 		1,
1657 		NULL,
1658 		&ts_params->def_comp_xform,
1659 		&ts_params->def_decomp_xform,
1660 		1
1661 	};
1662 
1663 	struct test_data_params test_data = {
1664 		RTE_COMP_OP_STATELESS,
1665 		SGL_BOTH,
1666 		ZLIB_DECOMPRESS,
1667 		0,
1668 		0
1669 	};
1670 
1671 	for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1672 		int_data.test_bufs = &compress_test_bufs[i];
1673 		int_data.buf_idx = &i;
1674 
1675 		/* Compress with compressdev, decompress with Zlib */
1676 		test_data.zlib_dir = ZLIB_DECOMPRESS;
1677 		ret = test_deflate_comp_decomp(&int_data, &test_data);
1678 		if (ret < 0)
1679 			return ret;
1680 
1681 		/* Compress with Zlib, decompress with compressdev */
1682 		test_data.zlib_dir = ZLIB_COMPRESS;
1683 		ret = test_deflate_comp_decomp(&int_data, &test_data);
1684 		if (ret < 0)
1685 			return ret;
1686 
1687 		if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_LB_OUT) {
1688 			/* Compress with compressdev, decompress with Zlib */
1689 			test_data.zlib_dir = ZLIB_DECOMPRESS;
1690 			test_data.buff_type = SGL_TO_LB;
1691 			ret = test_deflate_comp_decomp(&int_data, &test_data);
1692 			if (ret < 0)
1693 				return ret;
1694 
1695 			/* Compress with Zlib, decompress with compressdev */
1696 			test_data.zlib_dir = ZLIB_COMPRESS;
1697 			test_data.buff_type = SGL_TO_LB;
1698 			ret = test_deflate_comp_decomp(&int_data, &test_data);
1699 			if (ret < 0)
1700 				return ret;
1701 		}
1702 
1703 		if (capab->comp_feature_flags & RTE_COMP_FF_OOP_LB_IN_SGL_OUT) {
1704 			/* Compress with compressdev, decompress with Zlib */
1705 			test_data.zlib_dir = ZLIB_DECOMPRESS;
1706 			test_data.buff_type = LB_TO_SGL;
1707 			ret = test_deflate_comp_decomp(&int_data, &test_data);
1708 			if (ret < 0)
1709 				return ret;
1710 
1711 			/* Compress with Zlib, decompress with compressdev */
1712 			test_data.zlib_dir = ZLIB_COMPRESS;
1713 			test_data.buff_type = LB_TO_SGL;
1714 			ret = test_deflate_comp_decomp(&int_data, &test_data);
1715 			if (ret < 0)
1716 				return ret;
1717 		}
1718 	}
1719 
1720 	return TEST_SUCCESS;
1721 }
1722 
1723 static int
1724 test_compressdev_deflate_stateless_checksum(void)
1725 {
1726 	struct comp_testsuite_params *ts_params = &testsuite_params;
1727 	uint16_t i;
1728 	int ret;
1729 	const struct rte_compressdev_capabilities *capab;
1730 
1731 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1732 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1733 
1734 	/* Check if driver supports any checksum */
1735 	if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM) == 0 &&
1736 			(capab->comp_feature_flags &
1737 			RTE_COMP_FF_ADLER32_CHECKSUM) == 0 &&
1738 			(capab->comp_feature_flags &
1739 			RTE_COMP_FF_CRC32_ADLER32_CHECKSUM) == 0)
1740 		return -ENOTSUP;
1741 
1742 	struct rte_comp_xform *compress_xform =
1743 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1744 	if (compress_xform == NULL) {
1745 		RTE_LOG(ERR, USER1, "Compress xform could not be created\n");
1746 		return TEST_FAILED;
1747 	}
1748 
1749 	memcpy(compress_xform, ts_params->def_comp_xform,
1750 			sizeof(struct rte_comp_xform));
1751 
1752 	struct rte_comp_xform *decompress_xform =
1753 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1754 	if (decompress_xform == NULL) {
1755 		RTE_LOG(ERR, USER1, "Decompress xform could not be created\n");
1756 		rte_free(compress_xform);
1757 		return TEST_FAILED;
1758 	}
1759 
1760 	memcpy(decompress_xform, ts_params->def_decomp_xform,
1761 			sizeof(struct rte_comp_xform));
1762 
1763 	struct interim_data_params int_data = {
1764 		NULL,
1765 		1,
1766 		NULL,
1767 		&compress_xform,
1768 		&decompress_xform,
1769 		1
1770 	};
1771 
1772 	struct test_data_params test_data = {
1773 		RTE_COMP_OP_STATELESS,
1774 		LB_BOTH,
1775 		ZLIB_DECOMPRESS,
1776 		0,
1777 		0
1778 	};
1779 
1780 	/* Check if driver supports crc32 checksum and test */
1781 	if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM)) {
1782 		compress_xform->compress.chksum = RTE_COMP_CHECKSUM_CRC32;
1783 		decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_CRC32;
1784 
1785 		for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1786 			/* Test each buffer in turn */
1787 			int_data.test_bufs = &compress_test_bufs[i];
1788 			int_data.buf_idx = &i;
1789 
1790 			/* Generate zlib checksum and test against selected
1791 			 * driver's decompression checksum
1792 			 */
1793 			test_data.zlib_dir = ZLIB_COMPRESS;
1794 			ret = test_deflate_comp_decomp(&int_data, &test_data);
1795 			if (ret < 0)
1796 				goto exit;
1797 
1798 			/* Generate compression and decompression
1799 			 * checksum of selected driver
1800 			 */
1801 			test_data.zlib_dir = ZLIB_NONE;
1802 			ret = test_deflate_comp_decomp(&int_data, &test_data);
1803 			if (ret < 0)
1804 				goto exit;
1805 		}
1806 	}
1807 
1808 	/* Check if driver supports adler32 checksum and test */
1809 	if ((capab->comp_feature_flags & RTE_COMP_FF_ADLER32_CHECKSUM)) {
1810 		compress_xform->compress.chksum = RTE_COMP_CHECKSUM_ADLER32;
1811 		decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_ADLER32;
1812 
1813 		for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1814 			int_data.test_bufs = &compress_test_bufs[i];
1815 			int_data.buf_idx = &i;
1816 
1817 			/* Generate zlib checksum and test against selected
1818 			 * driver's decompression checksum
1819 			 */
1820 			test_data.zlib_dir = ZLIB_COMPRESS;
1821 			ret = test_deflate_comp_decomp(&int_data, &test_data);
1822 			if (ret < 0)
1823 				goto exit;
1824 			/* Generate compression and decompression
1825 			 * checksum of selected driver
1826 			 */
1827 			test_data.zlib_dir = ZLIB_NONE;
1828 			ret = test_deflate_comp_decomp(&int_data, &test_data);
1829 			if (ret < 0)
1830 				goto exit;
1831 		}
1832 	}
1833 
1834 	/* Check if driver supports combined crc and adler checksum and test */
1835 	/* Check if driver supports combined crc32/adler32 checksum and test it */
1836 		compress_xform->compress.chksum =
1837 				RTE_COMP_CHECKSUM_CRC32_ADLER32;
1838 		decompress_xform->decompress.chksum =
1839 				RTE_COMP_CHECKSUM_CRC32_ADLER32;
1840 
1841 		for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1842 			int_data.test_bufs = &compress_test_bufs[i];
1843 			int_data.buf_idx = &i;
1844 
1845 			/* Generate compression and decompression
1846 			 * checksum of selected driver
1847 			 */
1848 			test_data.zlib_dir = ZLIB_NONE;
1849 			ret = test_deflate_comp_decomp(&int_data, &test_data);
1850 			if (ret < 0)
1851 				goto exit;
1852 		}
1853 	}
1854 
1855 	ret = TEST_SUCCESS;
1856 
1857 exit:
1858 	rte_free(compress_xform);
1859 	rte_free(decompress_xform);
1860 	return ret;
1861 }
1862 
1863 static int
1864 test_compressdev_out_of_space_buffer(void)
1865 {
1866 	struct comp_testsuite_params *ts_params = &testsuite_params;
1867 	int ret;
1868 	uint16_t i;
1869 	const struct rte_compressdev_capabilities *capab;
1870 
1871 	RTE_LOG(ERR, USER1, "This is a negative test, errors are expected\n");
1872 
1873 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1874 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1875 
1876 	if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
1877 		return -ENOTSUP;
1878 
1879 	struct rte_comp_xform *compress_xform =
1880 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1881 
1882 	if (compress_xform == NULL) {
1883 		RTE_LOG(ERR, USER1,
1884 			"Compress xform could not be created\n");
1885 		ret = TEST_FAILED;
1886 		goto exit;
1887 	}
1888 
1889 	struct interim_data_params int_data = {
1890 		.test_bufs = &compress_test_bufs[0],
1891 		.num_bufs = 1,
1892 		.buf_idx = &i,
1893 		.compress_xforms = &ts_params->def_comp_xform,
1894 		.decompress_xforms = &ts_params->def_decomp_xform,
1895 		.num_xforms = 1
1896 	};
1897 
1898 	struct test_data_params test_data = {
1899 		.state = RTE_COMP_OP_STATELESS,
1900 		.buff_type = LB_BOTH,
1901 		.zlib_dir = ZLIB_DECOMPRESS,
1902 		.out_of_space = 1, /* run out-of-space test */
1903 		.big_data = 0
1904 	};
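	/*
	 * With out_of_space set, test_deflate_comp_decomp() is expected to
	 * drive the operations into an out-of-space condition (see
	 * OUT_OF_SPACE_BUF), so the errors logged here are the point of the
	 * test, not failures.
	 */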
1905 	/* Compress with compressdev, decompress with Zlib */
1906 	test_data.zlib_dir = ZLIB_DECOMPRESS;
1907 	ret = test_deflate_comp_decomp(&int_data, &test_data);
1908 	if (ret < 0)
1909 		goto exit;
1910 
1911 	/* Compress with Zlib, decompress with compressdev */
1912 	test_data.zlib_dir = ZLIB_COMPRESS;
1913 	ret = test_deflate_comp_decomp(&int_data, &test_data);
1914 	if (ret < 0)
1915 		goto exit;
1916 
1917 	if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
1918 		/* Compress with compressdev, decompress with Zlib */
1919 		test_data.zlib_dir = ZLIB_DECOMPRESS;
1920 		test_data.buff_type = SGL_BOTH;
1921 		ret = test_deflate_comp_decomp(&int_data, &test_data);
1922 		if (ret < 0)
1923 			goto exit;
1924 
1925 		/* Compress with Zlib, decompress with compressdev */
1926 		test_data.zlib_dir = ZLIB_COMPRESS;
1927 		test_data.buff_type = SGL_BOTH;
1928 		ret = test_deflate_comp_decomp(&int_data, &test_data);
1929 		if (ret < 0)
1930 			goto exit;
1931 	}
1932 
1933 	ret = TEST_SUCCESS;
1934 
1935 exit:
1936 	rte_free(compress_xform);
1937 	return ret;
1938 }
1939 
1940 static int
1941 test_compressdev_deflate_stateless_dynamic_big(void)
1942 {
1943 	struct comp_testsuite_params *ts_params = &testsuite_params;
1944 	uint16_t i = 0;
1945 	int ret;
1946 	int j;
1947 	const struct rte_compressdev_capabilities *capab;
1948 	char *test_buffer = NULL;
1949 
1950 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1951 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1952 
1953 	if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
1954 		return -ENOTSUP;
1955 
1956 	if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
1957 		return -ENOTSUP;
1958 
1959 	test_buffer = rte_malloc(NULL, BIG_DATA_TEST_SIZE, 0);
1960 	if (test_buffer == NULL) {
1961 		RTE_LOG(ERR, USER1,
1962 			"Can't allocate buffer for the big-data test\n");
1963 		return TEST_FAILED;
1964 	}
1965 
1966 	struct interim_data_params int_data = {
1967 		.test_bufs = (const char * const *)&test_buffer,
1968 		.num_bufs = 1,
1969 		.buf_idx = &i,
1970 		.compress_xforms = &ts_params->def_comp_xform,
1971 		.decompress_xforms = &ts_params->def_decomp_xform,
1972 		.num_xforms = 1
1973 	};
1974 
1975 	struct test_data_params test_data = {
1976 		.state = RTE_COMP_OP_STATELESS,
1977 		.buff_type = SGL_BOTH,
1978 		.zlib_dir = ZLIB_DECOMPRESS,
1979 		.out_of_space = 0,
1980 		.big_data = 1
1981 	};
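	/*
	 * BIG_DATA_TEST_SIZE is larger than a single mbuf can hold, so the
	 * data must be chained across several mbufs (SGL_BOTH, and presumably
	 * the big mbuf pool via big_data); this is why
	 * RTE_COMP_FF_OOP_SGL_IN_SGL_OUT support is required above.
	 */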
1982 
1983 	ts_params->def_comp_xform->compress.deflate.huffman =
1984 						RTE_COMP_HUFFMAN_DYNAMIC;
1985 
1986 	/* Fill the buffer with non-zero pseudo-random bytes, NUL-terminated */
1987 	srand(BIG_DATA_TEST_SIZE);
1988 	for (j = 0; j < BIG_DATA_TEST_SIZE - 1; ++j)
1989 		test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1;
1990 	test_buffer[BIG_DATA_TEST_SIZE-1] = 0;
1991 
1992 	/* Compress with compressdev, decompress with Zlib */
1993 	test_data.zlib_dir = ZLIB_DECOMPRESS;
1994 	ret = test_deflate_comp_decomp(&int_data, &test_data);
1995 	if (ret < 0)
1996 		goto exit;
1997 
1998 	/* Compress with Zlib, decompress with compressdev */
1999 	test_data.zlib_dir = ZLIB_COMPRESS;
2000 	ret = test_deflate_comp_decomp(&int_data, &test_data);
2001 	if (ret < 0)
2002 		goto exit;
2003 
2004 	ret = TEST_SUCCESS;
2005 
2006 exit:
2007 	ts_params->def_comp_xform->compress.deflate.huffman =
2008 						RTE_COMP_HUFFMAN_DEFAULT;
2009 	rte_free(test_buffer);
2010 	return ret;
2011 }
2012 
2013 
2014 static struct unit_test_suite compressdev_testsuite = {
2015 	.suite_name = "compressdev unit test suite",
2016 	.setup = testsuite_setup,
2017 	.teardown = testsuite_teardown,
2018 	.unit_test_cases = {
2019 		TEST_CASE_ST(NULL, NULL,
2020 			test_compressdev_invalid_configuration),
2021 		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2022 			test_compressdev_deflate_stateless_fixed),
2023 		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2024 			test_compressdev_deflate_stateless_dynamic),
2025 		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2026 			test_compressdev_deflate_stateless_dynamic_big),
2027 		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2028 			test_compressdev_deflate_stateless_multi_op),
2029 		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2030 			test_compressdev_deflate_stateless_multi_level),
2031 		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2032 			test_compressdev_deflate_stateless_multi_xform),
2033 		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2034 			test_compressdev_deflate_stateless_sgl),
2035 		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2036 			test_compressdev_deflate_stateless_checksum),
2037 		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
2038 			test_compressdev_out_of_space_buffer),
2039 		TEST_CASES_END() /**< NULL terminate unit test array */
2040 	}
2041 };
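/*
 * New cases follow the same pattern, e.g. (hypothetical test name):
 *
 *	TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
 *		test_compressdev_deflate_stateful_decompress),
 *
 * inserted before the TEST_CASES_END() terminator above.
 */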
2042 
2043 static int
2044 test_compressdev(void)
2045 {
2046 	return unit_test_suite_runner(&compressdev_testsuite);
2047 }
2048 
2049 REGISTER_TEST_COMMAND(compressdev_autotest, test_compressdev);
2050