xref: /dpdk/app/test/test_compressdev.c (revision 7df9d02e68c5e44e008cb6151021c04c428ed735)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 - 2019 Intel Corporation
3  */
4 #include <string.h>
5 #include <zlib.h>
6 #include <math.h>
7 #include <stdlib.h>
8 
9 #include <rte_cycles.h>
10 #include <rte_malloc.h>
11 #include <rte_mempool.h>
12 #include <rte_mbuf.h>
13 #include <rte_compressdev.h>
14 #include <rte_string_fns.h>
15 
16 #include "test_compressdev_test_buffer.h"
17 #include "test.h"
18 
19 #define DIV_CEIL(a, b)  ((a) / (b) + ((a) % (b) != 0))
20 
21 #define DEFAULT_WINDOW_SIZE 15
22 #define DEFAULT_MEM_LEVEL 8
23 #define MAX_DEQD_RETRIES 10
24 #define DEQUEUE_WAIT_TIME 10000
25 
26 /*
27  * 30% extra size for compressed data compared to original data,
28  * in case data size cannot be reduced and it is actually bigger
29  * due to the compress block headers
30  */
31 #define COMPRESS_BUF_SIZE_RATIO 1.3
32 #define NUM_LARGE_MBUFS 16
33 #define SMALL_SEG_SIZE 256
34 #define MAX_SEGS 16
35 #define NUM_OPS 16
36 #define NUM_MAX_XFORMS 16
37 #define NUM_MAX_INFLIGHT_OPS 128
38 #define CACHE_SIZE 0
39 
40 #define ZLIB_CRC_CHECKSUM_WINDOW_BITS 31
41 #define ZLIB_HEADER_SIZE 2
42 #define ZLIB_TRAILER_SIZE 4
43 #define GZIP_HEADER_SIZE 10
44 #define GZIP_TRAILER_SIZE 8
45 
46 #define OUT_OF_SPACE_BUF 1
47 
48 #define MAX_MBUF_SEGMENT_SIZE 65535
49 #define MAX_DATA_MBUF_SIZE (MAX_MBUF_SEGMENT_SIZE - RTE_PKTMBUF_HEADROOM)
50 #define NUM_BIG_MBUFS 4
51 #define BIG_DATA_TEST_SIZE (MAX_DATA_MBUF_SIZE * NUM_BIG_MBUFS / 2)
52 
/* Human-readable names for enum rte_comp_huffman values, indexed by enum. */
const char *
huffman_type_strings[] = {
	[RTE_COMP_HUFFMAN_DEFAULT]	= "PMD default",
	[RTE_COMP_HUFFMAN_FIXED]	= "Fixed",
	[RTE_COMP_HUFFMAN_DYNAMIC]	= "Dynamic"
};
59 
/*
 * Selects which direction(s) of a test are performed by calling the zlib
 * library directly instead of going through the compressdev device.
 */
enum zlib_direction {
	ZLIB_NONE,	/* compressdev used for both directions */
	ZLIB_COMPRESS,	/* zlib compresses, compressdev decompresses */
	ZLIB_DECOMPRESS,	/* compressdev compresses, zlib decompresses */
	ZLIB_ALL	/* zlib used for both directions */
};
66 
/* Shape (linear vs. scatter-gather chained) of the test's src/dst mbufs. */
enum varied_buff {
	LB_BOTH = 0,	/* both input and output are linear */
	SGL_BOTH,	/* both input and output are chained */
	SGL_TO_LB,	/* input buffer is chained */
	LB_TO_SGL	/* output buffer is chained */
};
73 
/* Per-op private data stored after each rte_comp_op in the op pool. */
struct priv_op_data {
	/*
	 * Index the op had at enqueue time; dequeue order is not
	 * guaranteed, so this allows matching results to inputs.
	 */
	uint16_t orig_idx;
};
77 
/* Resources shared by all tests, created once in testsuite_setup(). */
struct comp_testsuite_params {
	struct rte_mempool *large_mbuf_pool;	/* linear bufs, sized to largest test string */
	struct rte_mempool *small_mbuf_pool;	/* SMALL_SEG_SIZE segments for SGL tests */
	struct rte_mempool *big_mbuf_pool;	/* max-size segments for big-data tests */
	struct rte_mempool *op_pool;		/* rte_comp_op pool (+ priv_op_data) */
	struct rte_comp_xform *def_comp_xform;	/* default compression xform */
	struct rte_comp_xform *def_decomp_xform;	/* default decompression xform */
};
86 
/* Per-test input data: which buffers to process and which xforms to use. */
struct interim_data_params {
	const char * const *test_bufs;	/* NUL-terminated test strings */
	unsigned int num_bufs;		/* number of entries in test_bufs */
	uint16_t *buf_idx;		/* original indices, used for logging */
	struct rte_comp_xform **compress_xforms;	/* applied round-robin to ops */
	struct rte_comp_xform **decompress_xforms;	/* applied round-robin to ops */
	unsigned int num_xforms;	/* entries in each xform array */
};
95 
/* Per-test control knobs: op type, buffer layout and special-case modes. */
struct test_data_params {
	enum rte_comp_op_type state;	/* stateless/stateful operation type */
	enum varied_buff buff_type;	/* linear vs. chained src/dst mbufs */
	enum zlib_direction zlib_dir;	/* which side(s) run through zlib directly */
	unsigned int out_of_space;	/* 1 = force an undersized dst buffer */
	unsigned int big_data;		/* 1 = use the big mbuf pool / big buffers */
};
103 
104 static struct comp_testsuite_params testsuite_params = { 0 };
105 
106 static void
107 testsuite_teardown(void)
108 {
109 	struct comp_testsuite_params *ts_params = &testsuite_params;
110 
111 	if (rte_mempool_in_use_count(ts_params->large_mbuf_pool))
112 		RTE_LOG(ERR, USER1, "Large mbuf pool still has unfreed bufs\n");
113 	if (rte_mempool_in_use_count(ts_params->small_mbuf_pool))
114 		RTE_LOG(ERR, USER1, "Small mbuf pool still has unfreed bufs\n");
115 	if (rte_mempool_in_use_count(ts_params->big_mbuf_pool))
116 		RTE_LOG(ERR, USER1, "Big mbuf pool still has unfreed bufs\n");
117 	if (rte_mempool_in_use_count(ts_params->op_pool))
118 		RTE_LOG(ERR, USER1, "op pool still has unfreed ops\n");
119 
120 	rte_mempool_free(ts_params->large_mbuf_pool);
121 	rte_mempool_free(ts_params->small_mbuf_pool);
122 	rte_mempool_free(ts_params->big_mbuf_pool);
123 	rte_mempool_free(ts_params->op_pool);
124 	rte_free(ts_params->def_comp_xform);
125 	rte_free(ts_params->def_decomp_xform);
126 }
127 
128 static int
129 testsuite_setup(void)
130 {
131 	struct comp_testsuite_params *ts_params = &testsuite_params;
132 	uint32_t max_buf_size = 0;
133 	unsigned int i;
134 
135 	if (rte_compressdev_count() == 0) {
136 		RTE_LOG(ERR, USER1, "Need at least one compress device\n");
137 		return TEST_FAILED;
138 	}
139 
140 	RTE_LOG(NOTICE, USER1, "Running tests on device %s\n",
141 				rte_compressdev_name_get(0));
142 
143 	for (i = 0; i < RTE_DIM(compress_test_bufs); i++)
144 		max_buf_size = RTE_MAX(max_buf_size,
145 				strlen(compress_test_bufs[i]) + 1);
146 
147 	/*
148 	 * Buffers to be used in compression and decompression.
149 	 * Since decompressed data might be larger than
150 	 * compressed data (due to block header),
151 	 * buffers should be big enough for both cases.
152 	 */
153 	max_buf_size *= COMPRESS_BUF_SIZE_RATIO;
154 	ts_params->large_mbuf_pool = rte_pktmbuf_pool_create("large_mbuf_pool",
155 			NUM_LARGE_MBUFS,
156 			CACHE_SIZE, 0,
157 			max_buf_size + RTE_PKTMBUF_HEADROOM,
158 			rte_socket_id());
159 	if (ts_params->large_mbuf_pool == NULL) {
160 		RTE_LOG(ERR, USER1, "Large mbuf pool could not be created\n");
161 		return TEST_FAILED;
162 	}
163 
164 	/* Create mempool with smaller buffers for SGL testing */
165 	ts_params->small_mbuf_pool = rte_pktmbuf_pool_create("small_mbuf_pool",
166 			NUM_LARGE_MBUFS * MAX_SEGS,
167 			CACHE_SIZE, 0,
168 			SMALL_SEG_SIZE + RTE_PKTMBUF_HEADROOM,
169 			rte_socket_id());
170 	if (ts_params->small_mbuf_pool == NULL) {
171 		RTE_LOG(ERR, USER1, "Small mbuf pool could not be created\n");
172 		goto exit;
173 	}
174 
175 	/* Create mempool with big buffers for SGL testing */
176 	ts_params->big_mbuf_pool = rte_pktmbuf_pool_create("big_mbuf_pool",
177 			NUM_BIG_MBUFS + 1,
178 			CACHE_SIZE, 0,
179 			MAX_MBUF_SEGMENT_SIZE,
180 			rte_socket_id());
181 	if (ts_params->big_mbuf_pool == NULL) {
182 		RTE_LOG(ERR, USER1, "Big mbuf pool could not be created\n");
183 		goto exit;
184 	}
185 
186 	ts_params->op_pool = rte_comp_op_pool_create("op_pool", NUM_OPS,
187 				0, sizeof(struct priv_op_data),
188 				rte_socket_id());
189 	if (ts_params->op_pool == NULL) {
190 		RTE_LOG(ERR, USER1, "Operation pool could not be created\n");
191 		goto exit;
192 	}
193 
194 	ts_params->def_comp_xform =
195 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
196 	if (ts_params->def_comp_xform == NULL) {
197 		RTE_LOG(ERR, USER1,
198 			"Default compress xform could not be created\n");
199 		goto exit;
200 	}
201 	ts_params->def_decomp_xform =
202 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
203 	if (ts_params->def_decomp_xform == NULL) {
204 		RTE_LOG(ERR, USER1,
205 			"Default decompress xform could not be created\n");
206 		goto exit;
207 	}
208 
209 	/* Initializes default values for compress/decompress xforms */
210 	ts_params->def_comp_xform->type = RTE_COMP_COMPRESS;
211 	ts_params->def_comp_xform->compress.algo = RTE_COMP_ALGO_DEFLATE,
212 	ts_params->def_comp_xform->compress.deflate.huffman =
213 						RTE_COMP_HUFFMAN_DEFAULT;
214 	ts_params->def_comp_xform->compress.level = RTE_COMP_LEVEL_PMD_DEFAULT;
215 	ts_params->def_comp_xform->compress.chksum = RTE_COMP_CHECKSUM_NONE;
216 	ts_params->def_comp_xform->compress.window_size = DEFAULT_WINDOW_SIZE;
217 
218 	ts_params->def_decomp_xform->type = RTE_COMP_DECOMPRESS;
219 	ts_params->def_decomp_xform->decompress.algo = RTE_COMP_ALGO_DEFLATE,
220 	ts_params->def_decomp_xform->decompress.chksum = RTE_COMP_CHECKSUM_NONE;
221 	ts_params->def_decomp_xform->decompress.window_size = DEFAULT_WINDOW_SIZE;
222 
223 	return TEST_SUCCESS;
224 
225 exit:
226 	testsuite_teardown();
227 
228 	return TEST_FAILED;
229 }
230 
231 static int
232 generic_ut_setup(void)
233 {
234 	/* Configure compressdev (one device, one queue pair) */
235 	struct rte_compressdev_config config = {
236 		.socket_id = rte_socket_id(),
237 		.nb_queue_pairs = 1,
238 		.max_nb_priv_xforms = NUM_MAX_XFORMS,
239 		.max_nb_streams = 0
240 	};
241 
242 	if (rte_compressdev_configure(0, &config) < 0) {
243 		RTE_LOG(ERR, USER1, "Device configuration failed\n");
244 		return -1;
245 	}
246 
247 	if (rte_compressdev_queue_pair_setup(0, 0, NUM_MAX_INFLIGHT_OPS,
248 			rte_socket_id()) < 0) {
249 		RTE_LOG(ERR, USER1, "Queue pair setup failed\n");
250 		return -1;
251 	}
252 
253 	if (rte_compressdev_start(0) < 0) {
254 		RTE_LOG(ERR, USER1, "Device could not be started\n");
255 		return -1;
256 	}
257 
258 	return 0;
259 }
260 
261 static void
262 generic_ut_teardown(void)
263 {
264 	rte_compressdev_stop(0);
265 	if (rte_compressdev_close(0) < 0)
266 		RTE_LOG(ERR, USER1, "Device could not be closed\n");
267 }
268 
269 static int
270 test_compressdev_invalid_configuration(void)
271 {
272 	struct rte_compressdev_config invalid_config;
273 	struct rte_compressdev_config valid_config = {
274 		.socket_id = rte_socket_id(),
275 		.nb_queue_pairs = 1,
276 		.max_nb_priv_xforms = NUM_MAX_XFORMS,
277 		.max_nb_streams = 0
278 	};
279 	struct rte_compressdev_info dev_info;
280 
281 	/* Invalid configuration with 0 queue pairs */
282 	memcpy(&invalid_config, &valid_config,
283 			sizeof(struct rte_compressdev_config));
284 	invalid_config.nb_queue_pairs = 0;
285 
286 	TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
287 			"Device configuration was successful "
288 			"with no queue pairs (invalid)\n");
289 
290 	/*
291 	 * Invalid configuration with too many queue pairs
292 	 * (if there is an actual maximum number of queue pairs)
293 	 */
294 	rte_compressdev_info_get(0, &dev_info);
295 	if (dev_info.max_nb_queue_pairs != 0) {
296 		memcpy(&invalid_config, &valid_config,
297 			sizeof(struct rte_compressdev_config));
298 		invalid_config.nb_queue_pairs = dev_info.max_nb_queue_pairs + 1;
299 
300 		TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
301 				"Device configuration was successful "
302 				"with too many queue pairs (invalid)\n");
303 	}
304 
305 	/* Invalid queue pair setup, with no number of queue pairs set */
306 	TEST_ASSERT_FAIL(rte_compressdev_queue_pair_setup(0, 0,
307 				NUM_MAX_INFLIGHT_OPS, rte_socket_id()),
308 			"Queue pair setup was successful "
309 			"with no queue pairs set (invalid)\n");
310 
311 	return TEST_SUCCESS;
312 }
313 
314 static int
315 compare_buffers(const char *buffer1, uint32_t buffer1_len,
316 		const char *buffer2, uint32_t buffer2_len)
317 {
318 	if (buffer1_len != buffer2_len) {
319 		RTE_LOG(ERR, USER1, "Buffer lengths are different\n");
320 		return -1;
321 	}
322 
323 	if (memcmp(buffer1, buffer2, buffer1_len) != 0) {
324 		RTE_LOG(ERR, USER1, "Buffers are different\n");
325 		return -1;
326 	}
327 
328 	return 0;
329 }
330 
331 /*
332  * Maps compressdev and Zlib flush flags
333  */
334 static int
335 map_zlib_flush_flag(enum rte_comp_flush_flag flag)
336 {
337 	switch (flag) {
338 	case RTE_COMP_FLUSH_NONE:
339 		return Z_NO_FLUSH;
340 	case RTE_COMP_FLUSH_SYNC:
341 		return Z_SYNC_FLUSH;
342 	case RTE_COMP_FLUSH_FULL:
343 		return Z_FULL_FLUSH;
344 	case RTE_COMP_FLUSH_FINAL:
345 		return Z_FINISH;
346 	/*
347 	 * There should be only the values above,
348 	 * so this should never happen
349 	 */
350 	default:
351 		return -1;
352 	}
353 }
354 
/*
 * Compresses op->m_src into op->m_dst by calling the zlib library
 * directly, mirroring a compressdev PMD so results can be cross-checked.
 * Fills in op->consumed, op->produced, op->status and op->output_chksum.
 * Returns 0 on success, non-zero on zlib or allocation failure.
 */
static int
compress_zlib(struct rte_comp_op *op,
		const struct rte_comp_xform *xform, int mem_level)
{
	z_stream stream;
	int zlib_flush;
	int strategy, window_bits, comp_level;
	int ret = TEST_FAILED;
	uint8_t *single_src_buf = NULL;	/* linearized copy of a chained source */
	uint8_t *single_dst_buf = NULL;	/* linear staging area for a chained dest */

	/* initialize zlib stream */
	stream.zalloc = Z_NULL;
	stream.zfree = Z_NULL;
	stream.opaque = Z_NULL;

	if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED)
		strategy = Z_FIXED;
	else
		strategy = Z_DEFAULT_STRATEGY;

	/*
	 * Window bits is the base two logarithm of the window size (in bytes).
	 * When doing raw DEFLATE, this number will be negative.
	 * Per zlib: a positive value selects the zlib format (Adler-32),
	 * and ZLIB_CRC_CHECKSUM_WINDOW_BITS selects gzip framing (CRC-32).
	 */
	window_bits = -(xform->compress.window_size);
	if (xform->compress.chksum == RTE_COMP_CHECKSUM_ADLER32)
		window_bits *= -1;
	else if (xform->compress.chksum == RTE_COMP_CHECKSUM_CRC32)
		window_bits = ZLIB_CRC_CHECKSUM_WINDOW_BITS;

	comp_level = xform->compress.level;

	if (comp_level != RTE_COMP_LEVEL_NONE)
		ret = deflateInit2(&stream, comp_level, Z_DEFLATED,
			window_bits, mem_level, strategy);
	else
		ret = deflateInit(&stream, Z_NO_COMPRESSION);

	if (ret != Z_OK) {
		printf("Zlib deflate could not be initialized\n");
		goto exit;
	}

	/* Assuming stateless operation */
	/* SGL Input: linearize the chained source before handing it to zlib */
	if (op->m_src->nb_segs > 1) {
		single_src_buf = rte_malloc(NULL,
				rte_pktmbuf_pkt_len(op->m_src), 0);
		if (single_src_buf == NULL) {
			RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
			goto exit;
		}

		if (rte_pktmbuf_read(op->m_src, op->src.offset,
					rte_pktmbuf_pkt_len(op->m_src) -
					op->src.offset,
					single_src_buf) == NULL) {
			RTE_LOG(ERR, USER1,
				"Buffer could not be read entirely\n");
			goto exit;
		}

		stream.avail_in = op->src.length;
		stream.next_in = single_src_buf;

	} else {
		stream.avail_in = op->src.length;
		stream.next_in = rte_pktmbuf_mtod_offset(op->m_src, uint8_t *,
				op->src.offset);
	}
	/* SGL output: compress into a temporary linear buffer first */
	if (op->m_dst->nb_segs > 1) {

		single_dst_buf = rte_malloc(NULL,
				rte_pktmbuf_pkt_len(op->m_dst), 0);
		if (single_dst_buf == NULL) {
			RTE_LOG(ERR, USER1,
				"Buffer could not be allocated\n");
			goto exit;
		}

		stream.avail_out = op->m_dst->pkt_len;
		stream.next_out = single_dst_buf;

	} else {/* linear output */
		stream.avail_out = op->m_dst->data_len;
		stream.next_out = rte_pktmbuf_mtod_offset(op->m_dst, uint8_t *,
				op->dst.offset);
	}

	/* Stateless operation, all buffer will be compressed in one go */
	zlib_flush = map_zlib_flush_flag(op->flush_flag);
	ret = deflate(&stream, zlib_flush);

	if (stream.avail_in != 0) {
		RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
		goto exit;
	}

	/*
	 * NOTE(review): deflate() may stop early with Z_OK (0), in which
	 * case this returns 0; the avail_in check above is relied on to
	 * catch truncated input — confirm this covers out-of-space cases.
	 */
	if (ret != Z_STREAM_END)
		goto exit;

	/* Copy staged data from the linear buffer into the destination SGL */
	if (op->m_dst->nb_segs > 1) {
		uint32_t remaining_data = stream.total_out;
		uint8_t *src_data = single_dst_buf;
		struct rte_mbuf *dst_buf = op->m_dst;

		while (remaining_data > 0) {
			uint8_t *dst_data = rte_pktmbuf_mtod_offset(dst_buf,
						uint8_t *, op->dst.offset);
			/* Last segment */
			if (remaining_data < dst_buf->data_len) {
				memcpy(dst_data, src_data, remaining_data);
				remaining_data = 0;
			} else {
				memcpy(dst_data, src_data, dst_buf->data_len);
				remaining_data -= dst_buf->data_len;
				src_data += dst_buf->data_len;
				dst_buf = dst_buf->next;
			}
		}
	}

	op->consumed = stream.total_in;
	/*
	 * Strip the zlib/gzip framing so only the raw DEFLATE payload
	 * remains in m_dst; op->produced counts the payload only.
	 */
	if (xform->compress.chksum == RTE_COMP_CHECKSUM_ADLER32) {
		rte_pktmbuf_adj(op->m_dst, ZLIB_HEADER_SIZE);
		rte_pktmbuf_trim(op->m_dst, ZLIB_TRAILER_SIZE);
		op->produced = stream.total_out - (ZLIB_HEADER_SIZE +
				ZLIB_TRAILER_SIZE);
	} else if (xform->compress.chksum == RTE_COMP_CHECKSUM_CRC32) {
		rte_pktmbuf_adj(op->m_dst, GZIP_HEADER_SIZE);
		rte_pktmbuf_trim(op->m_dst, GZIP_TRAILER_SIZE);
		op->produced = stream.total_out - (GZIP_HEADER_SIZE +
				GZIP_TRAILER_SIZE);
	} else
		op->produced = stream.total_out;

	op->status = RTE_COMP_OP_STATUS_SUCCESS;
	/* per zlib, stream.adler holds the CRC-32 when gzip framing is used */
	op->output_chksum = stream.adler;

	deflateReset(&stream);

	ret = 0;
exit:
	deflateEnd(&stream);
	rte_free(single_src_buf);
	rte_free(single_dst_buf);

	return ret;
}
507 
508 static int
509 decompress_zlib(struct rte_comp_op *op,
510 		const struct rte_comp_xform *xform)
511 {
512 	z_stream stream;
513 	int window_bits;
514 	int zlib_flush;
515 	int ret = TEST_FAILED;
516 	uint8_t *single_src_buf = NULL;
517 	uint8_t *single_dst_buf = NULL;
518 
519 	/* initialize zlib stream */
520 	stream.zalloc = Z_NULL;
521 	stream.zfree = Z_NULL;
522 	stream.opaque = Z_NULL;
523 
524 	/*
525 	 * Window bits is the base two logarithm of the window size (in bytes).
526 	 * When doing raw DEFLATE, this number will be negative.
527 	 */
528 	window_bits = -(xform->decompress.window_size);
529 	ret = inflateInit2(&stream, window_bits);
530 
531 	if (ret != Z_OK) {
532 		printf("Zlib deflate could not be initialized\n");
533 		goto exit;
534 	}
535 
536 	/* Assuming stateless operation */
537 	/* SGL */
538 	if (op->m_src->nb_segs > 1) {
539 		single_src_buf = rte_malloc(NULL,
540 				rte_pktmbuf_pkt_len(op->m_src), 0);
541 		if (single_src_buf == NULL) {
542 			RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
543 			goto exit;
544 		}
545 		single_dst_buf = rte_malloc(NULL,
546 				rte_pktmbuf_pkt_len(op->m_dst), 0);
547 		if (single_dst_buf == NULL) {
548 			RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
549 			goto exit;
550 		}
551 		if (rte_pktmbuf_read(op->m_src, 0,
552 					rte_pktmbuf_pkt_len(op->m_src),
553 					single_src_buf) == NULL) {
554 			RTE_LOG(ERR, USER1,
555 				"Buffer could not be read entirely\n");
556 			goto exit;
557 		}
558 
559 		stream.avail_in = op->src.length;
560 		stream.next_in = single_src_buf;
561 		stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
562 		stream.next_out = single_dst_buf;
563 
564 	} else {
565 		stream.avail_in = op->src.length;
566 		stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
567 		stream.avail_out = op->m_dst->data_len;
568 		stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
569 	}
570 
571 	/* Stateless operation, all buffer will be compressed in one go */
572 	zlib_flush = map_zlib_flush_flag(op->flush_flag);
573 	ret = inflate(&stream, zlib_flush);
574 
575 	if (stream.avail_in != 0) {
576 		RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
577 		goto exit;
578 	}
579 
580 	if (ret != Z_STREAM_END)
581 		goto exit;
582 
583 	if (op->m_src->nb_segs > 1) {
584 		uint32_t remaining_data = stream.total_out;
585 		uint8_t *src_data = single_dst_buf;
586 		struct rte_mbuf *dst_buf = op->m_dst;
587 
588 		while (remaining_data > 0) {
589 			uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
590 					uint8_t *);
591 			/* Last segment */
592 			if (remaining_data < dst_buf->data_len) {
593 				memcpy(dst_data, src_data, remaining_data);
594 				remaining_data = 0;
595 			} else {
596 				memcpy(dst_data, src_data, dst_buf->data_len);
597 				remaining_data -= dst_buf->data_len;
598 				src_data += dst_buf->data_len;
599 				dst_buf = dst_buf->next;
600 			}
601 		}
602 	}
603 
604 	op->consumed = stream.total_in;
605 	op->produced = stream.total_out;
606 	op->status = RTE_COMP_OP_STATUS_SUCCESS;
607 
608 	inflateReset(&stream);
609 
610 	ret = 0;
611 exit:
612 	inflateEnd(&stream);
613 
614 	return ret;
615 }
616 
617 static int
618 prepare_sgl_bufs(const char *test_buf, struct rte_mbuf *head_buf,
619 		uint32_t total_data_size,
620 		struct rte_mempool *small_mbuf_pool,
621 		struct rte_mempool *large_mbuf_pool,
622 		uint8_t limit_segs_in_sgl,
623 		uint16_t seg_size)
624 {
625 	uint32_t remaining_data = total_data_size;
626 	uint16_t num_remaining_segs = DIV_CEIL(remaining_data, seg_size);
627 	struct rte_mempool *pool;
628 	struct rte_mbuf *next_seg;
629 	uint32_t data_size;
630 	char *buf_ptr;
631 	const char *data_ptr = test_buf;
632 	uint16_t i;
633 	int ret;
634 
635 	if (limit_segs_in_sgl != 0 && num_remaining_segs > limit_segs_in_sgl)
636 		num_remaining_segs = limit_segs_in_sgl - 1;
637 
638 	/*
639 	 * Allocate data in the first segment (header) and
640 	 * copy data if test buffer is provided
641 	 */
642 	if (remaining_data < seg_size)
643 		data_size = remaining_data;
644 	else
645 		data_size = seg_size;
646 	buf_ptr = rte_pktmbuf_append(head_buf, data_size);
647 	if (buf_ptr == NULL) {
648 		RTE_LOG(ERR, USER1,
649 			"Not enough space in the 1st buffer\n");
650 		return -1;
651 	}
652 
653 	if (data_ptr != NULL) {
654 		/* Copy characters without NULL terminator */
655 		strncpy(buf_ptr, data_ptr, data_size);
656 		data_ptr += data_size;
657 	}
658 	remaining_data -= data_size;
659 	num_remaining_segs--;
660 
661 	/*
662 	 * Allocate the rest of the segments,
663 	 * copy the rest of the data and chain the segments.
664 	 */
665 	for (i = 0; i < num_remaining_segs; i++) {
666 
667 		if (i == (num_remaining_segs - 1)) {
668 			/* last segment */
669 			if (remaining_data > seg_size)
670 				pool = large_mbuf_pool;
671 			else
672 				pool = small_mbuf_pool;
673 			data_size = remaining_data;
674 		} else {
675 			data_size = seg_size;
676 			pool = small_mbuf_pool;
677 		}
678 
679 		next_seg = rte_pktmbuf_alloc(pool);
680 		if (next_seg == NULL) {
681 			RTE_LOG(ERR, USER1,
682 				"New segment could not be allocated "
683 				"from the mempool\n");
684 			return -1;
685 		}
686 		buf_ptr = rte_pktmbuf_append(next_seg, data_size);
687 		if (buf_ptr == NULL) {
688 			RTE_LOG(ERR, USER1,
689 				"Not enough space in the buffer\n");
690 			rte_pktmbuf_free(next_seg);
691 			return -1;
692 		}
693 		if (data_ptr != NULL) {
694 			/* Copy characters without NULL terminator */
695 			strncpy(buf_ptr, data_ptr, data_size);
696 			data_ptr += data_size;
697 		}
698 		remaining_data -= data_size;
699 
700 		ret = rte_pktmbuf_chain(head_buf, next_seg);
701 		if (ret != 0) {
702 			rte_pktmbuf_free(next_seg);
703 			RTE_LOG(ERR, USER1,
704 				"Segment could not chained\n");
705 			return -1;
706 		}
707 	}
708 
709 	return 0;
710 }
711 
712 /*
713  * Compresses and decompresses buffer with compressdev API and Zlib API
714  */
715 static int
716 test_deflate_comp_decomp(const struct interim_data_params *int_data,
717 		const struct test_data_params *test_data)
718 {
719 	struct comp_testsuite_params *ts_params = &testsuite_params;
720 	const char * const *test_bufs = int_data->test_bufs;
721 	unsigned int num_bufs = int_data->num_bufs;
722 	uint16_t *buf_idx = int_data->buf_idx;
723 	struct rte_comp_xform **compress_xforms = int_data->compress_xforms;
724 	struct rte_comp_xform **decompress_xforms = int_data->decompress_xforms;
725 	unsigned int num_xforms = int_data->num_xforms;
726 	enum rte_comp_op_type state = test_data->state;
727 	unsigned int buff_type = test_data->buff_type;
728 	unsigned int out_of_space = test_data->out_of_space;
729 	unsigned int big_data = test_data->big_data;
730 	enum zlib_direction zlib_dir = test_data->zlib_dir;
731 	int ret_status = -1;
732 	int ret;
733 	struct rte_mbuf *uncomp_bufs[num_bufs];
734 	struct rte_mbuf *comp_bufs[num_bufs];
735 	struct rte_comp_op *ops[num_bufs];
736 	struct rte_comp_op *ops_processed[num_bufs];
737 	void *priv_xforms[num_bufs];
738 	uint16_t num_enqd, num_deqd, num_total_deqd;
739 	uint16_t num_priv_xforms = 0;
740 	unsigned int deqd_retries = 0;
741 	struct priv_op_data *priv_data;
742 	char *buf_ptr;
743 	unsigned int i;
744 	struct rte_mempool *buf_pool;
745 	uint32_t data_size;
746 	/* Compressing with CompressDev */
747 	unsigned int oos_zlib_decompress =
748 			(zlib_dir == ZLIB_NONE || zlib_dir == ZLIB_DECOMPRESS);
749 	/* Decompressing with CompressDev */
750 	unsigned int oos_zlib_compress =
751 			(zlib_dir == ZLIB_NONE || zlib_dir == ZLIB_COMPRESS);
752 	const struct rte_compressdev_capabilities *capa =
753 		rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
754 	char *contig_buf = NULL;
755 	uint64_t compress_checksum[num_bufs];
756 
757 	/* Initialize all arrays to NULL */
758 	memset(uncomp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
759 	memset(comp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
760 	memset(ops, 0, sizeof(struct rte_comp_op *) * num_bufs);
761 	memset(ops_processed, 0, sizeof(struct rte_comp_op *) * num_bufs);
762 	memset(priv_xforms, 0, sizeof(void *) * num_bufs);
763 
764 	if (big_data)
765 		buf_pool = ts_params->big_mbuf_pool;
766 	else if (buff_type == SGL_BOTH)
767 		buf_pool = ts_params->small_mbuf_pool;
768 	else
769 		buf_pool = ts_params->large_mbuf_pool;
770 
771 	/* Prepare the source mbufs with the data */
772 	ret = rte_pktmbuf_alloc_bulk(buf_pool,
773 				uncomp_bufs, num_bufs);
774 	if (ret < 0) {
775 		RTE_LOG(ERR, USER1,
776 			"Source mbufs could not be allocated "
777 			"from the mempool\n");
778 		goto exit;
779 	}
780 
781 	if (buff_type == SGL_BOTH || buff_type == SGL_TO_LB) {
782 		for (i = 0; i < num_bufs; i++) {
783 			data_size = strlen(test_bufs[i]) + 1;
784 			if (prepare_sgl_bufs(test_bufs[i], uncomp_bufs[i],
785 			    data_size,
786 			    big_data ? buf_pool : ts_params->small_mbuf_pool,
787 			    big_data ? buf_pool : ts_params->large_mbuf_pool,
788 			    big_data ? 0 : MAX_SEGS,
789 			    big_data ? MAX_DATA_MBUF_SIZE : SMALL_SEG_SIZE) < 0)
790 				goto exit;
791 		}
792 	} else {
793 		for (i = 0; i < num_bufs; i++) {
794 			data_size = strlen(test_bufs[i]) + 1;
795 			buf_ptr = rte_pktmbuf_append(uncomp_bufs[i], data_size);
796 			strlcpy(buf_ptr, test_bufs[i], data_size);
797 		}
798 	}
799 
800 	/* Prepare the destination mbufs */
801 	ret = rte_pktmbuf_alloc_bulk(buf_pool, comp_bufs, num_bufs);
802 	if (ret < 0) {
803 		RTE_LOG(ERR, USER1,
804 			"Destination mbufs could not be allocated "
805 			"from the mempool\n");
806 		goto exit;
807 	}
808 
809 	if (buff_type == SGL_BOTH || buff_type == LB_TO_SGL) {
810 		for (i = 0; i < num_bufs; i++) {
811 			if (out_of_space == 1 && oos_zlib_decompress)
812 				data_size = OUT_OF_SPACE_BUF;
813 			else
814 				(data_size = strlen(test_bufs[i]) *
815 					COMPRESS_BUF_SIZE_RATIO);
816 
817 			if (prepare_sgl_bufs(NULL, comp_bufs[i],
818 			      data_size,
819 			      big_data ? buf_pool : ts_params->small_mbuf_pool,
820 			      big_data ? buf_pool : ts_params->large_mbuf_pool,
821 			      big_data ? 0 : MAX_SEGS,
822 			      big_data ? MAX_DATA_MBUF_SIZE : SMALL_SEG_SIZE)
823 					< 0)
824 				goto exit;
825 		}
826 
827 	} else {
828 		for (i = 0; i < num_bufs; i++) {
829 			if (out_of_space == 1 && oos_zlib_decompress)
830 				data_size = OUT_OF_SPACE_BUF;
831 			else
832 				(data_size = strlen(test_bufs[i]) *
833 					COMPRESS_BUF_SIZE_RATIO);
834 
835 			rte_pktmbuf_append(comp_bufs[i], data_size);
836 		}
837 	}
838 
839 	/* Build the compression operations */
840 	ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
841 	if (ret < 0) {
842 		RTE_LOG(ERR, USER1,
843 			"Compress operations could not be allocated "
844 			"from the mempool\n");
845 		goto exit;
846 	}
847 
848 
849 	for (i = 0; i < num_bufs; i++) {
850 		ops[i]->m_src = uncomp_bufs[i];
851 		ops[i]->m_dst = comp_bufs[i];
852 		ops[i]->src.offset = 0;
853 		ops[i]->src.length = rte_pktmbuf_pkt_len(uncomp_bufs[i]);
854 		ops[i]->dst.offset = 0;
855 		if (state == RTE_COMP_OP_STATELESS) {
856 			ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
857 		} else {
858 			RTE_LOG(ERR, USER1,
859 				"Stateful operations are not supported "
860 				"in these tests yet\n");
861 			goto exit;
862 		}
863 		ops[i]->input_chksum = 0;
864 		/*
865 		 * Store original operation index in private data,
866 		 * since ordering does not have to be maintained,
867 		 * when dequeueing from compressdev, so a comparison
868 		 * at the end of the test can be done.
869 		 */
870 		priv_data = (struct priv_op_data *) (ops[i] + 1);
871 		priv_data->orig_idx = i;
872 	}
873 
874 	/* Compress data (either with Zlib API or compressdev API */
875 	if (zlib_dir == ZLIB_COMPRESS || zlib_dir == ZLIB_ALL) {
876 		for (i = 0; i < num_bufs; i++) {
877 			const struct rte_comp_xform *compress_xform =
878 				compress_xforms[i % num_xforms];
879 			ret = compress_zlib(ops[i], compress_xform,
880 					DEFAULT_MEM_LEVEL);
881 			if (ret < 0)
882 				goto exit;
883 
884 			ops_processed[i] = ops[i];
885 		}
886 	} else {
887 		/* Create compress private xform data */
888 		for (i = 0; i < num_xforms; i++) {
889 			ret = rte_compressdev_private_xform_create(0,
890 				(const struct rte_comp_xform *)compress_xforms[i],
891 				&priv_xforms[i]);
892 			if (ret < 0) {
893 				RTE_LOG(ERR, USER1,
894 					"Compression private xform "
895 					"could not be created\n");
896 				goto exit;
897 			}
898 			num_priv_xforms++;
899 		}
900 
901 		if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
902 			/* Attach shareable private xform data to ops */
903 			for (i = 0; i < num_bufs; i++)
904 				ops[i]->private_xform = priv_xforms[i % num_xforms];
905 		} else {
906 			/* Create rest of the private xforms for the other ops */
907 			for (i = num_xforms; i < num_bufs; i++) {
908 				ret = rte_compressdev_private_xform_create(0,
909 					compress_xforms[i % num_xforms],
910 					&priv_xforms[i]);
911 				if (ret < 0) {
912 					RTE_LOG(ERR, USER1,
913 						"Compression private xform "
914 						"could not be created\n");
915 					goto exit;
916 				}
917 				num_priv_xforms++;
918 			}
919 
920 			/* Attach non shareable private xform data to ops */
921 			for (i = 0; i < num_bufs; i++)
922 				ops[i]->private_xform = priv_xforms[i];
923 		}
924 
925 		/* Enqueue and dequeue all operations */
926 		num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
927 		if (num_enqd < num_bufs) {
928 			RTE_LOG(ERR, USER1,
929 				"The operations could not be enqueued\n");
930 			goto exit;
931 		}
932 
933 		num_total_deqd = 0;
934 		do {
935 			/*
936 			 * If retrying a dequeue call, wait for 10 ms to allow
937 			 * enough time to the driver to process the operations
938 			 */
939 			if (deqd_retries != 0) {
940 				/*
941 				 * Avoid infinite loop if not all the
942 				 * operations get out of the device
943 				 */
944 				if (deqd_retries == MAX_DEQD_RETRIES) {
945 					RTE_LOG(ERR, USER1,
946 						"Not all operations could be "
947 						"dequeued\n");
948 					goto exit;
949 				}
950 				usleep(DEQUEUE_WAIT_TIME);
951 			}
952 			num_deqd = rte_compressdev_dequeue_burst(0, 0,
953 					&ops_processed[num_total_deqd], num_bufs);
954 			num_total_deqd += num_deqd;
955 			deqd_retries++;
956 
957 		} while (num_total_deqd < num_enqd);
958 
959 		deqd_retries = 0;
960 
961 		/* Free compress private xforms */
962 		for (i = 0; i < num_priv_xforms; i++) {
963 			rte_compressdev_private_xform_free(0, priv_xforms[i]);
964 			priv_xforms[i] = NULL;
965 		}
966 		num_priv_xforms = 0;
967 	}
968 
969 	for (i = 0; i < num_bufs; i++) {
970 		priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
971 		uint16_t xform_idx = priv_data->orig_idx % num_xforms;
972 		const struct rte_comp_compress_xform *compress_xform =
973 				&compress_xforms[xform_idx]->compress;
974 		enum rte_comp_huffman huffman_type =
975 			compress_xform->deflate.huffman;
976 		char engine[] = "zlib (directly, not PMD)";
977 		if (zlib_dir != ZLIB_COMPRESS || zlib_dir != ZLIB_ALL)
978 			strlcpy(engine, "PMD", sizeof(engine));
979 
980 		RTE_LOG(DEBUG, USER1, "Buffer %u compressed by %s from %u to"
981 			" %u bytes (level = %d, huffman = %s)\n",
982 			buf_idx[priv_data->orig_idx], engine,
983 			ops_processed[i]->consumed, ops_processed[i]->produced,
984 			compress_xform->level,
985 			huffman_type_strings[huffman_type]);
986 		RTE_LOG(DEBUG, USER1, "Compression ratio = %.2f\n",
987 			ops_processed[i]->consumed == 0 ? 0 :
988 			(float)ops_processed[i]->produced /
989 			ops_processed[i]->consumed * 100);
990 		if (compress_xform->chksum != RTE_COMP_CHECKSUM_NONE)
991 			compress_checksum[i] = ops_processed[i]->output_chksum;
992 		ops[i] = NULL;
993 	}
994 
995 	/*
996 	 * Check operation status and free source mbufs (destination mbuf and
997 	 * compress operation information is needed for the decompression stage)
998 	 */
999 	for (i = 0; i < num_bufs; i++) {
1000 		if (out_of_space && oos_zlib_decompress) {
1001 			if (ops_processed[i]->status !=
1002 					RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
1003 				ret_status = -1;
1004 
1005 				RTE_LOG(ERR, USER1,
1006 					"Operation without expected out of "
1007 					"space status error\n");
1008 				goto exit;
1009 			} else
1010 				continue;
1011 		}
1012 
1013 		if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
1014 			RTE_LOG(ERR, USER1,
1015 				"Some operations were not successful\n");
1016 			goto exit;
1017 		}
1018 		priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1019 		rte_pktmbuf_free(uncomp_bufs[priv_data->orig_idx]);
1020 		uncomp_bufs[priv_data->orig_idx] = NULL;
1021 	}
1022 
1023 	if (out_of_space && oos_zlib_decompress) {
1024 		ret_status = 0;
1025 		goto exit;
1026 	}
1027 
1028 	/* Allocate buffers for decompressed data */
1029 	ret = rte_pktmbuf_alloc_bulk(buf_pool, uncomp_bufs, num_bufs);
1030 	if (ret < 0) {
1031 		RTE_LOG(ERR, USER1,
1032 			"Destination mbufs could not be allocated "
1033 			"from the mempool\n");
1034 		goto exit;
1035 	}
1036 
1037 	if (buff_type == SGL_BOTH || buff_type == LB_TO_SGL) {
1038 		for (i = 0; i < num_bufs; i++) {
1039 			priv_data = (struct priv_op_data *)
1040 					(ops_processed[i] + 1);
1041 			if (out_of_space == 1 && oos_zlib_compress)
1042 				data_size = OUT_OF_SPACE_BUF;
1043 			else
1044 				data_size =
1045 				strlen(test_bufs[priv_data->orig_idx]) + 1;
1046 
1047 			if (prepare_sgl_bufs(NULL, uncomp_bufs[i],
1048 			       data_size,
1049 			       big_data ? buf_pool : ts_params->small_mbuf_pool,
1050 			       big_data ? buf_pool : ts_params->large_mbuf_pool,
1051 			       big_data ? 0 : MAX_SEGS,
1052 			       big_data ? MAX_DATA_MBUF_SIZE : SMALL_SEG_SIZE)
1053 					< 0)
1054 				goto exit;
1055 		}
1056 
1057 	} else {
1058 		for (i = 0; i < num_bufs; i++) {
1059 			priv_data = (struct priv_op_data *)
1060 					(ops_processed[i] + 1);
1061 			if (out_of_space == 1 && oos_zlib_compress)
1062 				data_size = OUT_OF_SPACE_BUF;
1063 			else
1064 				data_size =
1065 				strlen(test_bufs[priv_data->orig_idx]) + 1;
1066 
1067 			rte_pktmbuf_append(uncomp_bufs[i], data_size);
1068 		}
1069 	}
1070 
1071 	/* Build the decompression operations */
1072 	ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
1073 	if (ret < 0) {
1074 		RTE_LOG(ERR, USER1,
1075 			"Decompress operations could not be allocated "
1076 			"from the mempool\n");
1077 		goto exit;
1078 	}
1079 
1080 	/* Source buffer is the compressed data from the previous operations */
1081 	for (i = 0; i < num_bufs; i++) {
1082 		ops[i]->m_src = ops_processed[i]->m_dst;
1083 		ops[i]->m_dst = uncomp_bufs[i];
1084 		ops[i]->src.offset = 0;
1085 		/*
1086 		 * Set the length of the compressed data to the
1087 		 * number of bytes that were produced in the previous stage
1088 		 */
1089 		ops[i]->src.length = ops_processed[i]->produced;
1090 		ops[i]->dst.offset = 0;
1091 		if (state == RTE_COMP_OP_STATELESS) {
1092 			ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
1093 		} else {
1094 			RTE_LOG(ERR, USER1,
1095 				"Stateful operations are not supported "
1096 				"in these tests yet\n");
1097 			goto exit;
1098 		}
1099 		ops[i]->input_chksum = 0;
1100 		/*
1101 		 * Copy private data from previous operations,
1102 		 * to keep the pointer to the original buffer
1103 		 */
1104 		memcpy(ops[i] + 1, ops_processed[i] + 1,
1105 				sizeof(struct priv_op_data));
1106 	}
1107 
1108 	/*
1109 	 * Free the previous compress operations,
1110 	 * as they are not needed anymore
1111 	 */
1112 	rte_comp_op_bulk_free(ops_processed, num_bufs);
1113 
1114 	/* Decompress data (either with Zlib API or compressdev API */
1115 	if (zlib_dir == ZLIB_DECOMPRESS || zlib_dir == ZLIB_ALL) {
1116 		for (i = 0; i < num_bufs; i++) {
1117 			priv_data = (struct priv_op_data *)(ops[i] + 1);
1118 			uint16_t xform_idx = priv_data->orig_idx % num_xforms;
1119 			const struct rte_comp_xform *decompress_xform =
1120 				decompress_xforms[xform_idx];
1121 
1122 			ret = decompress_zlib(ops[i], decompress_xform);
1123 			if (ret < 0)
1124 				goto exit;
1125 
1126 			ops_processed[i] = ops[i];
1127 		}
1128 	} else {
1129 		/* Create decompress private xform data */
1130 		for (i = 0; i < num_xforms; i++) {
1131 			ret = rte_compressdev_private_xform_create(0,
1132 				(const struct rte_comp_xform *)decompress_xforms[i],
1133 				&priv_xforms[i]);
1134 			if (ret < 0) {
1135 				RTE_LOG(ERR, USER1,
1136 					"Decompression private xform "
1137 					"could not be created\n");
1138 				goto exit;
1139 			}
1140 			num_priv_xforms++;
1141 		}
1142 
1143 		if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
1144 			/* Attach shareable private xform data to ops */
1145 			for (i = 0; i < num_bufs; i++) {
1146 				priv_data = (struct priv_op_data *)(ops[i] + 1);
1147 				uint16_t xform_idx = priv_data->orig_idx %
1148 								num_xforms;
1149 				ops[i]->private_xform = priv_xforms[xform_idx];
1150 			}
1151 		} else {
1152 			/* Create rest of the private xforms for the other ops */
1153 			for (i = num_xforms; i < num_bufs; i++) {
1154 				ret = rte_compressdev_private_xform_create(0,
1155 					decompress_xforms[i % num_xforms],
1156 					&priv_xforms[i]);
1157 				if (ret < 0) {
1158 					RTE_LOG(ERR, USER1,
1159 						"Decompression private xform "
1160 						"could not be created\n");
1161 					goto exit;
1162 				}
1163 				num_priv_xforms++;
1164 			}
1165 
1166 			/* Attach non shareable private xform data to ops */
1167 			for (i = 0; i < num_bufs; i++) {
1168 				priv_data = (struct priv_op_data *)(ops[i] + 1);
1169 				uint16_t xform_idx = priv_data->orig_idx;
1170 				ops[i]->private_xform = priv_xforms[xform_idx];
1171 			}
1172 		}
1173 
1174 		/* Enqueue and dequeue all operations */
1175 		num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
1176 		if (num_enqd < num_bufs) {
1177 			RTE_LOG(ERR, USER1,
1178 				"The operations could not be enqueued\n");
1179 			goto exit;
1180 		}
1181 
1182 		num_total_deqd = 0;
1183 		do {
1184 			/*
1185 			 * If retrying a dequeue call, wait for 10 ms to allow
1186 			 * enough time to the driver to process the operations
1187 			 */
1188 			if (deqd_retries != 0) {
1189 				/*
1190 				 * Avoid infinite loop if not all the
1191 				 * operations get out of the device
1192 				 */
1193 				if (deqd_retries == MAX_DEQD_RETRIES) {
1194 					RTE_LOG(ERR, USER1,
1195 						"Not all operations could be "
1196 						"dequeued\n");
1197 					goto exit;
1198 				}
1199 				usleep(DEQUEUE_WAIT_TIME);
1200 			}
1201 			num_deqd = rte_compressdev_dequeue_burst(0, 0,
1202 					&ops_processed[num_total_deqd], num_bufs);
1203 			num_total_deqd += num_deqd;
1204 			deqd_retries++;
1205 		} while (num_total_deqd < num_enqd);
1206 
1207 		deqd_retries = 0;
1208 	}
1209 
1210 	for (i = 0; i < num_bufs; i++) {
1211 		priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1212 		char engine[] = "zlib, (directly, no PMD)";
1213 		if (zlib_dir != ZLIB_DECOMPRESS || zlib_dir != ZLIB_ALL)
1214 			strlcpy(engine, "pmd", sizeof(engine));
1215 		RTE_LOG(DEBUG, USER1,
1216 			"Buffer %u decompressed by %s from %u to %u bytes\n",
1217 			buf_idx[priv_data->orig_idx], engine,
1218 			ops_processed[i]->consumed, ops_processed[i]->produced);
1219 		ops[i] = NULL;
1220 	}
1221 
1222 	/*
1223 	 * Check operation status and free source mbuf (destination mbuf and
1224 	 * compress operation information is still needed)
1225 	 */
1226 	for (i = 0; i < num_bufs; i++) {
1227 		if (out_of_space && oos_zlib_compress) {
1228 			if (ops_processed[i]->status !=
1229 					RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
1230 				ret_status = -1;
1231 
1232 				RTE_LOG(ERR, USER1,
1233 					"Operation without expected out of "
1234 					"space status error\n");
1235 				goto exit;
1236 			} else
1237 				continue;
1238 		}
1239 
1240 		if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
1241 			RTE_LOG(ERR, USER1,
1242 				"Some operations were not successful\n");
1243 			goto exit;
1244 		}
1245 		priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1246 		rte_pktmbuf_free(comp_bufs[priv_data->orig_idx]);
1247 		comp_bufs[priv_data->orig_idx] = NULL;
1248 	}
1249 
1250 	if (out_of_space && oos_zlib_compress) {
1251 		ret_status = 0;
1252 		goto exit;
1253 	}
1254 
1255 	/*
1256 	 * Compare the original stream with the decompressed stream
1257 	 * (in size and the data)
1258 	 */
1259 	for (i = 0; i < num_bufs; i++) {
1260 		priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1261 		const char *buf1 = test_bufs[priv_data->orig_idx];
1262 		const char *buf2;
1263 		contig_buf = rte_malloc(NULL, ops_processed[i]->produced, 0);
1264 		if (contig_buf == NULL) {
1265 			RTE_LOG(ERR, USER1, "Contiguous buffer could not "
1266 					"be allocated\n");
1267 			goto exit;
1268 		}
1269 
1270 		buf2 = rte_pktmbuf_read(ops_processed[i]->m_dst, 0,
1271 				ops_processed[i]->produced, contig_buf);
1272 		if (compare_buffers(buf1, strlen(buf1) + 1,
1273 				buf2, ops_processed[i]->produced) < 0)
1274 			goto exit;
1275 
1276 		/* Test checksums */
1277 		if (compress_xforms[0]->compress.chksum !=
1278 				RTE_COMP_CHECKSUM_NONE) {
1279 			if (ops_processed[i]->output_chksum !=
1280 					compress_checksum[i]) {
1281 				RTE_LOG(ERR, USER1, "The checksums differ\n"
1282 			"Compression Checksum: %" PRIu64 "\tDecompression "
1283 			"Checksum: %" PRIu64 "\n", compress_checksum[i],
1284 			ops_processed[i]->output_chksum);
1285 				goto exit;
1286 			}
1287 		}
1288 
1289 		rte_free(contig_buf);
1290 		contig_buf = NULL;
1291 	}
1292 
1293 	ret_status = 0;
1294 
1295 exit:
1296 	/* Free resources */
1297 	for (i = 0; i < num_bufs; i++) {
1298 		rte_pktmbuf_free(uncomp_bufs[i]);
1299 		rte_pktmbuf_free(comp_bufs[i]);
1300 		rte_comp_op_free(ops[i]);
1301 		rte_comp_op_free(ops_processed[i]);
1302 	}
1303 	for (i = 0; i < num_priv_xforms; i++) {
1304 		if (priv_xforms[i] != NULL)
1305 			rte_compressdev_private_xform_free(0, priv_xforms[i]);
1306 	}
1307 	rte_free(contig_buf);
1308 
1309 	return ret_status;
1310 }
1311 
1312 static int
1313 test_compressdev_deflate_stateless_fixed(void)
1314 {
1315 	struct comp_testsuite_params *ts_params = &testsuite_params;
1316 	uint16_t i;
1317 	int ret;
1318 	const struct rte_compressdev_capabilities *capab;
1319 
1320 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1321 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1322 
1323 	if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
1324 		return -ENOTSUP;
1325 
1326 	struct rte_comp_xform *compress_xform =
1327 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1328 
1329 	if (compress_xform == NULL) {
1330 		RTE_LOG(ERR, USER1,
1331 			"Compress xform could not be created\n");
1332 		ret = TEST_FAILED;
1333 		goto exit;
1334 	}
1335 
1336 	memcpy(compress_xform, ts_params->def_comp_xform,
1337 			sizeof(struct rte_comp_xform));
1338 	compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_FIXED;
1339 
1340 	struct interim_data_params int_data = {
1341 		NULL,
1342 		1,
1343 		NULL,
1344 		&compress_xform,
1345 		&ts_params->def_decomp_xform,
1346 		1
1347 	};
1348 
1349 	struct test_data_params test_data = {
1350 		RTE_COMP_OP_STATELESS,
1351 		LB_BOTH,
1352 		ZLIB_DECOMPRESS,
1353 		0,
1354 		0
1355 	};
1356 
1357 	for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1358 		int_data.test_bufs = &compress_test_bufs[i];
1359 		int_data.buf_idx = &i;
1360 
1361 		/* Compress with compressdev, decompress with Zlib */
1362 		test_data.zlib_dir = ZLIB_DECOMPRESS;
1363 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1364 			ret = TEST_FAILED;
1365 			goto exit;
1366 		}
1367 
1368 		/* Compress with Zlib, decompress with compressdev */
1369 		test_data.zlib_dir = ZLIB_COMPRESS;
1370 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1371 			ret = TEST_FAILED;
1372 			goto exit;
1373 		}
1374 	}
1375 
1376 	ret = TEST_SUCCESS;
1377 
1378 exit:
1379 	rte_free(compress_xform);
1380 	return ret;
1381 }
1382 
1383 static int
1384 test_compressdev_deflate_stateless_dynamic(void)
1385 {
1386 	struct comp_testsuite_params *ts_params = &testsuite_params;
1387 	uint16_t i;
1388 	int ret;
1389 	struct rte_comp_xform *compress_xform =
1390 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1391 
1392 	const struct rte_compressdev_capabilities *capab;
1393 
1394 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1395 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1396 
1397 	if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
1398 		return -ENOTSUP;
1399 
1400 	if (compress_xform == NULL) {
1401 		RTE_LOG(ERR, USER1,
1402 			"Compress xform could not be created\n");
1403 		ret = TEST_FAILED;
1404 		goto exit;
1405 	}
1406 
1407 	memcpy(compress_xform, ts_params->def_comp_xform,
1408 			sizeof(struct rte_comp_xform));
1409 	compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC;
1410 
1411 	struct interim_data_params int_data = {
1412 		NULL,
1413 		1,
1414 		NULL,
1415 		&compress_xform,
1416 		&ts_params->def_decomp_xform,
1417 		1
1418 	};
1419 
1420 	struct test_data_params test_data = {
1421 		RTE_COMP_OP_STATELESS,
1422 		LB_BOTH,
1423 		ZLIB_DECOMPRESS,
1424 		0,
1425 		0
1426 	};
1427 
1428 	for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1429 		int_data.test_bufs = &compress_test_bufs[i];
1430 		int_data.buf_idx = &i;
1431 
1432 		/* Compress with compressdev, decompress with Zlib */
1433 		test_data.zlib_dir = ZLIB_DECOMPRESS;
1434 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1435 			ret = TEST_FAILED;
1436 			goto exit;
1437 		}
1438 
1439 		/* Compress with Zlib, decompress with compressdev */
1440 		test_data.zlib_dir = ZLIB_COMPRESS;
1441 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1442 			ret = TEST_FAILED;
1443 			goto exit;
1444 		}
1445 	}
1446 
1447 	ret = TEST_SUCCESS;
1448 
1449 exit:
1450 	rte_free(compress_xform);
1451 	return ret;
1452 }
1453 
1454 static int
1455 test_compressdev_deflate_stateless_multi_op(void)
1456 {
1457 	struct comp_testsuite_params *ts_params = &testsuite_params;
1458 	uint16_t num_bufs = RTE_DIM(compress_test_bufs);
1459 	uint16_t buf_idx[num_bufs];
1460 	uint16_t i;
1461 
1462 	for (i = 0; i < num_bufs; i++)
1463 		buf_idx[i] = i;
1464 
1465 	struct interim_data_params int_data = {
1466 		compress_test_bufs,
1467 		num_bufs,
1468 		buf_idx,
1469 		&ts_params->def_comp_xform,
1470 		&ts_params->def_decomp_xform,
1471 		1
1472 	};
1473 
1474 	struct test_data_params test_data = {
1475 		RTE_COMP_OP_STATELESS,
1476 		LB_BOTH,
1477 		ZLIB_DECOMPRESS,
1478 		0,
1479 		0
1480 	};
1481 
1482 	/* Compress with compressdev, decompress with Zlib */
1483 	test_data.zlib_dir = ZLIB_DECOMPRESS;
1484 	if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1485 		return TEST_FAILED;
1486 
1487 	/* Compress with Zlib, decompress with compressdev */
1488 	test_data.zlib_dir = ZLIB_COMPRESS;
1489 	if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1490 		return TEST_FAILED;
1491 
1492 	return TEST_SUCCESS;
1493 }
1494 
1495 static int
1496 test_compressdev_deflate_stateless_multi_level(void)
1497 {
1498 	struct comp_testsuite_params *ts_params = &testsuite_params;
1499 	unsigned int level;
1500 	uint16_t i;
1501 	int ret;
1502 	struct rte_comp_xform *compress_xform =
1503 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1504 
1505 	if (compress_xform == NULL) {
1506 		RTE_LOG(ERR, USER1,
1507 			"Compress xform could not be created\n");
1508 		ret = TEST_FAILED;
1509 		goto exit;
1510 	}
1511 
1512 	memcpy(compress_xform, ts_params->def_comp_xform,
1513 			sizeof(struct rte_comp_xform));
1514 
1515 	struct interim_data_params int_data = {
1516 		NULL,
1517 		1,
1518 		NULL,
1519 		&compress_xform,
1520 		&ts_params->def_decomp_xform,
1521 		1
1522 	};
1523 
1524 	struct test_data_params test_data = {
1525 		RTE_COMP_OP_STATELESS,
1526 		LB_BOTH,
1527 		ZLIB_DECOMPRESS,
1528 		0,
1529 		0
1530 	};
1531 
1532 	for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1533 		int_data.test_bufs = &compress_test_bufs[i];
1534 		int_data.buf_idx = &i;
1535 
1536 		for (level = RTE_COMP_LEVEL_MIN; level <= RTE_COMP_LEVEL_MAX;
1537 				level++) {
1538 			compress_xform->compress.level = level;
1539 			/* Compress with compressdev, decompress with Zlib */
1540 			test_data.zlib_dir = ZLIB_DECOMPRESS;
1541 			if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1542 				ret = TEST_FAILED;
1543 				goto exit;
1544 			}
1545 		}
1546 	}
1547 
1548 	ret = TEST_SUCCESS;
1549 
1550 exit:
1551 	rte_free(compress_xform);
1552 	return ret;
1553 }
1554 
1555 #define NUM_XFORMS 3
1556 static int
1557 test_compressdev_deflate_stateless_multi_xform(void)
1558 {
1559 	struct comp_testsuite_params *ts_params = &testsuite_params;
1560 	uint16_t num_bufs = NUM_XFORMS;
1561 	struct rte_comp_xform *compress_xforms[NUM_XFORMS] = {NULL};
1562 	struct rte_comp_xform *decompress_xforms[NUM_XFORMS] = {NULL};
1563 	const char *test_buffers[NUM_XFORMS];
1564 	uint16_t i;
1565 	unsigned int level = RTE_COMP_LEVEL_MIN;
1566 	uint16_t buf_idx[num_bufs];
1567 
1568 	int ret;
1569 
1570 	/* Create multiple xforms with various levels */
1571 	for (i = 0; i < NUM_XFORMS; i++) {
1572 		compress_xforms[i] = rte_malloc(NULL,
1573 				sizeof(struct rte_comp_xform), 0);
1574 		if (compress_xforms[i] == NULL) {
1575 			RTE_LOG(ERR, USER1,
1576 				"Compress xform could not be created\n");
1577 			ret = TEST_FAILED;
1578 			goto exit;
1579 		}
1580 
1581 		memcpy(compress_xforms[i], ts_params->def_comp_xform,
1582 				sizeof(struct rte_comp_xform));
1583 		compress_xforms[i]->compress.level = level;
1584 		level++;
1585 
1586 		decompress_xforms[i] = rte_malloc(NULL,
1587 				sizeof(struct rte_comp_xform), 0);
1588 		if (decompress_xforms[i] == NULL) {
1589 			RTE_LOG(ERR, USER1,
1590 				"Decompress xform could not be created\n");
1591 			ret = TEST_FAILED;
1592 			goto exit;
1593 		}
1594 
1595 		memcpy(decompress_xforms[i], ts_params->def_decomp_xform,
1596 				sizeof(struct rte_comp_xform));
1597 	}
1598 
1599 	for (i = 0; i < NUM_XFORMS; i++) {
1600 		buf_idx[i] = 0;
1601 		/* Use the same buffer in all sessions */
1602 		test_buffers[i] = compress_test_bufs[0];
1603 	}
1604 
1605 	struct interim_data_params int_data = {
1606 		test_buffers,
1607 		num_bufs,
1608 		buf_idx,
1609 		compress_xforms,
1610 		decompress_xforms,
1611 		NUM_XFORMS
1612 	};
1613 
1614 	struct test_data_params test_data = {
1615 		RTE_COMP_OP_STATELESS,
1616 		LB_BOTH,
1617 		ZLIB_DECOMPRESS,
1618 		0,
1619 		0
1620 	};
1621 
1622 	/* Compress with compressdev, decompress with Zlib */
1623 	if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1624 		ret = TEST_FAILED;
1625 		goto exit;
1626 	}
1627 
1628 	ret = TEST_SUCCESS;
1629 exit:
1630 	for (i = 0; i < NUM_XFORMS; i++) {
1631 		rte_free(compress_xforms[i]);
1632 		rte_free(decompress_xforms[i]);
1633 	}
1634 
1635 	return ret;
1636 }
1637 
1638 static int
1639 test_compressdev_deflate_stateless_sgl(void)
1640 {
1641 	struct comp_testsuite_params *ts_params = &testsuite_params;
1642 	uint16_t i;
1643 	const struct rte_compressdev_capabilities *capab;
1644 
1645 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1646 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1647 
1648 	if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
1649 		return -ENOTSUP;
1650 
1651 	struct interim_data_params int_data = {
1652 		NULL,
1653 		1,
1654 		NULL,
1655 		&ts_params->def_comp_xform,
1656 		&ts_params->def_decomp_xform,
1657 		1
1658 	};
1659 
1660 	struct test_data_params test_data = {
1661 		RTE_COMP_OP_STATELESS,
1662 		SGL_BOTH,
1663 		ZLIB_DECOMPRESS,
1664 		0,
1665 		0
1666 	};
1667 
1668 	for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1669 		int_data.test_bufs = &compress_test_bufs[i];
1670 		int_data.buf_idx = &i;
1671 
1672 		/* Compress with compressdev, decompress with Zlib */
1673 		test_data.zlib_dir = ZLIB_DECOMPRESS;
1674 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1675 			return TEST_FAILED;
1676 
1677 		/* Compress with Zlib, decompress with compressdev */
1678 		test_data.zlib_dir = ZLIB_COMPRESS;
1679 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1680 			return TEST_FAILED;
1681 
1682 		if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_LB_OUT) {
1683 			/* Compress with compressdev, decompress with Zlib */
1684 			test_data.zlib_dir = ZLIB_DECOMPRESS;
1685 			test_data.buff_type = SGL_TO_LB;
1686 			if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1687 				return TEST_FAILED;
1688 
1689 			/* Compress with Zlib, decompress with compressdev */
1690 			test_data.zlib_dir = ZLIB_COMPRESS;
1691 			test_data.buff_type = SGL_TO_LB;
1692 			if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1693 				return TEST_FAILED;
1694 		}
1695 
1696 		if (capab->comp_feature_flags & RTE_COMP_FF_OOP_LB_IN_SGL_OUT) {
1697 			/* Compress with compressdev, decompress with Zlib */
1698 			test_data.zlib_dir = ZLIB_DECOMPRESS;
1699 			test_data.buff_type = LB_TO_SGL;
1700 			if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1701 				return TEST_FAILED;
1702 
1703 			/* Compress with Zlib, decompress with compressdev */
1704 			test_data.zlib_dir = ZLIB_COMPRESS;
1705 			test_data.buff_type = LB_TO_SGL;
1706 			if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1707 				return TEST_FAILED;
1708 		}
1709 
1710 
1711 	}
1712 
1713 	return TEST_SUCCESS;
1714 
1715 }
1716 
1717 static int
1718 test_compressdev_deflate_stateless_checksum(void)
1719 {
1720 	struct comp_testsuite_params *ts_params = &testsuite_params;
1721 	uint16_t i;
1722 	int ret;
1723 	const struct rte_compressdev_capabilities *capab;
1724 
1725 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1726 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1727 
1728 	/* Check if driver supports any checksum */
1729 	if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM) == 0 &&
1730 			(capab->comp_feature_flags &
1731 			RTE_COMP_FF_ADLER32_CHECKSUM) == 0 &&
1732 			(capab->comp_feature_flags &
1733 			RTE_COMP_FF_CRC32_ADLER32_CHECKSUM) == 0)
1734 		return -ENOTSUP;
1735 
1736 	struct rte_comp_xform *compress_xform =
1737 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1738 	if (compress_xform == NULL) {
1739 		RTE_LOG(ERR, USER1, "Compress xform could not be created\n");
1740 		ret = TEST_FAILED;
1741 		return ret;
1742 	}
1743 
1744 	memcpy(compress_xform, ts_params->def_comp_xform,
1745 			sizeof(struct rte_comp_xform));
1746 
1747 	struct rte_comp_xform *decompress_xform =
1748 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1749 	if (decompress_xform == NULL) {
1750 		RTE_LOG(ERR, USER1, "Decompress xform could not be created\n");
1751 		rte_free(compress_xform);
1752 		ret = TEST_FAILED;
1753 		return ret;
1754 	}
1755 
1756 	memcpy(decompress_xform, ts_params->def_decomp_xform,
1757 			sizeof(struct rte_comp_xform));
1758 
1759 	struct interim_data_params int_data = {
1760 		NULL,
1761 		1,
1762 		NULL,
1763 		&compress_xform,
1764 		&decompress_xform,
1765 		1
1766 	};
1767 
1768 	struct test_data_params test_data = {
1769 		RTE_COMP_OP_STATELESS,
1770 		LB_BOTH,
1771 		ZLIB_DECOMPRESS,
1772 		0,
1773 		0
1774 	};
1775 
1776 	/* Check if driver supports crc32 checksum and test */
1777 	if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM)) {
1778 		compress_xform->compress.chksum = RTE_COMP_CHECKSUM_CRC32;
1779 		decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_CRC32;
1780 
1781 		for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1782 			/* Compress with compressdev, decompress with Zlib */
1783 			int_data.test_bufs = &compress_test_bufs[i];
1784 			int_data.buf_idx = &i;
1785 
1786 			/* Generate zlib checksum and test against selected
1787 			 * drivers decompression checksum
1788 			 */
1789 			test_data.zlib_dir = ZLIB_COMPRESS;
1790 			if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1791 				ret = TEST_FAILED;
1792 				goto exit;
1793 			}
1794 
1795 			/* Generate compression and decompression
1796 			 * checksum of selected driver
1797 			 */
1798 			test_data.zlib_dir = ZLIB_NONE;
1799 			if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1800 				ret = TEST_FAILED;
1801 				goto exit;
1802 			}
1803 		}
1804 	}
1805 
1806 	/* Check if driver supports adler32 checksum and test */
1807 	if ((capab->comp_feature_flags & RTE_COMP_FF_ADLER32_CHECKSUM)) {
1808 		compress_xform->compress.chksum = RTE_COMP_CHECKSUM_ADLER32;
1809 		decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_ADLER32;
1810 
1811 		for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1812 			int_data.test_bufs = &compress_test_bufs[i];
1813 			int_data.buf_idx = &i;
1814 
1815 			/* Generate zlib checksum and test against selected
1816 			 * drivers decompression checksum
1817 			 */
1818 			test_data.zlib_dir = ZLIB_COMPRESS;
1819 			if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1820 				ret = TEST_FAILED;
1821 				goto exit;
1822 			}
1823 			/* Generate compression and decompression
1824 			 * checksum of selected driver
1825 			 */
1826 			test_data.zlib_dir = ZLIB_NONE;
1827 			if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1828 				ret = TEST_FAILED;
1829 				goto exit;
1830 			}
1831 		}
1832 	}
1833 
1834 	/* Check if driver supports combined crc and adler checksum and test */
1835 	if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_ADLER32_CHECKSUM)) {
1836 		compress_xform->compress.chksum =
1837 				RTE_COMP_CHECKSUM_CRC32_ADLER32;
1838 		decompress_xform->decompress.chksum =
1839 				RTE_COMP_CHECKSUM_CRC32_ADLER32;
1840 
1841 		for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1842 			int_data.test_bufs = &compress_test_bufs[i];
1843 			int_data.buf_idx = &i;
1844 
1845 			/* Generate compression and decompression
1846 			 * checksum of selected driver
1847 			 */
1848 			test_data.zlib_dir = ZLIB_NONE;
1849 			if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1850 				ret = TEST_FAILED;
1851 				goto exit;
1852 			}
1853 		}
1854 	}
1855 
1856 	ret = TEST_SUCCESS;
1857 
1858 exit:
1859 	rte_free(compress_xform);
1860 	rte_free(decompress_xform);
1861 	return ret;
1862 }
1863 
1864 static int
1865 test_compressdev_out_of_space_buffer(void)
1866 {
1867 	struct comp_testsuite_params *ts_params = &testsuite_params;
1868 	int ret;
1869 	uint16_t i;
1870 	const struct rte_compressdev_capabilities *capab;
1871 
1872 	RTE_LOG(INFO, USER1, "This is a negative test errors are expected\n");
1873 
1874 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1875 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1876 
1877 	if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
1878 		return -ENOTSUP;
1879 
1880 	struct rte_comp_xform *compress_xform =
1881 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1882 
1883 	if (compress_xform == NULL) {
1884 		RTE_LOG(ERR, USER1,
1885 			"Compress xform could not be created\n");
1886 		ret = TEST_FAILED;
1887 		goto exit;
1888 	}
1889 
1890 	struct interim_data_params int_data = {
1891 		&compress_test_bufs[0],
1892 		1,
1893 		&i,
1894 		&ts_params->def_comp_xform,
1895 		&ts_params->def_decomp_xform,
1896 		1
1897 	};
1898 
1899 	struct test_data_params test_data = {
1900 		RTE_COMP_OP_STATELESS,
1901 		LB_BOTH,
1902 		ZLIB_DECOMPRESS,
1903 		1,
1904 		0
1905 	};
1906 	/* Compress with compressdev, decompress with Zlib */
1907 	test_data.zlib_dir = ZLIB_DECOMPRESS;
1908 	if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1909 		ret = TEST_FAILED;
1910 		goto exit;
1911 	}
1912 
1913 	/* Compress with Zlib, decompress with compressdev */
1914 	test_data.zlib_dir = ZLIB_COMPRESS;
1915 	if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1916 		ret = TEST_FAILED;
1917 		goto exit;
1918 	}
1919 
1920 	if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
1921 		/* Compress with compressdev, decompress with Zlib */
1922 		test_data.zlib_dir = ZLIB_DECOMPRESS;
1923 		test_data.buff_type = SGL_BOTH;
1924 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1925 			ret = TEST_FAILED;
1926 			goto exit;
1927 		}
1928 
1929 		/* Compress with Zlib, decompress with compressdev */
1930 		test_data.zlib_dir = ZLIB_COMPRESS;
1931 		test_data.buff_type = SGL_BOTH;
1932 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1933 			ret = TEST_FAILED;
1934 			goto exit;
1935 		}
1936 	}
1937 
1938 	ret  = TEST_SUCCESS;
1939 
1940 exit:
1941 	rte_free(compress_xform);
1942 	return ret;
1943 }
1944 
1945 static int
1946 test_compressdev_deflate_stateless_dynamic_big(void)
1947 {
1948 	struct comp_testsuite_params *ts_params = &testsuite_params;
1949 	uint16_t i = 0;
1950 	int ret = TEST_SUCCESS;
1951 	const struct rte_compressdev_capabilities *capab;
1952 	char *test_buffer = NULL;
1953 
1954 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1955 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1956 
1957 	if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
1958 		return -ENOTSUP;
1959 
1960 	if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
1961 		return -ENOTSUP;
1962 
1963 	test_buffer = rte_malloc(NULL, BIG_DATA_TEST_SIZE, 0);
1964 	if (test_buffer == NULL) {
1965 		RTE_LOG(ERR, USER1,
1966 			"Can't allocate buffer for big-data\n");
1967 		return TEST_FAILED;
1968 	}
1969 
1970 	struct interim_data_params int_data = {
1971 		(const char * const *)&test_buffer,
1972 		1,
1973 		NULL,
1974 		&ts_params->def_comp_xform,
1975 		&ts_params->def_decomp_xform,
1976 		1
1977 	};
1978 
1979 	struct test_data_params test_data = {
1980 		RTE_COMP_OP_STATELESS,
1981 		SGL_BOTH,
1982 		ZLIB_DECOMPRESS,
1983 		0,
1984 		1
1985 	};
1986 
1987 	ts_params->def_comp_xform->compress.deflate.huffman =
1988 						RTE_COMP_HUFFMAN_DYNAMIC;
1989 
1990 	/* fill the buffer with data based on rand. data */
1991 	srand(BIG_DATA_TEST_SIZE);
1992 	for (uint32_t i = 0; i < BIG_DATA_TEST_SIZE - 1; ++i)
1993 		test_buffer[i] = (uint8_t)(rand() % ((uint8_t)-1)) | 1;
1994 
1995 	test_buffer[BIG_DATA_TEST_SIZE-1] = 0;
1996 	int_data.buf_idx = &i;
1997 
1998 	/* Compress with compressdev, decompress with Zlib */
1999 	test_data.zlib_dir = ZLIB_DECOMPRESS;
2000 	if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
2001 		ret = TEST_FAILED;
2002 		goto end;
2003 	}
2004 
2005 	/* Compress with Zlib, decompress with compressdev */
2006 	test_data.zlib_dir = ZLIB_COMPRESS;
2007 	if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
2008 		ret = TEST_FAILED;
2009 		goto end;
2010 	}
2011 
2012 end:
2013 	ts_params->def_comp_xform->compress.deflate.huffman =
2014 						RTE_COMP_HUFFMAN_DEFAULT;
2015 	rte_free(test_buffer);
2016 	return ret;
2017 }
2018 
2019 
/*
 * Test suite definition. Each case (except the invalid-configuration
 * one) runs between generic_ut_setup() and generic_ut_teardown(), which
 * configure and tear down the device around it.
 */
static struct unit_test_suite compressdev_testsuite  = {
	.suite_name = "compressdev unit test suite",
	.setup = testsuite_setup,
	.teardown = testsuite_teardown,
	.unit_test_cases = {
		/* Negative configuration test runs without per-case setup */
		TEST_CASE_ST(NULL, NULL,
			test_compressdev_invalid_configuration),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_fixed),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_dynamic),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_dynamic_big),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_multi_op),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_multi_level),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_multi_xform),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_sgl),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_checksum),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_out_of_space_buffer),
		TEST_CASES_END() /**< NULL terminate unit test array */
	}
};
2048 
/* Entry point for the 'compressdev_autotest' command: runs the suite. */
static int
test_compressdev(void)
{
	return unit_test_suite_runner(&compressdev_testsuite);
}

REGISTER_TEST_COMMAND(compressdev_autotest, test_compressdev);
2056