xref: /dpdk/app/test/test_compressdev.c (revision 1cde1b9a9b4dbf31cb5e5ccdfc5da3cb079f43a2)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 - 2019 Intel Corporation
3  */
4 #include <string.h>
5 #include <zlib.h>
6 #include <math.h>
7 #include <stdlib.h>
8 #include <unistd.h>
9 
10 #include <rte_cycles.h>
11 #include <rte_malloc.h>
12 #include <rte_mempool.h>
13 #include <rte_mbuf.h>
14 #include <rte_compressdev.h>
15 #include <rte_string_fns.h>
16 
17 #include "test_compressdev_test_buffer.h"
18 #include "test.h"
19 
20 #define DIV_CEIL(a, b)  ((a) / (b) + ((a) % (b) != 0))
21 
22 #define DEFAULT_WINDOW_SIZE 15
23 #define DEFAULT_MEM_LEVEL 8
24 #define MAX_DEQD_RETRIES 10
25 #define DEQUEUE_WAIT_TIME 10000
26 
27 /*
28  * 30% extra size for compressed data compared to original data,
29  * in case data size cannot be reduced and it is actually bigger
30  * due to the compress block headers
31  */
32 #define COMPRESS_BUF_SIZE_RATIO 1.3
33 #define NUM_LARGE_MBUFS 16
34 #define SMALL_SEG_SIZE 256
35 #define MAX_SEGS 16
36 #define NUM_OPS 16
37 #define NUM_MAX_XFORMS 16
38 #define NUM_MAX_INFLIGHT_OPS 128
39 #define CACHE_SIZE 0
40 
41 #define ZLIB_CRC_CHECKSUM_WINDOW_BITS 31
42 #define ZLIB_HEADER_SIZE 2
43 #define ZLIB_TRAILER_SIZE 4
44 #define GZIP_HEADER_SIZE 10
45 #define GZIP_TRAILER_SIZE 8
46 
47 #define OUT_OF_SPACE_BUF 1
48 
49 #define MAX_MBUF_SEGMENT_SIZE 65535
50 #define MAX_DATA_MBUF_SIZE (MAX_MBUF_SEGMENT_SIZE - RTE_PKTMBUF_HEADROOM)
51 #define NUM_BIG_MBUFS 4
52 #define BIG_DATA_TEST_SIZE (MAX_DATA_MBUF_SIZE * NUM_BIG_MBUFS / 2)
53 
/* Human-readable names for enum rte_comp_huffman values, used in debug logs */
const char *
huffman_type_strings[] = {
	[RTE_COMP_HUFFMAN_DEFAULT]	= "PMD default",
	[RTE_COMP_HUFFMAN_FIXED]	= "Fixed",
	[RTE_COMP_HUFFMAN_DYNAMIC]	= "Dynamic"
};
60 
/*
 * Selects which half of a compress/decompress round trip is performed
 * with the Zlib API directly instead of through compressdev.
 */
enum zlib_direction {
	ZLIB_NONE,	/* compressdev used for both directions */
	ZLIB_COMPRESS,	/* zlib compresses, compressdev decompresses */
	ZLIB_DECOMPRESS,	/* compressdev compresses, zlib decompresses */
	ZLIB_ALL	/* zlib used for both directions */
};
67 
/* Buffer layout (linear vs chained/SGL mbuf) for each side of an operation */
enum varied_buff {
	LB_BOTH = 0,	/* both input and output are linear */
	SGL_BOTH,	/* both input and output are chained */
	SGL_TO_LB,	/* input buffer is chained */
	LB_TO_SGL	/* output buffer is chained */
};
74 
/* Per-operation private data stored right after each rte_comp_op */
struct priv_op_data {
	uint16_t orig_idx;	/* index of the op before enqueue; dequeue may reorder */
};
78 
/* Pools and default xforms shared by every test in the suite */
struct comp_testsuite_params {
	struct rte_mempool *large_mbuf_pool;	/* mbufs sized for the largest test buffer */
	struct rte_mempool *small_mbuf_pool;	/* small segments for SGL chains */
	struct rte_mempool *big_mbuf_pool;	/* max-size segments for big-data tests */
	struct rte_mempool *op_pool;	/* rte_comp_ops with trailing priv_op_data */
	struct rte_comp_xform *def_comp_xform;	/* default DEFLATE compress xform */
	struct rte_comp_xform *def_decomp_xform;	/* default DEFLATE decompress xform */
};
87 
/* Input buffers and xforms that a single test iteration operates on */
struct interim_data_params {
	const char * const *test_bufs;	/* NUL-terminated source strings */
	unsigned int num_bufs;	/* number of entries in test_bufs */
	uint16_t *buf_idx;	/* original buffer indices, used in logs */
	struct rte_comp_xform **compress_xforms;
	struct rte_comp_xform **decompress_xforms;
	unsigned int num_xforms;	/* xforms are cycled over the buffers (i % num_xforms) */
};
96 
/* Per-test knobs: op types, buffer layout, zlib direction and extras */
struct test_data_params {
	enum rte_comp_op_type compress_state;	/* stateless/stateful compression */
	enum rte_comp_op_type decompress_state;	/* stateless/stateful decompression */
	enum varied_buff buff_type;	/* linear vs chained mbufs */
	enum zlib_direction zlib_dir;	/* which direction uses zlib directly */
	unsigned int out_of_space;	/* non-zero: deliberately undersized dst buffer */
	unsigned int big_data;	/* non-zero: use the big mbuf pool */
	/* stateful decompression specific parameters */
	unsigned int decompress_output_block_size;
	unsigned int decompress_steps_max;
	/* external mbufs specific parameters */
	unsigned int use_external_mbufs;	/* non-zero: attach memzones as extbufs */
	unsigned int inbuf_data_size;
	const struct rte_memzone *inbuf_memzone;
	const struct rte_memzone *compbuf_memzone;
	const struct rte_memzone *uncompbuf_memzone;
};
114 
115 static struct comp_testsuite_params testsuite_params = { 0 };
116 
117 static void
118 testsuite_teardown(void)
119 {
120 	struct comp_testsuite_params *ts_params = &testsuite_params;
121 
122 	if (rte_mempool_in_use_count(ts_params->large_mbuf_pool))
123 		RTE_LOG(ERR, USER1, "Large mbuf pool still has unfreed bufs\n");
124 	if (rte_mempool_in_use_count(ts_params->small_mbuf_pool))
125 		RTE_LOG(ERR, USER1, "Small mbuf pool still has unfreed bufs\n");
126 	if (rte_mempool_in_use_count(ts_params->big_mbuf_pool))
127 		RTE_LOG(ERR, USER1, "Big mbuf pool still has unfreed bufs\n");
128 	if (rte_mempool_in_use_count(ts_params->op_pool))
129 		RTE_LOG(ERR, USER1, "op pool still has unfreed ops\n");
130 
131 	rte_mempool_free(ts_params->large_mbuf_pool);
132 	rte_mempool_free(ts_params->small_mbuf_pool);
133 	rte_mempool_free(ts_params->big_mbuf_pool);
134 	rte_mempool_free(ts_params->op_pool);
135 	rte_free(ts_params->def_comp_xform);
136 	rte_free(ts_params->def_decomp_xform);
137 }
138 
139 static int
140 testsuite_setup(void)
141 {
142 	struct comp_testsuite_params *ts_params = &testsuite_params;
143 	uint32_t max_buf_size = 0;
144 	unsigned int i;
145 
146 	if (rte_compressdev_count() == 0) {
147 		RTE_LOG(WARNING, USER1, "Need at least one compress device\n");
148 		return TEST_SKIPPED;
149 	}
150 
151 	RTE_LOG(NOTICE, USER1, "Running tests on device %s\n",
152 				rte_compressdev_name_get(0));
153 
154 	for (i = 0; i < RTE_DIM(compress_test_bufs); i++)
155 		max_buf_size = RTE_MAX(max_buf_size,
156 				strlen(compress_test_bufs[i]) + 1);
157 
158 	/*
159 	 * Buffers to be used in compression and decompression.
160 	 * Since decompressed data might be larger than
161 	 * compressed data (due to block header),
162 	 * buffers should be big enough for both cases.
163 	 */
164 	max_buf_size *= COMPRESS_BUF_SIZE_RATIO;
165 	ts_params->large_mbuf_pool = rte_pktmbuf_pool_create("large_mbuf_pool",
166 			NUM_LARGE_MBUFS,
167 			CACHE_SIZE, 0,
168 			max_buf_size + RTE_PKTMBUF_HEADROOM,
169 			rte_socket_id());
170 	if (ts_params->large_mbuf_pool == NULL) {
171 		RTE_LOG(ERR, USER1, "Large mbuf pool could not be created\n");
172 		return TEST_FAILED;
173 	}
174 
175 	/* Create mempool with smaller buffers for SGL testing */
176 	ts_params->small_mbuf_pool = rte_pktmbuf_pool_create("small_mbuf_pool",
177 			NUM_LARGE_MBUFS * MAX_SEGS,
178 			CACHE_SIZE, 0,
179 			SMALL_SEG_SIZE + RTE_PKTMBUF_HEADROOM,
180 			rte_socket_id());
181 	if (ts_params->small_mbuf_pool == NULL) {
182 		RTE_LOG(ERR, USER1, "Small mbuf pool could not be created\n");
183 		goto exit;
184 	}
185 
186 	/* Create mempool with big buffers for SGL testing */
187 	ts_params->big_mbuf_pool = rte_pktmbuf_pool_create("big_mbuf_pool",
188 			NUM_BIG_MBUFS + 1,
189 			CACHE_SIZE, 0,
190 			MAX_MBUF_SEGMENT_SIZE,
191 			rte_socket_id());
192 	if (ts_params->big_mbuf_pool == NULL) {
193 		RTE_LOG(ERR, USER1, "Big mbuf pool could not be created\n");
194 		goto exit;
195 	}
196 
197 	ts_params->op_pool = rte_comp_op_pool_create("op_pool", NUM_OPS,
198 				0, sizeof(struct priv_op_data),
199 				rte_socket_id());
200 	if (ts_params->op_pool == NULL) {
201 		RTE_LOG(ERR, USER1, "Operation pool could not be created\n");
202 		goto exit;
203 	}
204 
205 	ts_params->def_comp_xform =
206 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
207 	if (ts_params->def_comp_xform == NULL) {
208 		RTE_LOG(ERR, USER1,
209 			"Default compress xform could not be created\n");
210 		goto exit;
211 	}
212 	ts_params->def_decomp_xform =
213 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
214 	if (ts_params->def_decomp_xform == NULL) {
215 		RTE_LOG(ERR, USER1,
216 			"Default decompress xform could not be created\n");
217 		goto exit;
218 	}
219 
220 	/* Initializes default values for compress/decompress xforms */
221 	ts_params->def_comp_xform->type = RTE_COMP_COMPRESS;
222 	ts_params->def_comp_xform->compress.algo = RTE_COMP_ALGO_DEFLATE,
223 	ts_params->def_comp_xform->compress.deflate.huffman =
224 						RTE_COMP_HUFFMAN_DEFAULT;
225 	ts_params->def_comp_xform->compress.level = RTE_COMP_LEVEL_PMD_DEFAULT;
226 	ts_params->def_comp_xform->compress.chksum = RTE_COMP_CHECKSUM_NONE;
227 	ts_params->def_comp_xform->compress.window_size = DEFAULT_WINDOW_SIZE;
228 
229 	ts_params->def_decomp_xform->type = RTE_COMP_DECOMPRESS;
230 	ts_params->def_decomp_xform->decompress.algo = RTE_COMP_ALGO_DEFLATE,
231 	ts_params->def_decomp_xform->decompress.chksum = RTE_COMP_CHECKSUM_NONE;
232 	ts_params->def_decomp_xform->decompress.window_size = DEFAULT_WINDOW_SIZE;
233 
234 	return TEST_SUCCESS;
235 
236 exit:
237 	testsuite_teardown();
238 
239 	return TEST_FAILED;
240 }
241 
242 static int
243 generic_ut_setup(void)
244 {
245 	/* Configure compressdev (one device, one queue pair) */
246 	struct rte_compressdev_config config = {
247 		.socket_id = rte_socket_id(),
248 		.nb_queue_pairs = 1,
249 		.max_nb_priv_xforms = NUM_MAX_XFORMS,
250 		.max_nb_streams = 1
251 	};
252 
253 	if (rte_compressdev_configure(0, &config) < 0) {
254 		RTE_LOG(ERR, USER1, "Device configuration failed\n");
255 		return -1;
256 	}
257 
258 	if (rte_compressdev_queue_pair_setup(0, 0, NUM_MAX_INFLIGHT_OPS,
259 			rte_socket_id()) < 0) {
260 		RTE_LOG(ERR, USER1, "Queue pair setup failed\n");
261 		return -1;
262 	}
263 
264 	if (rte_compressdev_start(0) < 0) {
265 		RTE_LOG(ERR, USER1, "Device could not be started\n");
266 		return -1;
267 	}
268 
269 	return 0;
270 }
271 
272 static void
273 generic_ut_teardown(void)
274 {
275 	rte_compressdev_stop(0);
276 	if (rte_compressdev_close(0) < 0)
277 		RTE_LOG(ERR, USER1, "Device could not be closed\n");
278 }
279 
280 static int
281 test_compressdev_invalid_configuration(void)
282 {
283 	struct rte_compressdev_config invalid_config;
284 	struct rte_compressdev_config valid_config = {
285 		.socket_id = rte_socket_id(),
286 		.nb_queue_pairs = 1,
287 		.max_nb_priv_xforms = NUM_MAX_XFORMS,
288 		.max_nb_streams = 1
289 	};
290 	struct rte_compressdev_info dev_info;
291 
292 	/* Invalid configuration with 0 queue pairs */
293 	memcpy(&invalid_config, &valid_config,
294 			sizeof(struct rte_compressdev_config));
295 	invalid_config.nb_queue_pairs = 0;
296 
297 	TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
298 			"Device configuration was successful "
299 			"with no queue pairs (invalid)\n");
300 
301 	/*
302 	 * Invalid configuration with too many queue pairs
303 	 * (if there is an actual maximum number of queue pairs)
304 	 */
305 	rte_compressdev_info_get(0, &dev_info);
306 	if (dev_info.max_nb_queue_pairs != 0) {
307 		memcpy(&invalid_config, &valid_config,
308 			sizeof(struct rte_compressdev_config));
309 		invalid_config.nb_queue_pairs = dev_info.max_nb_queue_pairs + 1;
310 
311 		TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
312 				"Device configuration was successful "
313 				"with too many queue pairs (invalid)\n");
314 	}
315 
316 	/* Invalid queue pair setup, with no number of queue pairs set */
317 	TEST_ASSERT_FAIL(rte_compressdev_queue_pair_setup(0, 0,
318 				NUM_MAX_INFLIGHT_OPS, rte_socket_id()),
319 			"Queue pair setup was successful "
320 			"with no queue pairs set (invalid)\n");
321 
322 	return TEST_SUCCESS;
323 }
324 
325 static int
326 compare_buffers(const char *buffer1, uint32_t buffer1_len,
327 		const char *buffer2, uint32_t buffer2_len)
328 {
329 	if (buffer1_len != buffer2_len) {
330 		RTE_LOG(ERR, USER1, "Buffer lengths are different\n");
331 		return -1;
332 	}
333 
334 	if (memcmp(buffer1, buffer2, buffer1_len) != 0) {
335 		RTE_LOG(ERR, USER1, "Buffers are different\n");
336 		return -1;
337 	}
338 
339 	return 0;
340 }
341 
342 /*
343  * Maps compressdev and Zlib flush flags
344  */
345 static int
346 map_zlib_flush_flag(enum rte_comp_flush_flag flag)
347 {
348 	switch (flag) {
349 	case RTE_COMP_FLUSH_NONE:
350 		return Z_NO_FLUSH;
351 	case RTE_COMP_FLUSH_SYNC:
352 		return Z_SYNC_FLUSH;
353 	case RTE_COMP_FLUSH_FULL:
354 		return Z_FULL_FLUSH;
355 	case RTE_COMP_FLUSH_FINAL:
356 		return Z_FINISH;
357 	/*
358 	 * There should be only the values above,
359 	 * so this should never happen
360 	 */
361 	default:
362 		return -1;
363 	}
364 }
365 
366 static int
367 compress_zlib(struct rte_comp_op *op,
368 		const struct rte_comp_xform *xform, int mem_level)
369 {
370 	z_stream stream;
371 	int zlib_flush;
372 	int strategy, window_bits, comp_level;
373 	int ret = TEST_FAILED;
374 	uint8_t *single_src_buf = NULL;
375 	uint8_t *single_dst_buf = NULL;
376 
377 	/* initialize zlib stream */
378 	stream.zalloc = Z_NULL;
379 	stream.zfree = Z_NULL;
380 	stream.opaque = Z_NULL;
381 
382 	if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED)
383 		strategy = Z_FIXED;
384 	else
385 		strategy = Z_DEFAULT_STRATEGY;
386 
387 	/*
388 	 * Window bits is the base two logarithm of the window size (in bytes).
389 	 * When doing raw DEFLATE, this number will be negative.
390 	 */
391 	window_bits = -(xform->compress.window_size);
392 	if (xform->compress.chksum == RTE_COMP_CHECKSUM_ADLER32)
393 		window_bits *= -1;
394 	else if (xform->compress.chksum == RTE_COMP_CHECKSUM_CRC32)
395 		window_bits = ZLIB_CRC_CHECKSUM_WINDOW_BITS;
396 
397 	comp_level = xform->compress.level;
398 
399 	if (comp_level != RTE_COMP_LEVEL_NONE)
400 		ret = deflateInit2(&stream, comp_level, Z_DEFLATED,
401 			window_bits, mem_level, strategy);
402 	else
403 		ret = deflateInit(&stream, Z_NO_COMPRESSION);
404 
405 	if (ret != Z_OK) {
406 		printf("Zlib deflate could not be initialized\n");
407 		goto exit;
408 	}
409 
410 	/* Assuming stateless operation */
411 	/* SGL Input */
412 	if (op->m_src->nb_segs > 1) {
413 		single_src_buf = rte_malloc(NULL,
414 				rte_pktmbuf_pkt_len(op->m_src), 0);
415 		if (single_src_buf == NULL) {
416 			RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
417 			goto exit;
418 		}
419 
420 		if (rte_pktmbuf_read(op->m_src, op->src.offset,
421 					rte_pktmbuf_pkt_len(op->m_src) -
422 					op->src.offset,
423 					single_src_buf) == NULL) {
424 			RTE_LOG(ERR, USER1,
425 				"Buffer could not be read entirely\n");
426 			goto exit;
427 		}
428 
429 		stream.avail_in = op->src.length;
430 		stream.next_in = single_src_buf;
431 
432 	} else {
433 		stream.avail_in = op->src.length;
434 		stream.next_in = rte_pktmbuf_mtod_offset(op->m_src, uint8_t *,
435 				op->src.offset);
436 	}
437 	/* SGL output */
438 	if (op->m_dst->nb_segs > 1) {
439 
440 		single_dst_buf = rte_malloc(NULL,
441 				rte_pktmbuf_pkt_len(op->m_dst), 0);
442 			if (single_dst_buf == NULL) {
443 				RTE_LOG(ERR, USER1,
444 					"Buffer could not be allocated\n");
445 			goto exit;
446 		}
447 
448 		stream.avail_out = op->m_dst->pkt_len;
449 		stream.next_out = single_dst_buf;
450 
451 	} else {/* linear output */
452 		stream.avail_out = op->m_dst->data_len;
453 		stream.next_out = rte_pktmbuf_mtod_offset(op->m_dst, uint8_t *,
454 				op->dst.offset);
455 	}
456 
457 	/* Stateless operation, all buffer will be compressed in one go */
458 	zlib_flush = map_zlib_flush_flag(op->flush_flag);
459 	ret = deflate(&stream, zlib_flush);
460 
461 	if (stream.avail_in != 0) {
462 		RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
463 		goto exit;
464 	}
465 
466 	if (ret != Z_STREAM_END)
467 		goto exit;
468 
469 	/* Copy data to destination SGL */
470 	if (op->m_dst->nb_segs > 1) {
471 		uint32_t remaining_data = stream.total_out;
472 		uint8_t *src_data = single_dst_buf;
473 		struct rte_mbuf *dst_buf = op->m_dst;
474 
475 		while (remaining_data > 0) {
476 			uint8_t *dst_data = rte_pktmbuf_mtod_offset(dst_buf,
477 						uint8_t *, op->dst.offset);
478 			/* Last segment */
479 			if (remaining_data < dst_buf->data_len) {
480 				memcpy(dst_data, src_data, remaining_data);
481 				remaining_data = 0;
482 			} else {
483 				memcpy(dst_data, src_data, dst_buf->data_len);
484 				remaining_data -= dst_buf->data_len;
485 				src_data += dst_buf->data_len;
486 				dst_buf = dst_buf->next;
487 			}
488 		}
489 	}
490 
491 	op->consumed = stream.total_in;
492 	if (xform->compress.chksum == RTE_COMP_CHECKSUM_ADLER32) {
493 		rte_pktmbuf_adj(op->m_dst, ZLIB_HEADER_SIZE);
494 		rte_pktmbuf_trim(op->m_dst, ZLIB_TRAILER_SIZE);
495 		op->produced = stream.total_out - (ZLIB_HEADER_SIZE +
496 				ZLIB_TRAILER_SIZE);
497 	} else if (xform->compress.chksum == RTE_COMP_CHECKSUM_CRC32) {
498 		rte_pktmbuf_adj(op->m_dst, GZIP_HEADER_SIZE);
499 		rte_pktmbuf_trim(op->m_dst, GZIP_TRAILER_SIZE);
500 		op->produced = stream.total_out - (GZIP_HEADER_SIZE +
501 				GZIP_TRAILER_SIZE);
502 	} else
503 		op->produced = stream.total_out;
504 
505 	op->status = RTE_COMP_OP_STATUS_SUCCESS;
506 	op->output_chksum = stream.adler;
507 
508 	deflateReset(&stream);
509 
510 	ret = 0;
511 exit:
512 	deflateEnd(&stream);
513 	rte_free(single_src_buf);
514 	rte_free(single_dst_buf);
515 
516 	return ret;
517 }
518 
519 static int
520 decompress_zlib(struct rte_comp_op *op,
521 		const struct rte_comp_xform *xform)
522 {
523 	z_stream stream;
524 	int window_bits;
525 	int zlib_flush;
526 	int ret = TEST_FAILED;
527 	uint8_t *single_src_buf = NULL;
528 	uint8_t *single_dst_buf = NULL;
529 
530 	/* initialize zlib stream */
531 	stream.zalloc = Z_NULL;
532 	stream.zfree = Z_NULL;
533 	stream.opaque = Z_NULL;
534 
535 	/*
536 	 * Window bits is the base two logarithm of the window size (in bytes).
537 	 * When doing raw DEFLATE, this number will be negative.
538 	 */
539 	window_bits = -(xform->decompress.window_size);
540 	ret = inflateInit2(&stream, window_bits);
541 
542 	if (ret != Z_OK) {
543 		printf("Zlib deflate could not be initialized\n");
544 		goto exit;
545 	}
546 
547 	/* Assuming stateless operation */
548 	/* SGL */
549 	if (op->m_src->nb_segs > 1) {
550 		single_src_buf = rte_malloc(NULL,
551 				rte_pktmbuf_pkt_len(op->m_src), 0);
552 		if (single_src_buf == NULL) {
553 			RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
554 			goto exit;
555 		}
556 		single_dst_buf = rte_malloc(NULL,
557 				rte_pktmbuf_pkt_len(op->m_dst), 0);
558 		if (single_dst_buf == NULL) {
559 			RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
560 			goto exit;
561 		}
562 		if (rte_pktmbuf_read(op->m_src, 0,
563 					rte_pktmbuf_pkt_len(op->m_src),
564 					single_src_buf) == NULL) {
565 			RTE_LOG(ERR, USER1,
566 				"Buffer could not be read entirely\n");
567 			goto exit;
568 		}
569 
570 		stream.avail_in = op->src.length;
571 		stream.next_in = single_src_buf;
572 		stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
573 		stream.next_out = single_dst_buf;
574 
575 	} else {
576 		stream.avail_in = op->src.length;
577 		stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
578 		stream.avail_out = op->m_dst->data_len;
579 		stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
580 	}
581 
582 	/* Stateless operation, all buffer will be compressed in one go */
583 	zlib_flush = map_zlib_flush_flag(op->flush_flag);
584 	ret = inflate(&stream, zlib_flush);
585 
586 	if (stream.avail_in != 0) {
587 		RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
588 		goto exit;
589 	}
590 
591 	if (ret != Z_STREAM_END)
592 		goto exit;
593 
594 	if (op->m_src->nb_segs > 1) {
595 		uint32_t remaining_data = stream.total_out;
596 		uint8_t *src_data = single_dst_buf;
597 		struct rte_mbuf *dst_buf = op->m_dst;
598 
599 		while (remaining_data > 0) {
600 			uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
601 					uint8_t *);
602 			/* Last segment */
603 			if (remaining_data < dst_buf->data_len) {
604 				memcpy(dst_data, src_data, remaining_data);
605 				remaining_data = 0;
606 			} else {
607 				memcpy(dst_data, src_data, dst_buf->data_len);
608 				remaining_data -= dst_buf->data_len;
609 				src_data += dst_buf->data_len;
610 				dst_buf = dst_buf->next;
611 			}
612 		}
613 	}
614 
615 	op->consumed = stream.total_in;
616 	op->produced = stream.total_out;
617 	op->status = RTE_COMP_OP_STATUS_SUCCESS;
618 
619 	inflateReset(&stream);
620 
621 	ret = 0;
622 exit:
623 	inflateEnd(&stream);
624 
625 	return ret;
626 }
627 
628 static int
629 prepare_sgl_bufs(const char *test_buf, struct rte_mbuf *head_buf,
630 		uint32_t total_data_size,
631 		struct rte_mempool *small_mbuf_pool,
632 		struct rte_mempool *large_mbuf_pool,
633 		uint8_t limit_segs_in_sgl,
634 		uint16_t seg_size)
635 {
636 	uint32_t remaining_data = total_data_size;
637 	uint16_t num_remaining_segs = DIV_CEIL(remaining_data, seg_size);
638 	struct rte_mempool *pool;
639 	struct rte_mbuf *next_seg;
640 	uint32_t data_size;
641 	char *buf_ptr;
642 	const char *data_ptr = test_buf;
643 	uint16_t i;
644 	int ret;
645 
646 	if (limit_segs_in_sgl != 0 && num_remaining_segs > limit_segs_in_sgl)
647 		num_remaining_segs = limit_segs_in_sgl - 1;
648 
649 	/*
650 	 * Allocate data in the first segment (header) and
651 	 * copy data if test buffer is provided
652 	 */
653 	if (remaining_data < seg_size)
654 		data_size = remaining_data;
655 	else
656 		data_size = seg_size;
657 	buf_ptr = rte_pktmbuf_append(head_buf, data_size);
658 	if (buf_ptr == NULL) {
659 		RTE_LOG(ERR, USER1,
660 			"Not enough space in the 1st buffer\n");
661 		return -1;
662 	}
663 
664 	if (data_ptr != NULL) {
665 		/* Copy characters without NULL terminator */
666 		strncpy(buf_ptr, data_ptr, data_size);
667 		data_ptr += data_size;
668 	}
669 	remaining_data -= data_size;
670 	num_remaining_segs--;
671 
672 	/*
673 	 * Allocate the rest of the segments,
674 	 * copy the rest of the data and chain the segments.
675 	 */
676 	for (i = 0; i < num_remaining_segs; i++) {
677 
678 		if (i == (num_remaining_segs - 1)) {
679 			/* last segment */
680 			if (remaining_data > seg_size)
681 				pool = large_mbuf_pool;
682 			else
683 				pool = small_mbuf_pool;
684 			data_size = remaining_data;
685 		} else {
686 			data_size = seg_size;
687 			pool = small_mbuf_pool;
688 		}
689 
690 		next_seg = rte_pktmbuf_alloc(pool);
691 		if (next_seg == NULL) {
692 			RTE_LOG(ERR, USER1,
693 				"New segment could not be allocated "
694 				"from the mempool\n");
695 			return -1;
696 		}
697 		buf_ptr = rte_pktmbuf_append(next_seg, data_size);
698 		if (buf_ptr == NULL) {
699 			RTE_LOG(ERR, USER1,
700 				"Not enough space in the buffer\n");
701 			rte_pktmbuf_free(next_seg);
702 			return -1;
703 		}
704 		if (data_ptr != NULL) {
705 			/* Copy characters without NULL terminator */
706 			strncpy(buf_ptr, data_ptr, data_size);
707 			data_ptr += data_size;
708 		}
709 		remaining_data -= data_size;
710 
711 		ret = rte_pktmbuf_chain(head_buf, next_seg);
712 		if (ret != 0) {
713 			rte_pktmbuf_free(next_seg);
714 			RTE_LOG(ERR, USER1,
715 				"Segment could not chained\n");
716 			return -1;
717 		}
718 	}
719 
720 	return 0;
721 }
722 
/*
 * No-op free callback for externally attached mbuf data; the memzones
 * used as external buffers are owned and released by the test itself.
 */
static void
extbuf_free_callback(void *addr __rte_unused, void *opaque __rte_unused)
{
}
727 
728 /*
729  * Compresses and decompresses buffer with compressdev API and Zlib API
730  */
731 static int
732 test_deflate_comp_decomp(const struct interim_data_params *int_data,
733 		const struct test_data_params *test_data)
734 {
735 	struct comp_testsuite_params *ts_params = &testsuite_params;
736 	const char * const *test_bufs = int_data->test_bufs;
737 	unsigned int num_bufs = int_data->num_bufs;
738 	uint16_t *buf_idx = int_data->buf_idx;
739 	struct rte_comp_xform **compress_xforms = int_data->compress_xforms;
740 	struct rte_comp_xform **decompress_xforms = int_data->decompress_xforms;
741 	unsigned int num_xforms = int_data->num_xforms;
742 	enum rte_comp_op_type compress_state = test_data->compress_state;
743 	enum rte_comp_op_type decompress_state = test_data->decompress_state;
744 	unsigned int buff_type = test_data->buff_type;
745 	unsigned int out_of_space = test_data->out_of_space;
746 	unsigned int big_data = test_data->big_data;
747 	enum zlib_direction zlib_dir = test_data->zlib_dir;
748 	int ret_status = TEST_FAILED;
749 	struct rte_mbuf_ext_shared_info inbuf_info;
750 	struct rte_mbuf_ext_shared_info compbuf_info;
751 	struct rte_mbuf_ext_shared_info decompbuf_info;
752 	int ret;
753 	struct rte_mbuf *uncomp_bufs[num_bufs];
754 	struct rte_mbuf *comp_bufs[num_bufs];
755 	struct rte_comp_op *ops[num_bufs];
756 	struct rte_comp_op *ops_processed[num_bufs];
757 	void *priv_xforms[num_bufs];
758 	uint16_t num_enqd, num_deqd, num_total_deqd;
759 	uint16_t num_priv_xforms = 0;
760 	unsigned int deqd_retries = 0;
761 	struct priv_op_data *priv_data;
762 	char *buf_ptr;
763 	unsigned int i;
764 	struct rte_mempool *buf_pool;
765 	uint32_t data_size;
766 	/* Compressing with CompressDev */
767 	unsigned int oos_zlib_decompress =
768 			(zlib_dir == ZLIB_NONE || zlib_dir == ZLIB_DECOMPRESS);
769 	/* Decompressing with CompressDev */
770 	unsigned int oos_zlib_compress =
771 			(zlib_dir == ZLIB_NONE || zlib_dir == ZLIB_COMPRESS);
772 	const struct rte_compressdev_capabilities *capa =
773 		rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
774 	char *contig_buf = NULL;
775 	uint64_t compress_checksum[num_bufs];
776 	void *stream = NULL;
777 	char *all_decomp_data = NULL;
778 	unsigned int decomp_produced_data_size = 0;
779 	unsigned int step = 0;
780 
781 	TEST_ASSERT(decompress_state == RTE_COMP_OP_STATELESS || num_bufs == 1,
782 		    "Number of stateful operations in a step should be 1");
783 
784 	if (capa == NULL) {
785 		RTE_LOG(ERR, USER1,
786 			"Compress device does not support DEFLATE\n");
787 		return -ENOTSUP;
788 	}
789 
790 	/* Initialize all arrays to NULL */
791 	memset(uncomp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
792 	memset(comp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
793 	memset(ops, 0, sizeof(struct rte_comp_op *) * num_bufs);
794 	memset(ops_processed, 0, sizeof(struct rte_comp_op *) * num_bufs);
795 	memset(priv_xforms, 0, sizeof(void *) * num_bufs);
796 
797 	if (decompress_state == RTE_COMP_OP_STATEFUL) {
798 		data_size = strlen(test_bufs[0]) + 1;
799 		all_decomp_data = rte_malloc(NULL, data_size,
800 					     RTE_CACHE_LINE_SIZE);
801 	}
802 
803 	if (big_data)
804 		buf_pool = ts_params->big_mbuf_pool;
805 	else if (buff_type == SGL_BOTH)
806 		buf_pool = ts_params->small_mbuf_pool;
807 	else
808 		buf_pool = ts_params->large_mbuf_pool;
809 
810 	/* Prepare the source mbufs with the data */
811 	ret = rte_pktmbuf_alloc_bulk(buf_pool,
812 				uncomp_bufs, num_bufs);
813 	if (ret < 0) {
814 		RTE_LOG(ERR, USER1,
815 			"Source mbufs could not be allocated "
816 			"from the mempool\n");
817 		goto exit;
818 	}
819 
820 	if (test_data->use_external_mbufs) {
821 		inbuf_info.free_cb = extbuf_free_callback;
822 		inbuf_info.fcb_opaque = NULL;
823 		rte_mbuf_ext_refcnt_set(&inbuf_info, 1);
824 		for (i = 0; i < num_bufs; i++) {
825 			rte_pktmbuf_attach_extbuf(uncomp_bufs[i],
826 					test_data->inbuf_memzone->addr,
827 					test_data->inbuf_memzone->iova,
828 					test_data->inbuf_data_size,
829 					&inbuf_info);
830 			rte_pktmbuf_append(uncomp_bufs[i],
831 					test_data->inbuf_data_size);
832 		}
833 	} else if (buff_type == SGL_BOTH || buff_type == SGL_TO_LB) {
834 		for (i = 0; i < num_bufs; i++) {
835 			data_size = strlen(test_bufs[i]) + 1;
836 			if (prepare_sgl_bufs(test_bufs[i], uncomp_bufs[i],
837 			    data_size,
838 			    big_data ? buf_pool : ts_params->small_mbuf_pool,
839 			    big_data ? buf_pool : ts_params->large_mbuf_pool,
840 			    big_data ? 0 : MAX_SEGS,
841 			    big_data ? MAX_DATA_MBUF_SIZE : SMALL_SEG_SIZE) < 0)
842 				goto exit;
843 		}
844 	} else {
845 		for (i = 0; i < num_bufs; i++) {
846 			data_size = strlen(test_bufs[i]) + 1;
847 			buf_ptr = rte_pktmbuf_append(uncomp_bufs[i], data_size);
848 			strlcpy(buf_ptr, test_bufs[i], data_size);
849 		}
850 	}
851 
852 	/* Prepare the destination mbufs */
853 	ret = rte_pktmbuf_alloc_bulk(buf_pool, comp_bufs, num_bufs);
854 	if (ret < 0) {
855 		RTE_LOG(ERR, USER1,
856 			"Destination mbufs could not be allocated "
857 			"from the mempool\n");
858 		goto exit;
859 	}
860 
861 	if (test_data->use_external_mbufs) {
862 		compbuf_info.free_cb = extbuf_free_callback;
863 		compbuf_info.fcb_opaque = NULL;
864 		rte_mbuf_ext_refcnt_set(&compbuf_info, 1);
865 		for (i = 0; i < num_bufs; i++) {
866 			rte_pktmbuf_attach_extbuf(comp_bufs[i],
867 					test_data->compbuf_memzone->addr,
868 					test_data->compbuf_memzone->iova,
869 					test_data->compbuf_memzone->len,
870 					&compbuf_info);
871 			rte_pktmbuf_append(comp_bufs[i],
872 					test_data->compbuf_memzone->len);
873 		}
874 	} else if (buff_type == SGL_BOTH || buff_type == LB_TO_SGL) {
875 		for (i = 0; i < num_bufs; i++) {
876 			if (out_of_space == 1 && oos_zlib_decompress)
877 				data_size = OUT_OF_SPACE_BUF;
878 			else
879 				(data_size = strlen(test_bufs[i]) *
880 					COMPRESS_BUF_SIZE_RATIO);
881 
882 			if (prepare_sgl_bufs(NULL, comp_bufs[i],
883 			      data_size,
884 			      big_data ? buf_pool : ts_params->small_mbuf_pool,
885 			      big_data ? buf_pool : ts_params->large_mbuf_pool,
886 			      big_data ? 0 : MAX_SEGS,
887 			      big_data ? MAX_DATA_MBUF_SIZE : SMALL_SEG_SIZE)
888 					< 0)
889 				goto exit;
890 		}
891 
892 	} else {
893 		for (i = 0; i < num_bufs; i++) {
894 			if (out_of_space == 1 && oos_zlib_decompress)
895 				data_size = OUT_OF_SPACE_BUF;
896 			else
897 				(data_size = strlen(test_bufs[i]) *
898 					COMPRESS_BUF_SIZE_RATIO);
899 
900 			rte_pktmbuf_append(comp_bufs[i], data_size);
901 		}
902 	}
903 
904 	/* Build the compression operations */
905 	ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
906 	if (ret < 0) {
907 		RTE_LOG(ERR, USER1,
908 			"Compress operations could not be allocated "
909 			"from the mempool\n");
910 		goto exit;
911 	}
912 
913 
914 	for (i = 0; i < num_bufs; i++) {
915 		ops[i]->m_src = uncomp_bufs[i];
916 		ops[i]->m_dst = comp_bufs[i];
917 		ops[i]->src.offset = 0;
918 		ops[i]->src.length = rte_pktmbuf_pkt_len(uncomp_bufs[i]);
919 		ops[i]->dst.offset = 0;
920 		if (compress_state == RTE_COMP_OP_STATELESS)
921 			ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
922 		else {
923 			RTE_LOG(ERR, USER1,
924 				"Stateful operations are not supported "
925 				"in these tests yet\n");
926 			goto exit;
927 		}
928 		ops[i]->input_chksum = 0;
929 		/*
930 		 * Store original operation index in private data,
931 		 * since ordering does not have to be maintained,
932 		 * when dequeueing from compressdev, so a comparison
933 		 * at the end of the test can be done.
934 		 */
935 		priv_data = (struct priv_op_data *) (ops[i] + 1);
936 		priv_data->orig_idx = i;
937 	}
938 
939 	/* Compress data (either with Zlib API or compressdev API */
940 	if (zlib_dir == ZLIB_COMPRESS || zlib_dir == ZLIB_ALL) {
941 		for (i = 0; i < num_bufs; i++) {
942 			const struct rte_comp_xform *compress_xform =
943 				compress_xforms[i % num_xforms];
944 			ret = compress_zlib(ops[i], compress_xform,
945 					DEFAULT_MEM_LEVEL);
946 			if (ret < 0)
947 				goto exit;
948 
949 			ops_processed[i] = ops[i];
950 		}
951 	} else {
952 		/* Create compress private xform data */
953 		for (i = 0; i < num_xforms; i++) {
954 			ret = rte_compressdev_private_xform_create(0,
955 				(const struct rte_comp_xform *)compress_xforms[i],
956 				&priv_xforms[i]);
957 			if (ret < 0) {
958 				RTE_LOG(ERR, USER1,
959 					"Compression private xform "
960 					"could not be created\n");
961 				goto exit;
962 			}
963 			num_priv_xforms++;
964 		}
965 
966 		if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
967 			/* Attach shareable private xform data to ops */
968 			for (i = 0; i < num_bufs; i++)
969 				ops[i]->private_xform = priv_xforms[i % num_xforms];
970 		} else {
971 			/* Create rest of the private xforms for the other ops */
972 			for (i = num_xforms; i < num_bufs; i++) {
973 				ret = rte_compressdev_private_xform_create(0,
974 					compress_xforms[i % num_xforms],
975 					&priv_xforms[i]);
976 				if (ret < 0) {
977 					RTE_LOG(ERR, USER1,
978 						"Compression private xform "
979 						"could not be created\n");
980 					goto exit;
981 				}
982 				num_priv_xforms++;
983 			}
984 
985 			/* Attach non shareable private xform data to ops */
986 			for (i = 0; i < num_bufs; i++)
987 				ops[i]->private_xform = priv_xforms[i];
988 		}
989 
990 		/* Enqueue and dequeue all operations */
991 		num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
992 		if (num_enqd < num_bufs) {
993 			RTE_LOG(ERR, USER1,
994 				"The operations could not be enqueued\n");
995 			goto exit;
996 		}
997 
998 		num_total_deqd = 0;
999 		do {
1000 			/*
1001 			 * If retrying a dequeue call, wait for 10 ms to allow
1002 			 * enough time to the driver to process the operations
1003 			 */
1004 			if (deqd_retries != 0) {
1005 				/*
1006 				 * Avoid infinite loop if not all the
1007 				 * operations get out of the device
1008 				 */
1009 				if (deqd_retries == MAX_DEQD_RETRIES) {
1010 					RTE_LOG(ERR, USER1,
1011 						"Not all operations could be "
1012 						"dequeued\n");
1013 					goto exit;
1014 				}
1015 				usleep(DEQUEUE_WAIT_TIME);
1016 			}
1017 			num_deqd = rte_compressdev_dequeue_burst(0, 0,
1018 					&ops_processed[num_total_deqd], num_bufs);
1019 			num_total_deqd += num_deqd;
1020 			deqd_retries++;
1021 
1022 		} while (num_total_deqd < num_enqd);
1023 
1024 		deqd_retries = 0;
1025 
1026 		/* Free compress private xforms */
1027 		for (i = 0; i < num_priv_xforms; i++) {
1028 			rte_compressdev_private_xform_free(0, priv_xforms[i]);
1029 			priv_xforms[i] = NULL;
1030 		}
1031 		num_priv_xforms = 0;
1032 	}
1033 
1034 	for (i = 0; i < num_bufs; i++) {
1035 		priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1036 		uint16_t xform_idx = priv_data->orig_idx % num_xforms;
1037 		const struct rte_comp_compress_xform *compress_xform =
1038 				&compress_xforms[xform_idx]->compress;
1039 		enum rte_comp_huffman huffman_type =
1040 			compress_xform->deflate.huffman;
1041 		char engine[] = "zlib (directly, not PMD)";
1042 		if (zlib_dir != ZLIB_COMPRESS && zlib_dir != ZLIB_ALL)
1043 			strlcpy(engine, "PMD", sizeof(engine));
1044 
1045 		RTE_LOG(DEBUG, USER1, "Buffer %u compressed by %s from %u to"
1046 			" %u bytes (level = %d, huffman = %s)\n",
1047 			buf_idx[priv_data->orig_idx], engine,
1048 			ops_processed[i]->consumed, ops_processed[i]->produced,
1049 			compress_xform->level,
1050 			huffman_type_strings[huffman_type]);
1051 		RTE_LOG(DEBUG, USER1, "Compression ratio = %.2f\n",
1052 			ops_processed[i]->consumed == 0 ? 0 :
1053 			(float)ops_processed[i]->produced /
1054 			ops_processed[i]->consumed * 100);
1055 		if (compress_xform->chksum != RTE_COMP_CHECKSUM_NONE)
1056 			compress_checksum[i] = ops_processed[i]->output_chksum;
1057 		ops[i] = NULL;
1058 	}
1059 
1060 	/*
1061 	 * Check operation status and free source mbufs (destination mbuf and
1062 	 * compress operation information is needed for the decompression stage)
1063 	 */
1064 	for (i = 0; i < num_bufs; i++) {
1065 		if (out_of_space && oos_zlib_decompress) {
1066 			if (ops_processed[i]->status !=
1067 					RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
1068 				ret_status = TEST_FAILED;
1069 				RTE_LOG(ERR, USER1,
1070 					"Operation without expected out of "
1071 					"space status error\n");
1072 				goto exit;
1073 			} else
1074 				continue;
1075 		}
1076 
1077 		if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
1078 			RTE_LOG(ERR, USER1,
1079 				"Some operations were not successful\n");
1080 			goto exit;
1081 		}
1082 		priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1083 		rte_pktmbuf_free(uncomp_bufs[priv_data->orig_idx]);
1084 		uncomp_bufs[priv_data->orig_idx] = NULL;
1085 	}
1086 
1087 	if (out_of_space && oos_zlib_decompress) {
1088 		ret_status = TEST_SUCCESS;
1089 		goto exit;
1090 	}
1091 
1092 	/* Allocate buffers for decompressed data */
1093 	ret = rte_pktmbuf_alloc_bulk(buf_pool, uncomp_bufs, num_bufs);
1094 	if (ret < 0) {
1095 		RTE_LOG(ERR, USER1,
1096 			"Destination mbufs could not be allocated "
1097 			"from the mempool\n");
1098 		goto exit;
1099 	}
1100 
1101 	if (test_data->use_external_mbufs) {
1102 		decompbuf_info.free_cb = extbuf_free_callback;
1103 		decompbuf_info.fcb_opaque = NULL;
1104 		rte_mbuf_ext_refcnt_set(&decompbuf_info, 1);
1105 		for (i = 0; i < num_bufs; i++) {
1106 			rte_pktmbuf_attach_extbuf(uncomp_bufs[i],
1107 					test_data->uncompbuf_memzone->addr,
1108 					test_data->uncompbuf_memzone->iova,
1109 					test_data->uncompbuf_memzone->len,
1110 					&decompbuf_info);
1111 			rte_pktmbuf_append(uncomp_bufs[i],
1112 					test_data->uncompbuf_memzone->len);
1113 		}
1114 	} else if (buff_type == SGL_BOTH || buff_type == LB_TO_SGL) {
1115 		for (i = 0; i < num_bufs; i++) {
1116 			priv_data = (struct priv_op_data *)
1117 					(ops_processed[i] + 1);
1118 			if (out_of_space == 1 && oos_zlib_compress)
1119 				data_size = OUT_OF_SPACE_BUF;
1120 			else if (test_data->decompress_output_block_size != 0)
1121 				data_size =
1122 					test_data->decompress_output_block_size;
1123 			else
1124 				data_size =
1125 				strlen(test_bufs[priv_data->orig_idx]) + 1;
1126 
1127 			if (prepare_sgl_bufs(NULL, uncomp_bufs[i],
1128 			       data_size,
1129 			       big_data ? buf_pool : ts_params->small_mbuf_pool,
1130 			       big_data ? buf_pool : ts_params->large_mbuf_pool,
1131 			       big_data ? 0 : MAX_SEGS,
1132 			       big_data ? MAX_DATA_MBUF_SIZE : SMALL_SEG_SIZE)
1133 					< 0)
1134 				goto exit;
1135 		}
1136 
1137 	} else {
1138 		for (i = 0; i < num_bufs; i++) {
1139 			priv_data = (struct priv_op_data *)
1140 					(ops_processed[i] + 1);
1141 			if (out_of_space == 1 && oos_zlib_compress)
1142 				data_size = OUT_OF_SPACE_BUF;
1143 			else if (test_data->decompress_output_block_size != 0)
1144 				data_size =
1145 					test_data->decompress_output_block_size;
1146 			else
1147 				data_size =
1148 				strlen(test_bufs[priv_data->orig_idx]) + 1;
1149 
1150 			rte_pktmbuf_append(uncomp_bufs[i], data_size);
1151 		}
1152 	}
1153 
1154 	/* Build the decompression operations */
1155 	ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
1156 	if (ret < 0) {
1157 		RTE_LOG(ERR, USER1,
1158 			"Decompress operations could not be allocated "
1159 			"from the mempool\n");
1160 		goto exit;
1161 	}
1162 
1163 	/* Source buffer is the compressed data from the previous operations */
1164 	for (i = 0; i < num_bufs; i++) {
1165 		ops[i]->m_src = ops_processed[i]->m_dst;
1166 		ops[i]->m_dst = uncomp_bufs[i];
1167 		ops[i]->src.offset = 0;
1168 		/*
1169 		 * Set the length of the compressed data to the
1170 		 * number of bytes that were produced in the previous stage
1171 		 */
1172 		ops[i]->src.length = ops_processed[i]->produced;
1173 
1174 		ops[i]->dst.offset = 0;
1175 		if (decompress_state == RTE_COMP_OP_STATELESS) {
1176 			ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
1177 			ops[i]->op_type = RTE_COMP_OP_STATELESS;
1178 		} else if (zlib_dir == ZLIB_COMPRESS || zlib_dir == ZLIB_NONE) {
1179 			ops[i]->flush_flag = RTE_COMP_FLUSH_SYNC;
1180 			ops[i]->op_type = RTE_COMP_OP_STATEFUL;
1181 		} else {
1182 			RTE_LOG(ERR, USER1,
1183 				"Stateful operations are not supported "
1184 				"in these tests yet\n");
1185 			goto exit;
1186 		}
1187 		ops[i]->input_chksum = 0;
1188 		/*
1189 		 * Copy private data from previous operations,
1190 		 * to keep the pointer to the original buffer
1191 		 */
1192 		memcpy(ops[i] + 1, ops_processed[i] + 1,
1193 				sizeof(struct priv_op_data));
1194 	}
1195 
1196 	/*
1197 	 * Free the previous compress operations,
1198 	 * as they are not needed anymore
1199 	 */
1200 	rte_comp_op_bulk_free(ops_processed, num_bufs);
1201 
1202 	/* Decompress data (either with Zlib API or compressdev API */
1203 	if (zlib_dir == ZLIB_DECOMPRESS || zlib_dir == ZLIB_ALL) {
1204 		for (i = 0; i < num_bufs; i++) {
1205 			priv_data = (struct priv_op_data *)(ops[i] + 1);
1206 			uint16_t xform_idx = priv_data->orig_idx % num_xforms;
1207 			const struct rte_comp_xform *decompress_xform =
1208 				decompress_xforms[xform_idx];
1209 
1210 			ret = decompress_zlib(ops[i], decompress_xform);
1211 			if (ret < 0)
1212 				goto exit;
1213 
1214 			ops_processed[i] = ops[i];
1215 		}
1216 	} else {
1217 		if (decompress_state == RTE_COMP_OP_STATELESS) {
1218 			/* Create decompress private xform data */
1219 			for (i = 0; i < num_xforms; i++) {
1220 				ret = rte_compressdev_private_xform_create(0,
1221 					(const struct rte_comp_xform *)
1222 					decompress_xforms[i],
1223 					&priv_xforms[i]);
1224 				if (ret < 0) {
1225 					RTE_LOG(ERR, USER1,
1226 						"Decompression private xform "
1227 						"could not be created\n");
1228 					goto exit;
1229 				}
1230 				num_priv_xforms++;
1231 			}
1232 
1233 			if (capa->comp_feature_flags &
1234 					RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
1235 				/* Attach shareable private xform data to ops */
1236 				for (i = 0; i < num_bufs; i++) {
1237 					priv_data = (struct priv_op_data *)
1238 							(ops[i] + 1);
1239 					uint16_t xform_idx =
1240 					       priv_data->orig_idx % num_xforms;
1241 					ops[i]->private_xform =
1242 							priv_xforms[xform_idx];
1243 				}
1244 			} else {
1245 				/* Create rest of the private xforms */
1246 				/* for the other ops */
1247 				for (i = num_xforms; i < num_bufs; i++) {
1248 					ret =
1249 					 rte_compressdev_private_xform_create(0,
1250 					      decompress_xforms[i % num_xforms],
1251 					      &priv_xforms[i]);
1252 					if (ret < 0) {
1253 						RTE_LOG(ERR, USER1,
1254 							"Decompression private xform could not be created\n");
1255 						goto exit;
1256 					}
1257 					num_priv_xforms++;
1258 				}
1259 
1260 				/* Attach non shareable private xform data */
1261 				/* to ops */
1262 				for (i = 0; i < num_bufs; i++) {
1263 					priv_data = (struct priv_op_data *)
1264 							(ops[i] + 1);
1265 					uint16_t xform_idx =
1266 							priv_data->orig_idx;
1267 					ops[i]->private_xform =
1268 							priv_xforms[xform_idx];
1269 				}
1270 			}
1271 		} else {
1272 			/* Create a stream object for stateful decompression */
1273 			ret = rte_compressdev_stream_create(0,
1274 					decompress_xforms[0], &stream);
1275 			if (ret < 0) {
1276 				RTE_LOG(ERR, USER1,
1277 					"Decompression stream could not be created, error %d\n",
1278 					ret);
1279 				goto exit;
1280 			}
1281 			/* Attach stream to ops */
1282 			for (i = 0; i < num_bufs; i++)
1283 				ops[i]->stream = stream;
1284 		}
1285 
1286 next_step:
1287 		/* Enqueue and dequeue all operations */
1288 		num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
1289 		if (num_enqd < num_bufs) {
1290 			RTE_LOG(ERR, USER1,
1291 				"The operations could not be enqueued\n");
1292 			goto exit;
1293 		}
1294 
1295 		num_total_deqd = 0;
1296 		do {
1297 			/*
1298 			 * If retrying a dequeue call, wait for 10 ms to allow
1299 			 * enough time to the driver to process the operations
1300 			 */
1301 			if (deqd_retries != 0) {
1302 				/*
1303 				 * Avoid infinite loop if not all the
1304 				 * operations get out of the device
1305 				 */
1306 				if (deqd_retries == MAX_DEQD_RETRIES) {
1307 					RTE_LOG(ERR, USER1,
1308 						"Not all operations could be "
1309 						"dequeued\n");
1310 					goto exit;
1311 				}
1312 				usleep(DEQUEUE_WAIT_TIME);
1313 			}
1314 			num_deqd = rte_compressdev_dequeue_burst(0, 0,
1315 					&ops_processed[num_total_deqd], num_bufs);
1316 			num_total_deqd += num_deqd;
1317 			deqd_retries++;
1318 		} while (num_total_deqd < num_enqd);
1319 
1320 		deqd_retries = 0;
1321 	}
1322 
1323 	for (i = 0; i < num_bufs; i++) {
1324 		priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1325 		char engine[] = "zlib, (directly, no PMD)";
1326 		if (zlib_dir != ZLIB_DECOMPRESS && zlib_dir != ZLIB_ALL)
1327 			strlcpy(engine, "pmd", sizeof(engine));
1328 		RTE_LOG(DEBUG, USER1,
1329 			"Buffer %u decompressed by %s from %u to %u bytes\n",
1330 			buf_idx[priv_data->orig_idx], engine,
1331 			ops_processed[i]->consumed, ops_processed[i]->produced);
1332 		ops[i] = NULL;
1333 	}
1334 
1335 	/*
1336 	 * Check operation status and free source mbuf (destination mbuf and
1337 	 * compress operation information is still needed)
1338 	 */
1339 	for (i = 0; i < num_bufs; i++) {
1340 		if (out_of_space && oos_zlib_compress) {
1341 			if (ops_processed[i]->status !=
1342 					RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
1343 				ret_status = TEST_FAILED;
1344 				RTE_LOG(ERR, USER1,
1345 					"Operation without expected out of "
1346 					"space status error\n");
1347 				goto exit;
1348 			} else
1349 				continue;
1350 		}
1351 
1352 		if (decompress_state == RTE_COMP_OP_STATEFUL
1353 			&& (ops_processed[i]->status ==
1354 				RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE
1355 			    || ops_processed[i]->status ==
1356 				RTE_COMP_OP_STATUS_SUCCESS)) {
1357 			/* collect the output into all_decomp_data */
1358 			const void *ptr = rte_pktmbuf_read(
1359 					ops_processed[i]->m_dst,
1360 					ops_processed[i]->dst.offset,
1361 					ops_processed[i]->produced,
1362 					all_decomp_data +
1363 						decomp_produced_data_size);
1364 			if (ptr != all_decomp_data + decomp_produced_data_size)
1365 				rte_memcpy(all_decomp_data +
1366 					   decomp_produced_data_size,
1367 					   ptr, ops_processed[i]->produced);
1368 			decomp_produced_data_size += ops_processed[i]->produced;
1369 			if (ops_processed[i]->src.length >
1370 					ops_processed[i]->consumed) {
1371 				if (ops_processed[i]->status ==
1372 						RTE_COMP_OP_STATUS_SUCCESS) {
1373 					ret_status = -1;
1374 					RTE_LOG(ERR, USER1,
1375 					      "Operation finished too early\n");
1376 					goto exit;
1377 				}
1378 				step++;
1379 				if (step >= test_data->decompress_steps_max) {
1380 					ret_status = -1;
1381 					RTE_LOG(ERR, USER1,
1382 					  "Operation exceeded maximum steps\n");
1383 					goto exit;
1384 				}
1385 				ops[i] = ops_processed[i];
1386 				ops[i]->status =
1387 					       RTE_COMP_OP_STATUS_NOT_PROCESSED;
1388 				ops[i]->src.offset +=
1389 						ops_processed[i]->consumed;
1390 				ops[i]->src.length -=
1391 						ops_processed[i]->consumed;
1392 				goto next_step;
1393 			} else {
1394 				/* Compare the original stream with the */
1395 				/* decompressed stream (in size and the data) */
1396 				priv_data = (struct priv_op_data *)
1397 						(ops_processed[i] + 1);
1398 				const char *buf1 =
1399 						test_bufs[priv_data->orig_idx];
1400 				const char *buf2 = all_decomp_data;
1401 
1402 				if (compare_buffers(buf1, strlen(buf1) + 1,
1403 					  buf2, decomp_produced_data_size) < 0)
1404 					goto exit;
1405 				/* Test checksums */
1406 				if (compress_xforms[0]->compress.chksum
1407 						!= RTE_COMP_CHECKSUM_NONE) {
1408 					if (ops_processed[i]->output_chksum
1409 						      != compress_checksum[i]) {
1410 						RTE_LOG(ERR, USER1,
1411 							"The checksums differ\n"
1412 			     "Compression Checksum: %" PRIu64 "\tDecompression "
1413 				"Checksum: %" PRIu64 "\n", compress_checksum[i],
1414 					       ops_processed[i]->output_chksum);
1415 						goto exit;
1416 					}
1417 				}
1418 			}
1419 		} else if (ops_processed[i]->status !=
1420 			   RTE_COMP_OP_STATUS_SUCCESS) {
1421 			RTE_LOG(ERR, USER1,
1422 				"Some operations were not successful\n");
1423 			goto exit;
1424 		}
1425 		priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1426 		rte_pktmbuf_free(comp_bufs[priv_data->orig_idx]);
1427 		comp_bufs[priv_data->orig_idx] = NULL;
1428 	}
1429 
1430 	if ((out_of_space && oos_zlib_compress)
1431 			|| (decompress_state == RTE_COMP_OP_STATEFUL)) {
1432 		ret_status = TEST_SUCCESS;
1433 		goto exit;
1434 	}
1435 
1436 	/*
1437 	 * Compare the original stream with the decompressed stream
1438 	 * (in size and the data)
1439 	 */
1440 	for (i = 0; i < num_bufs; i++) {
1441 		priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1442 		const char *buf1 = test_data->use_external_mbufs ?
1443 				test_data->inbuf_memzone->addr :
1444 				test_bufs[priv_data->orig_idx];
1445 		const char *buf2;
1446 		data_size = test_data->use_external_mbufs ?
1447 				test_data->inbuf_data_size :
1448 				strlen(buf1) + 1;
1449 		contig_buf = rte_malloc(NULL, ops_processed[i]->produced, 0);
1450 		if (contig_buf == NULL) {
1451 			RTE_LOG(ERR, USER1, "Contiguous buffer could not "
1452 					"be allocated\n");
1453 			goto exit;
1454 		}
1455 
1456 		buf2 = rte_pktmbuf_read(ops_processed[i]->m_dst, 0,
1457 				ops_processed[i]->produced, contig_buf);
1458 		if (compare_buffers(buf1, data_size,
1459 				buf2, ops_processed[i]->produced) < 0)
1460 			goto exit;
1461 
1462 		/* Test checksums */
1463 		if (compress_xforms[0]->compress.chksum !=
1464 				RTE_COMP_CHECKSUM_NONE) {
1465 			if (ops_processed[i]->output_chksum !=
1466 					compress_checksum[i]) {
1467 				RTE_LOG(ERR, USER1, "The checksums differ\n"
1468 			"Compression Checksum: %" PRIu64 "\tDecompression "
1469 			"Checksum: %" PRIu64 "\n", compress_checksum[i],
1470 			ops_processed[i]->output_chksum);
1471 				goto exit;
1472 			}
1473 		}
1474 
1475 		rte_free(contig_buf);
1476 		contig_buf = NULL;
1477 	}
1478 
1479 	ret_status = TEST_SUCCESS;
1480 
1481 exit:
1482 	/* Free resources */
1483 	for (i = 0; i < num_bufs; i++) {
1484 		rte_pktmbuf_free(uncomp_bufs[i]);
1485 		rte_pktmbuf_free(comp_bufs[i]);
1486 		rte_comp_op_free(ops[i]);
1487 		rte_comp_op_free(ops_processed[i]);
1488 	}
1489 	for (i = 0; i < num_priv_xforms; i++)
1490 		if (priv_xforms[i] != NULL)
1491 			rte_compressdev_private_xform_free(0, priv_xforms[i]);
1492 	if (stream != NULL)
1493 		rte_compressdev_stream_free(0, stream);
1494 	if (all_decomp_data != NULL)
1495 		rte_free(all_decomp_data);
1496 	rte_free(contig_buf);
1497 
1498 	return ret_status;
1499 }
1500 
1501 static int
1502 test_compressdev_deflate_stateless_fixed(void)
1503 {
1504 	struct comp_testsuite_params *ts_params = &testsuite_params;
1505 	uint16_t i;
1506 	int ret;
1507 	const struct rte_compressdev_capabilities *capab;
1508 
1509 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1510 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1511 
1512 	if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
1513 		return -ENOTSUP;
1514 
1515 	struct rte_comp_xform *compress_xform =
1516 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1517 
1518 	if (compress_xform == NULL) {
1519 		RTE_LOG(ERR, USER1,
1520 			"Compress xform could not be created\n");
1521 		ret = TEST_FAILED;
1522 		goto exit;
1523 	}
1524 
1525 	memcpy(compress_xform, ts_params->def_comp_xform,
1526 			sizeof(struct rte_comp_xform));
1527 	compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_FIXED;
1528 
1529 	struct interim_data_params int_data = {
1530 		NULL,
1531 		1,
1532 		NULL,
1533 		&compress_xform,
1534 		&ts_params->def_decomp_xform,
1535 		1
1536 	};
1537 
1538 	struct test_data_params test_data = {
1539 		.compress_state = RTE_COMP_OP_STATELESS,
1540 		.decompress_state = RTE_COMP_OP_STATELESS,
1541 		.buff_type = LB_BOTH,
1542 		.zlib_dir = ZLIB_DECOMPRESS,
1543 		.out_of_space = 0,
1544 		.big_data = 0
1545 	};
1546 
1547 	for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1548 		int_data.test_bufs = &compress_test_bufs[i];
1549 		int_data.buf_idx = &i;
1550 
1551 		/* Compress with compressdev, decompress with Zlib */
1552 		test_data.zlib_dir = ZLIB_DECOMPRESS;
1553 		ret = test_deflate_comp_decomp(&int_data, &test_data);
1554 		if (ret < 0)
1555 			goto exit;
1556 
1557 		/* Compress with Zlib, decompress with compressdev */
1558 		test_data.zlib_dir = ZLIB_COMPRESS;
1559 		ret = test_deflate_comp_decomp(&int_data, &test_data);
1560 		if (ret < 0)
1561 			goto exit;
1562 	}
1563 
1564 	ret = TEST_SUCCESS;
1565 
1566 exit:
1567 	rte_free(compress_xform);
1568 	return ret;
1569 }
1570 
1571 static int
1572 test_compressdev_deflate_stateless_dynamic(void)
1573 {
1574 	struct comp_testsuite_params *ts_params = &testsuite_params;
1575 	uint16_t i;
1576 	int ret;
1577 	struct rte_comp_xform *compress_xform =
1578 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1579 
1580 	const struct rte_compressdev_capabilities *capab;
1581 
1582 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1583 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1584 
1585 	if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
1586 		return -ENOTSUP;
1587 
1588 	if (compress_xform == NULL) {
1589 		RTE_LOG(ERR, USER1,
1590 			"Compress xform could not be created\n");
1591 		ret = TEST_FAILED;
1592 		goto exit;
1593 	}
1594 
1595 	memcpy(compress_xform, ts_params->def_comp_xform,
1596 			sizeof(struct rte_comp_xform));
1597 	compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC;
1598 
1599 	struct interim_data_params int_data = {
1600 		NULL,
1601 		1,
1602 		NULL,
1603 		&compress_xform,
1604 		&ts_params->def_decomp_xform,
1605 		1
1606 	};
1607 
1608 	struct test_data_params test_data = {
1609 		.compress_state = RTE_COMP_OP_STATELESS,
1610 		.decompress_state = RTE_COMP_OP_STATELESS,
1611 		.buff_type = LB_BOTH,
1612 		.zlib_dir = ZLIB_DECOMPRESS,
1613 		.out_of_space = 0,
1614 		.big_data = 0
1615 	};
1616 
1617 	for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1618 		int_data.test_bufs = &compress_test_bufs[i];
1619 		int_data.buf_idx = &i;
1620 
1621 		/* Compress with compressdev, decompress with Zlib */
1622 		test_data.zlib_dir = ZLIB_DECOMPRESS;
1623 		ret = test_deflate_comp_decomp(&int_data, &test_data);
1624 		if (ret < 0)
1625 			goto exit;
1626 
1627 		/* Compress with Zlib, decompress with compressdev */
1628 		test_data.zlib_dir = ZLIB_COMPRESS;
1629 		ret = test_deflate_comp_decomp(&int_data, &test_data);
1630 		if (ret < 0)
1631 			goto exit;
1632 	}
1633 
1634 	ret = TEST_SUCCESS;
1635 
1636 exit:
1637 	rte_free(compress_xform);
1638 	return ret;
1639 }
1640 
1641 static int
1642 test_compressdev_deflate_stateless_multi_op(void)
1643 {
1644 	struct comp_testsuite_params *ts_params = &testsuite_params;
1645 	uint16_t num_bufs = RTE_DIM(compress_test_bufs);
1646 	uint16_t buf_idx[num_bufs];
1647 	uint16_t i;
1648 	int ret;
1649 
1650 	for (i = 0; i < num_bufs; i++)
1651 		buf_idx[i] = i;
1652 
1653 	struct interim_data_params int_data = {
1654 		compress_test_bufs,
1655 		num_bufs,
1656 		buf_idx,
1657 		&ts_params->def_comp_xform,
1658 		&ts_params->def_decomp_xform,
1659 		1
1660 	};
1661 
1662 	struct test_data_params test_data = {
1663 		.compress_state = RTE_COMP_OP_STATELESS,
1664 		.decompress_state = RTE_COMP_OP_STATELESS,
1665 		.buff_type = LB_BOTH,
1666 		.zlib_dir = ZLIB_DECOMPRESS,
1667 		.out_of_space = 0,
1668 		.big_data = 0
1669 	};
1670 
1671 	/* Compress with compressdev, decompress with Zlib */
1672 	test_data.zlib_dir = ZLIB_DECOMPRESS;
1673 	ret = test_deflate_comp_decomp(&int_data, &test_data);
1674 	if (ret < 0)
1675 		return ret;
1676 
1677 	/* Compress with Zlib, decompress with compressdev */
1678 	test_data.zlib_dir = ZLIB_COMPRESS;
1679 	ret = test_deflate_comp_decomp(&int_data, &test_data);
1680 	if (ret < 0)
1681 		return ret;
1682 
1683 	return TEST_SUCCESS;
1684 }
1685 
1686 static int
1687 test_compressdev_deflate_stateless_multi_level(void)
1688 {
1689 	struct comp_testsuite_params *ts_params = &testsuite_params;
1690 	unsigned int level;
1691 	uint16_t i;
1692 	int ret;
1693 	struct rte_comp_xform *compress_xform =
1694 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1695 
1696 	if (compress_xform == NULL) {
1697 		RTE_LOG(ERR, USER1,
1698 			"Compress xform could not be created\n");
1699 		ret = TEST_FAILED;
1700 		goto exit;
1701 	}
1702 
1703 	memcpy(compress_xform, ts_params->def_comp_xform,
1704 			sizeof(struct rte_comp_xform));
1705 
1706 	struct interim_data_params int_data = {
1707 		NULL,
1708 		1,
1709 		NULL,
1710 		&compress_xform,
1711 		&ts_params->def_decomp_xform,
1712 		1
1713 	};
1714 
1715 	struct test_data_params test_data = {
1716 		.compress_state = RTE_COMP_OP_STATELESS,
1717 		.decompress_state = RTE_COMP_OP_STATELESS,
1718 		.buff_type = LB_BOTH,
1719 		.zlib_dir = ZLIB_DECOMPRESS,
1720 		.out_of_space = 0,
1721 		.big_data = 0
1722 	};
1723 
1724 	for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1725 		int_data.test_bufs = &compress_test_bufs[i];
1726 		int_data.buf_idx = &i;
1727 
1728 		for (level = RTE_COMP_LEVEL_MIN; level <= RTE_COMP_LEVEL_MAX;
1729 				level++) {
1730 			compress_xform->compress.level = level;
1731 			/* Compress with compressdev, decompress with Zlib */
1732 			test_data.zlib_dir = ZLIB_DECOMPRESS;
1733 			ret = test_deflate_comp_decomp(&int_data, &test_data);
1734 			if (ret < 0)
1735 				goto exit;
1736 		}
1737 	}
1738 
1739 	ret = TEST_SUCCESS;
1740 
1741 exit:
1742 	rte_free(compress_xform);
1743 	return ret;
1744 }
1745 
1746 #define NUM_XFORMS 3
1747 static int
1748 test_compressdev_deflate_stateless_multi_xform(void)
1749 {
1750 	struct comp_testsuite_params *ts_params = &testsuite_params;
1751 	uint16_t num_bufs = NUM_XFORMS;
1752 	struct rte_comp_xform *compress_xforms[NUM_XFORMS] = {NULL};
1753 	struct rte_comp_xform *decompress_xforms[NUM_XFORMS] = {NULL};
1754 	const char *test_buffers[NUM_XFORMS];
1755 	uint16_t i;
1756 	unsigned int level = RTE_COMP_LEVEL_MIN;
1757 	uint16_t buf_idx[num_bufs];
1758 	int ret;
1759 
1760 	/* Create multiple xforms with various levels */
1761 	for (i = 0; i < NUM_XFORMS; i++) {
1762 		compress_xforms[i] = rte_malloc(NULL,
1763 				sizeof(struct rte_comp_xform), 0);
1764 		if (compress_xforms[i] == NULL) {
1765 			RTE_LOG(ERR, USER1,
1766 				"Compress xform could not be created\n");
1767 			ret = TEST_FAILED;
1768 			goto exit;
1769 		}
1770 
1771 		memcpy(compress_xforms[i], ts_params->def_comp_xform,
1772 				sizeof(struct rte_comp_xform));
1773 		compress_xforms[i]->compress.level = level;
1774 		level++;
1775 
1776 		decompress_xforms[i] = rte_malloc(NULL,
1777 				sizeof(struct rte_comp_xform), 0);
1778 		if (decompress_xforms[i] == NULL) {
1779 			RTE_LOG(ERR, USER1,
1780 				"Decompress xform could not be created\n");
1781 			ret = TEST_FAILED;
1782 			goto exit;
1783 		}
1784 
1785 		memcpy(decompress_xforms[i], ts_params->def_decomp_xform,
1786 				sizeof(struct rte_comp_xform));
1787 	}
1788 
1789 	for (i = 0; i < NUM_XFORMS; i++) {
1790 		buf_idx[i] = 0;
1791 		/* Use the same buffer in all sessions */
1792 		test_buffers[i] = compress_test_bufs[0];
1793 	}
1794 
1795 	struct interim_data_params int_data = {
1796 		test_buffers,
1797 		num_bufs,
1798 		buf_idx,
1799 		compress_xforms,
1800 		decompress_xforms,
1801 		NUM_XFORMS
1802 	};
1803 
1804 	struct test_data_params test_data = {
1805 		.compress_state = RTE_COMP_OP_STATELESS,
1806 		.decompress_state = RTE_COMP_OP_STATELESS,
1807 		.buff_type = LB_BOTH,
1808 		.zlib_dir = ZLIB_DECOMPRESS,
1809 		.out_of_space = 0,
1810 		.big_data = 0
1811 	};
1812 
1813 	/* Compress with compressdev, decompress with Zlib */
1814 	ret = test_deflate_comp_decomp(&int_data, &test_data);
1815 	if (ret < 0)
1816 		goto exit;
1817 
1818 	ret = TEST_SUCCESS;
1819 
1820 exit:
1821 	for (i = 0; i < NUM_XFORMS; i++) {
1822 		rte_free(compress_xforms[i]);
1823 		rte_free(decompress_xforms[i]);
1824 	}
1825 
1826 	return ret;
1827 }
1828 
1829 static int
1830 test_compressdev_deflate_stateless_sgl(void)
1831 {
1832 	struct comp_testsuite_params *ts_params = &testsuite_params;
1833 	uint16_t i;
1834 	int ret;
1835 	const struct rte_compressdev_capabilities *capab;
1836 
1837 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1838 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1839 
1840 	if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
1841 		return -ENOTSUP;
1842 
1843 	struct interim_data_params int_data = {
1844 		NULL,
1845 		1,
1846 		NULL,
1847 		&ts_params->def_comp_xform,
1848 		&ts_params->def_decomp_xform,
1849 		1
1850 	};
1851 
1852 	struct test_data_params test_data = {
1853 		.compress_state = RTE_COMP_OP_STATELESS,
1854 		.decompress_state = RTE_COMP_OP_STATELESS,
1855 		.buff_type = SGL_BOTH,
1856 		.zlib_dir = ZLIB_DECOMPRESS,
1857 		.out_of_space = 0,
1858 		.big_data = 0
1859 	};
1860 
1861 	for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1862 		int_data.test_bufs = &compress_test_bufs[i];
1863 		int_data.buf_idx = &i;
1864 
1865 		/* Compress with compressdev, decompress with Zlib */
1866 		test_data.zlib_dir = ZLIB_DECOMPRESS;
1867 		ret = test_deflate_comp_decomp(&int_data, &test_data);
1868 		if (ret < 0)
1869 			return ret;
1870 
1871 		/* Compress with Zlib, decompress with compressdev */
1872 		test_data.zlib_dir = ZLIB_COMPRESS;
1873 		ret = test_deflate_comp_decomp(&int_data, &test_data);
1874 		if (ret < 0)
1875 			return ret;
1876 
1877 		if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_LB_OUT) {
1878 			/* Compress with compressdev, decompress with Zlib */
1879 			test_data.zlib_dir = ZLIB_DECOMPRESS;
1880 			test_data.buff_type = SGL_TO_LB;
1881 			ret = test_deflate_comp_decomp(&int_data, &test_data);
1882 			if (ret < 0)
1883 				return ret;
1884 
1885 			/* Compress with Zlib, decompress with compressdev */
1886 			test_data.zlib_dir = ZLIB_COMPRESS;
1887 			test_data.buff_type = SGL_TO_LB;
1888 			ret = test_deflate_comp_decomp(&int_data, &test_data);
1889 			if (ret < 0)
1890 				return ret;
1891 		}
1892 
1893 		if (capab->comp_feature_flags & RTE_COMP_FF_OOP_LB_IN_SGL_OUT) {
1894 			/* Compress with compressdev, decompress with Zlib */
1895 			test_data.zlib_dir = ZLIB_DECOMPRESS;
1896 			test_data.buff_type = LB_TO_SGL;
1897 			ret = test_deflate_comp_decomp(&int_data, &test_data);
1898 			if (ret < 0)
1899 				return ret;
1900 
1901 			/* Compress with Zlib, decompress with compressdev */
1902 			test_data.zlib_dir = ZLIB_COMPRESS;
1903 			test_data.buff_type = LB_TO_SGL;
1904 			ret = test_deflate_comp_decomp(&int_data, &test_data);
1905 			if (ret < 0)
1906 				return ret;
1907 		}
1908 	}
1909 
1910 	return TEST_SUCCESS;
1911 }
1912 
1913 static int
1914 test_compressdev_deflate_stateless_checksum(void)
1915 {
1916 	struct comp_testsuite_params *ts_params = &testsuite_params;
1917 	uint16_t i;
1918 	int ret;
1919 	const struct rte_compressdev_capabilities *capab;
1920 
1921 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1922 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1923 
1924 	/* Check if driver supports any checksum */
1925 	if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM) == 0 &&
1926 			(capab->comp_feature_flags &
1927 			RTE_COMP_FF_ADLER32_CHECKSUM) == 0 &&
1928 			(capab->comp_feature_flags &
1929 			RTE_COMP_FF_CRC32_ADLER32_CHECKSUM) == 0)
1930 		return -ENOTSUP;
1931 
1932 	struct rte_comp_xform *compress_xform =
1933 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1934 	if (compress_xform == NULL) {
1935 		RTE_LOG(ERR, USER1, "Compress xform could not be created\n");
1936 		return TEST_FAILED;
1937 	}
1938 
1939 	memcpy(compress_xform, ts_params->def_comp_xform,
1940 			sizeof(struct rte_comp_xform));
1941 
1942 	struct rte_comp_xform *decompress_xform =
1943 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1944 	if (decompress_xform == NULL) {
1945 		RTE_LOG(ERR, USER1, "Decompress xform could not be created\n");
1946 		rte_free(compress_xform);
1947 		return TEST_FAILED;
1948 	}
1949 
1950 	memcpy(decompress_xform, ts_params->def_decomp_xform,
1951 			sizeof(struct rte_comp_xform));
1952 
1953 	struct interim_data_params int_data = {
1954 		NULL,
1955 		1,
1956 		NULL,
1957 		&compress_xform,
1958 		&decompress_xform,
1959 		1
1960 	};
1961 
1962 	struct test_data_params test_data = {
1963 		.compress_state = RTE_COMP_OP_STATELESS,
1964 		.decompress_state = RTE_COMP_OP_STATELESS,
1965 		.buff_type = LB_BOTH,
1966 		.zlib_dir = ZLIB_DECOMPRESS,
1967 		.out_of_space = 0,
1968 		.big_data = 0
1969 	};
1970 
1971 	/* Check if driver supports crc32 checksum and test */
1972 	if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM)) {
1973 		compress_xform->compress.chksum = RTE_COMP_CHECKSUM_CRC32;
1974 		decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_CRC32;
1975 
1976 		for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1977 			/* Compress with compressdev, decompress with Zlib */
1978 			int_data.test_bufs = &compress_test_bufs[i];
1979 			int_data.buf_idx = &i;
1980 
1981 			/* Generate zlib checksum and test against selected
1982 			 * drivers decompression checksum
1983 			 */
1984 			test_data.zlib_dir = ZLIB_COMPRESS;
1985 			ret = test_deflate_comp_decomp(&int_data, &test_data);
1986 			if (ret < 0)
1987 				goto exit;
1988 
1989 			/* Generate compression and decompression
1990 			 * checksum of selected driver
1991 			 */
1992 			test_data.zlib_dir = ZLIB_NONE;
1993 			ret = test_deflate_comp_decomp(&int_data, &test_data);
1994 			if (ret < 0)
1995 				goto exit;
1996 		}
1997 	}
1998 
1999 	/* Check if driver supports adler32 checksum and test */
2000 	if ((capab->comp_feature_flags & RTE_COMP_FF_ADLER32_CHECKSUM)) {
2001 		compress_xform->compress.chksum = RTE_COMP_CHECKSUM_ADLER32;
2002 		decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_ADLER32;
2003 
2004 		for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
2005 			int_data.test_bufs = &compress_test_bufs[i];
2006 			int_data.buf_idx = &i;
2007 
2008 			/* Generate zlib checksum and test against selected
2009 			 * drivers decompression checksum
2010 			 */
2011 			test_data.zlib_dir = ZLIB_COMPRESS;
2012 			ret = test_deflate_comp_decomp(&int_data, &test_data);
2013 			if (ret < 0)
2014 				goto exit;
2015 			/* Generate compression and decompression
2016 			 * checksum of selected driver
2017 			 */
2018 			test_data.zlib_dir = ZLIB_NONE;
2019 			ret = test_deflate_comp_decomp(&int_data, &test_data);
2020 			if (ret < 0)
2021 				goto exit;
2022 		}
2023 	}
2024 
2025 	/* Check if driver supports combined crc and adler checksum and test */
2026 	if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_ADLER32_CHECKSUM)) {
2027 		compress_xform->compress.chksum =
2028 				RTE_COMP_CHECKSUM_CRC32_ADLER32;
2029 		decompress_xform->decompress.chksum =
2030 				RTE_COMP_CHECKSUM_CRC32_ADLER32;
2031 
2032 		for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
2033 			int_data.test_bufs = &compress_test_bufs[i];
2034 			int_data.buf_idx = &i;
2035 
2036 			/* Generate compression and decompression
2037 			 * checksum of selected driver
2038 			 */
2039 			test_data.zlib_dir = ZLIB_NONE;
2040 			ret = test_deflate_comp_decomp(&int_data, &test_data);
2041 			if (ret < 0)
2042 				goto exit;
2043 		}
2044 	}
2045 
2046 	ret = TEST_SUCCESS;
2047 
2048 exit:
2049 	rte_free(compress_xform);
2050 	rte_free(decompress_xform);
2051 	return ret;
2052 }
2053 
2054 static int
2055 test_compressdev_out_of_space_buffer(void)
2056 {
2057 	struct comp_testsuite_params *ts_params = &testsuite_params;
2058 	int ret;
2059 	uint16_t i;
2060 	const struct rte_compressdev_capabilities *capab;
2061 
2062 	RTE_LOG(INFO, USER1, "This is a negative test, errors are expected\n");
2063 
2064 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2065 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2066 
2067 	if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
2068 		return -ENOTSUP;
2069 
2070 	struct interim_data_params int_data = {
2071 		&compress_test_bufs[0],
2072 		1,
2073 		&i,
2074 		&ts_params->def_comp_xform,
2075 		&ts_params->def_decomp_xform,
2076 		1
2077 	};
2078 
2079 	struct test_data_params test_data = {
2080 		.compress_state = RTE_COMP_OP_STATELESS,
2081 		.decompress_state = RTE_COMP_OP_STATELESS,
2082 		.buff_type = LB_BOTH,
2083 		.zlib_dir = ZLIB_DECOMPRESS,
2084 		.out_of_space = 1,  /* run out-of-space test */
2085 		.big_data = 0
2086 	};
2087 	/* Compress with compressdev, decompress with Zlib */
2088 	test_data.zlib_dir = ZLIB_DECOMPRESS;
2089 	ret = test_deflate_comp_decomp(&int_data, &test_data);
2090 	if (ret < 0)
2091 		goto exit;
2092 
2093 	/* Compress with Zlib, decompress with compressdev */
2094 	test_data.zlib_dir = ZLIB_COMPRESS;
2095 	ret = test_deflate_comp_decomp(&int_data, &test_data);
2096 	if (ret < 0)
2097 		goto exit;
2098 
2099 	if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
2100 		/* Compress with compressdev, decompress with Zlib */
2101 		test_data.zlib_dir = ZLIB_DECOMPRESS;
2102 		test_data.buff_type = SGL_BOTH;
2103 		ret = test_deflate_comp_decomp(&int_data, &test_data);
2104 		if (ret < 0)
2105 			goto exit;
2106 
2107 		/* Compress with Zlib, decompress with compressdev */
2108 		test_data.zlib_dir = ZLIB_COMPRESS;
2109 		test_data.buff_type = SGL_BOTH;
2110 		ret = test_deflate_comp_decomp(&int_data, &test_data);
2111 		if (ret < 0)
2112 			goto exit;
2113 	}
2114 
2115 	ret  = TEST_SUCCESS;
2116 
2117 exit:
2118 	return ret;
2119 }
2120 
2121 static int
2122 test_compressdev_deflate_stateless_dynamic_big(void)
2123 {
2124 	struct comp_testsuite_params *ts_params = &testsuite_params;
2125 	uint16_t i = 0;
2126 	int ret;
2127 	int j;
2128 	const struct rte_compressdev_capabilities *capab;
2129 	char *test_buffer = NULL;
2130 
2131 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2132 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2133 
2134 	if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
2135 		return -ENOTSUP;
2136 
2137 	if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
2138 		return -ENOTSUP;
2139 
2140 	test_buffer = rte_malloc(NULL, BIG_DATA_TEST_SIZE, 0);
2141 	if (test_buffer == NULL) {
2142 		RTE_LOG(ERR, USER1,
2143 			"Can't allocate buffer for big-data\n");
2144 		return TEST_FAILED;
2145 	}
2146 
2147 	struct interim_data_params int_data = {
2148 		(const char * const *)&test_buffer,
2149 		1,
2150 		&i,
2151 		&ts_params->def_comp_xform,
2152 		&ts_params->def_decomp_xform,
2153 		1
2154 	};
2155 
2156 	struct test_data_params test_data = {
2157 		.compress_state = RTE_COMP_OP_STATELESS,
2158 		.decompress_state = RTE_COMP_OP_STATELESS,
2159 		.buff_type = SGL_BOTH,
2160 		.zlib_dir = ZLIB_DECOMPRESS,
2161 		.out_of_space = 0,
2162 		.big_data = 1
2163 	};
2164 
2165 	ts_params->def_comp_xform->compress.deflate.huffman =
2166 						RTE_COMP_HUFFMAN_DYNAMIC;
2167 
2168 	/* fill the buffer with data based on rand. data */
2169 	srand(BIG_DATA_TEST_SIZE);
2170 	for (j = 0; j < BIG_DATA_TEST_SIZE - 1; ++j)
2171 		test_buffer[j] = (uint8_t)(rand() % ((uint8_t)-1)) | 1;
2172 	test_buffer[BIG_DATA_TEST_SIZE-1] = 0;
2173 
2174 	/* Compress with compressdev, decompress with Zlib */
2175 	test_data.zlib_dir = ZLIB_DECOMPRESS;
2176 	ret = test_deflate_comp_decomp(&int_data, &test_data);
2177 	if (ret < 0)
2178 		goto exit;
2179 
2180 	/* Compress with Zlib, decompress with compressdev */
2181 	test_data.zlib_dir = ZLIB_COMPRESS;
2182 	ret = test_deflate_comp_decomp(&int_data, &test_data);
2183 	if (ret < 0)
2184 		goto exit;
2185 
2186 	ret = TEST_SUCCESS;
2187 
2188 exit:
2189 	ts_params->def_comp_xform->compress.deflate.huffman =
2190 						RTE_COMP_HUFFMAN_DEFAULT;
2191 	rte_free(test_buffer);
2192 	return ret;
2193 }
2194 
2195 static int
2196 test_compressdev_deflate_stateful_decomp(void)
2197 {
2198 	struct comp_testsuite_params *ts_params = &testsuite_params;
2199 	int ret;
2200 	uint16_t i;
2201 	const struct rte_compressdev_capabilities *capab;
2202 
2203 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2204 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2205 
2206 	if (!(capab->comp_feature_flags & RTE_COMP_FF_STATEFUL_DECOMPRESSION))
2207 		return -ENOTSUP;
2208 
2209 	struct interim_data_params int_data = {
2210 		&compress_test_bufs[0],
2211 		1,
2212 		&i,
2213 		&ts_params->def_comp_xform,
2214 		&ts_params->def_decomp_xform,
2215 		1
2216 	};
2217 
2218 	struct test_data_params test_data = {
2219 		.compress_state = RTE_COMP_OP_STATELESS,
2220 		.decompress_state = RTE_COMP_OP_STATEFUL,
2221 		.buff_type = LB_BOTH,
2222 		.zlib_dir = ZLIB_COMPRESS,
2223 		.out_of_space = 0,
2224 		.big_data = 0,
2225 		.decompress_output_block_size = 2000,
2226 		.decompress_steps_max = 4
2227 	};
2228 
2229 	/* Compress with Zlib, decompress with compressdev */
2230 	if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
2231 		ret = TEST_FAILED;
2232 		goto exit;
2233 	}
2234 
2235 	if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
2236 		/* Now test with SGL buffers */
2237 		test_data.buff_type = SGL_BOTH;
2238 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
2239 			ret = TEST_FAILED;
2240 			goto exit;
2241 		}
2242 	}
2243 
2244 	ret  = TEST_SUCCESS;
2245 
2246 exit:
2247 	return ret;
2248 }
2249 
2250 static int
2251 test_compressdev_deflate_stateful_decomp_checksum(void)
2252 {
2253 	struct comp_testsuite_params *ts_params = &testsuite_params;
2254 	int ret;
2255 	uint16_t i;
2256 	const struct rte_compressdev_capabilities *capab;
2257 
2258 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
2259 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
2260 
2261 	if (!(capab->comp_feature_flags & RTE_COMP_FF_STATEFUL_DECOMPRESSION))
2262 		return -ENOTSUP;
2263 
2264 	/* Check if driver supports any checksum */
2265 	if (!(capab->comp_feature_flags &
2266 	     (RTE_COMP_FF_CRC32_CHECKSUM | RTE_COMP_FF_ADLER32_CHECKSUM |
2267 	      RTE_COMP_FF_CRC32_ADLER32_CHECKSUM)))
2268 		return -ENOTSUP;
2269 
2270 	struct rte_comp_xform *compress_xform =
2271 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
2272 	if (compress_xform == NULL) {
2273 		RTE_LOG(ERR, USER1, "Compress xform could not be created\n");
2274 		return TEST_FAILED;
2275 	}
2276 
2277 	memcpy(compress_xform, ts_params->def_comp_xform,
2278 	       sizeof(struct rte_comp_xform));
2279 
2280 	struct rte_comp_xform *decompress_xform =
2281 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
2282 	if (decompress_xform == NULL) {
2283 		RTE_LOG(ERR, USER1, "Decompress xform could not be created\n");
2284 		rte_free(compress_xform);
2285 		return TEST_FAILED;
2286 	}
2287 
2288 	memcpy(decompress_xform, ts_params->def_decomp_xform,
2289 	       sizeof(struct rte_comp_xform));
2290 
2291 	struct interim_data_params int_data = {
2292 		&compress_test_bufs[0],
2293 		1,
2294 		&i,
2295 		&compress_xform,
2296 		&decompress_xform,
2297 		1
2298 	};
2299 
2300 	struct test_data_params test_data = {
2301 		.compress_state = RTE_COMP_OP_STATELESS,
2302 		.decompress_state = RTE_COMP_OP_STATEFUL,
2303 		.buff_type = LB_BOTH,
2304 		.zlib_dir = ZLIB_COMPRESS,
2305 		.out_of_space = 0,
2306 		.big_data = 0,
2307 		.decompress_output_block_size = 2000,
2308 		.decompress_steps_max = 4
2309 	};
2310 
2311 	/* Check if driver supports crc32 checksum and test */
2312 	if (capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM) {
2313 		compress_xform->compress.chksum = RTE_COMP_CHECKSUM_CRC32;
2314 		decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_CRC32;
2315 		/* Compress with Zlib, decompress with compressdev */
2316 		test_data.buff_type = LB_BOTH;
2317 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
2318 			ret = TEST_FAILED;
2319 			goto exit;
2320 		}
2321 		if (capab->comp_feature_flags &
2322 				RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
2323 			/* Now test with SGL buffers */
2324 			test_data.buff_type = SGL_BOTH;
2325 			if (test_deflate_comp_decomp(&int_data,
2326 						     &test_data) < 0) {
2327 				ret = TEST_FAILED;
2328 				goto exit;
2329 			}
2330 		}
2331 	}
2332 
2333 	/* Check if driver supports adler32 checksum and test */
2334 	if (capab->comp_feature_flags & RTE_COMP_FF_ADLER32_CHECKSUM) {
2335 		compress_xform->compress.chksum = RTE_COMP_CHECKSUM_ADLER32;
2336 		decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_ADLER32;
2337 		/* Compress with Zlib, decompress with compressdev */
2338 		test_data.buff_type = LB_BOTH;
2339 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
2340 			ret = TEST_FAILED;
2341 			goto exit;
2342 		}
2343 		if (capab->comp_feature_flags &
2344 				RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
2345 			/* Now test with SGL buffers */
2346 			test_data.buff_type = SGL_BOTH;
2347 			if (test_deflate_comp_decomp(&int_data,
2348 						     &test_data) < 0) {
2349 				ret = TEST_FAILED;
2350 				goto exit;
2351 			}
2352 		}
2353 	}
2354 
2355 	/* Check if driver supports combined crc and adler checksum and test */
2356 	if (capab->comp_feature_flags & RTE_COMP_FF_CRC32_ADLER32_CHECKSUM) {
2357 		compress_xform->compress.chksum =
2358 				RTE_COMP_CHECKSUM_CRC32_ADLER32;
2359 		decompress_xform->decompress.chksum =
2360 				RTE_COMP_CHECKSUM_CRC32_ADLER32;
2361 		/* Zlib doesn't support combined checksum */
2362 		test_data.zlib_dir = ZLIB_NONE;
2363 		/* Compress stateless, decompress stateful with compressdev */
2364 		test_data.buff_type = LB_BOTH;
2365 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
2366 			ret = TEST_FAILED;
2367 			goto exit;
2368 		}
2369 		if (capab->comp_feature_flags &
2370 				RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
2371 			/* Now test with SGL buffers */
2372 			test_data.buff_type = SGL_BOTH;
2373 			if (test_deflate_comp_decomp(&int_data,
2374 						     &test_data) < 0) {
2375 				ret = TEST_FAILED;
2376 				goto exit;
2377 			}
2378 		}
2379 	}
2380 
2381 	ret  = TEST_SUCCESS;
2382 
2383 exit:
2384 	rte_free(compress_xform);
2385 	rte_free(decompress_xform);
2386 	return ret;
2387 }
2388 
2389 static const struct rte_memzone *
2390 make_memzone(const char *name, size_t size)
2391 {
2392 	unsigned int socket_id = rte_socket_id();
2393 	char mz_name[RTE_MEMZONE_NAMESIZE];
2394 	const struct rte_memzone *memzone;
2395 
2396 	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "%s_%u", name, socket_id);
2397 	memzone = rte_memzone_lookup(mz_name);
2398 	if (memzone != NULL && memzone->len != size) {
2399 		rte_memzone_free(memzone);
2400 		memzone = NULL;
2401 	}
2402 	if (memzone == NULL) {
2403 		memzone = rte_memzone_reserve_aligned(mz_name, size, socket_id,
2404 				RTE_MEMZONE_IOVA_CONTIG, RTE_CACHE_LINE_SIZE);
2405 		if (memzone == NULL)
2406 			RTE_LOG(ERR, USER1, "Can't allocate memory zone %s",
2407 				mz_name);
2408 	}
2409 	return memzone;
2410 }
2411 
2412 static int
2413 test_compressdev_external_mbufs(void)
2414 {
2415 	struct comp_testsuite_params *ts_params = &testsuite_params;
2416 	size_t data_len = 0;
2417 	uint16_t i;
2418 	int ret = TEST_FAILED;
2419 
2420 	for (i = 0; i < RTE_DIM(compress_test_bufs); i++)
2421 		data_len = RTE_MAX(data_len, strlen(compress_test_bufs[i]) + 1);
2422 
2423 	struct interim_data_params int_data = {
2424 		NULL,
2425 		1,
2426 		NULL,
2427 		&ts_params->def_comp_xform,
2428 		&ts_params->def_decomp_xform,
2429 		1
2430 	};
2431 
2432 	struct test_data_params test_data = {
2433 		.compress_state = RTE_COMP_OP_STATELESS,
2434 		.decompress_state = RTE_COMP_OP_STATELESS,
2435 		.buff_type = LB_BOTH,
2436 		.zlib_dir = ZLIB_DECOMPRESS,
2437 		.out_of_space = 0,
2438 		.big_data = 0,
2439 		.use_external_mbufs = 1,
2440 		.inbuf_data_size = data_len,
2441 		.inbuf_memzone = make_memzone("inbuf", data_len),
2442 		.compbuf_memzone = make_memzone("compbuf", data_len *
2443 						COMPRESS_BUF_SIZE_RATIO),
2444 		.uncompbuf_memzone = make_memzone("decompbuf", data_len)
2445 	};
2446 
2447 	for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
2448 		/* prepare input data */
2449 		data_len = strlen(compress_test_bufs[i]) + 1;
2450 		rte_memcpy(test_data.inbuf_memzone->addr, compress_test_bufs[i],
2451 			   data_len);
2452 		test_data.inbuf_data_size = data_len;
2453 		int_data.buf_idx = &i;
2454 
2455 		/* Compress with compressdev, decompress with Zlib */
2456 		test_data.zlib_dir = ZLIB_DECOMPRESS;
2457 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
2458 			goto exit;
2459 
2460 		/* Compress with Zlib, decompress with compressdev */
2461 		test_data.zlib_dir = ZLIB_COMPRESS;
2462 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
2463 			goto exit;
2464 	}
2465 
2466 	ret = TEST_SUCCESS;
2467 
2468 exit:
2469 	rte_memzone_free(test_data.inbuf_memzone);
2470 	rte_memzone_free(test_data.compbuf_memzone);
2471 	rte_memzone_free(test_data.uncompbuf_memzone);
2472 	return ret;
2473 }
2474 
/* Table of all compressdev unit tests. The suite-level setup/teardown run
 * once around the whole suite; each case (except the invalid-configuration
 * one) is wrapped by generic_ut_setup()/generic_ut_teardown().
 */
static struct unit_test_suite compressdev_testsuite  = {
	.suite_name = "compressdev unit test suite",
	.setup = testsuite_setup,
	.teardown = testsuite_teardown,
	.unit_test_cases = {
		TEST_CASE_ST(NULL, NULL,
			test_compressdev_invalid_configuration),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_fixed),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_dynamic),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_dynamic_big),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_multi_op),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_multi_level),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_multi_xform),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_sgl),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_checksum),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_out_of_space_buffer),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateful_decomp),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateful_decomp_checksum),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_external_mbufs),
		TEST_CASES_END() /**< NULL terminate unit test array */
	}
};
2509 
/* Entry point invoked by the test framework: run the whole suite. */
static int
test_compressdev(void)
{
	return unit_test_suite_runner(&compressdev_testsuite);
}
2515 
/* Expose the suite to the test app as "compressdev_autotest". */
REGISTER_TEST_COMMAND(compressdev_autotest, test_compressdev);
2517