xref: /dpdk/app/test/test_compressdev.c (revision 5ecb687a5698d2d8ec1f3b3b5a7a16bceca3e29c)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation
3  */
4 #include <string.h>
5 #include <zlib.h>
6 #include <math.h>
7 #include <unistd.h>
8 
9 #include <rte_cycles.h>
10 #include <rte_malloc.h>
11 #include <rte_mempool.h>
12 #include <rte_mbuf.h>
13 #include <rte_compressdev.h>
14 #include <rte_string_fns.h>
15 
16 #include "test_compressdev_test_buffer.h"
17 #include "test.h"
18 
/* Integer ceiling division; valid for non-negative operands only */
#define DIV_CEIL(a, b)  ((a) / (b) + ((a) % (b) != 0))

/* Default deflate parameters, shared by the compressdev and zlib paths */
#define DEFAULT_WINDOW_SIZE 15
#define DEFAULT_MEM_LEVEL 8
/* Give up after this many dequeue retries (avoids an infinite poll loop) */
#define MAX_DEQD_RETRIES 10
/* Microseconds slept between dequeue retries (10 ms) */
#define DEQUEUE_WAIT_TIME 10000

/*
 * 30% extra size for compressed data compared to original data,
 * in case data size cannot be reduced and it is actually bigger
 * due to the compress block headers
 */
#define COMPRESS_BUF_SIZE_RATIO 1.3
#define NUM_LARGE_MBUFS 16
/* Segment size used when building chained (SGL) mbufs */
#define SMALL_SEG_SIZE 256
#define MAX_SEGS 16
#define NUM_OPS 16
#define NUM_MAX_XFORMS 16
#define NUM_MAX_INFLIGHT_OPS 128
#define CACHE_SIZE 0

/* zlib windowBits value selecting gzip framing (CRC-32 checksum) */
#define ZLIB_CRC_CHECKSUM_WINDOW_BITS 31
/* Byte sizes of the framing zlib/gzip wrap around raw deflate data */
#define ZLIB_HEADER_SIZE 2
#define ZLIB_TRAILER_SIZE 4
#define GZIP_HEADER_SIZE 10
#define GZIP_TRAILER_SIZE 8

/* Tiny destination size used to force an out-of-space error */
#define OUT_OF_SPACE_BUF 1
47 
48 const char *
49 huffman_type_strings[] = {
50 	[RTE_COMP_HUFFMAN_DEFAULT]	= "PMD default",
51 	[RTE_COMP_HUFFMAN_FIXED]	= "Fixed",
52 	[RTE_COMP_HUFFMAN_DYNAMIC]	= "Dynamic"
53 };
54 
/* Which stage(s), if any, are performed by zlib instead of compressdev */
enum zlib_direction {
	ZLIB_NONE,	/* compressdev performs both stages */
	ZLIB_COMPRESS,	/* zlib compresses, compressdev decompresses */
	ZLIB_DECOMPRESS,	/* compressdev compresses, zlib decompresses */
	ZLIB_ALL	/* zlib performs both stages */
};
61 
/* Buffer layout (linear vs chained/SGL) for source and destination mbufs */
enum varied_buff {
	LB_BOTH = 0,	/* both input and output are linear */
	SGL_BOTH,	/* both input and output are chained */
	SGL_TO_LB,	/* input buffer is chained */
	LB_TO_SGL	/* output buffer is chained */
};
68 
/* Private data stored right after each rte_comp_op in the op pool */
struct priv_op_data {
	uint16_t orig_idx;	/* original op index; ops may be dequeued
				 * out of order, so this restores pairing
				 */
};
72 
/* Suite-wide resources created in setup and released in teardown */
struct comp_testsuite_params {
	struct rte_mempool *large_mbuf_pool;	/* linear-buffer mbufs */
	struct rte_mempool *small_mbuf_pool;	/* small segments for SGL */
	struct rte_mempool *op_pool;		/* comp/decomp operations */
	struct rte_comp_xform *def_comp_xform;	/* default compress xform */
	struct rte_comp_xform *def_decomp_xform; /* default decompress xform */
};
80 
/* Input data and transforms describing one comp/decomp test run */
struct interim_data_params {
	const char * const *test_bufs;	/* NUL-terminated test strings */
	unsigned int num_bufs;
	uint16_t *buf_idx;		/* buffer indexes used in log output */
	struct rte_comp_xform **compress_xforms;
	struct rte_comp_xform **decompress_xforms;
	unsigned int num_xforms;	/* entries in each xform array */
};
89 
/* Knobs selecting how one comp/decomp test run is executed */
struct test_data_params {
	enum rte_comp_op_type state;	/* stateless vs stateful ops */
	enum varied_buff buff_type;	/* linear vs chained buffers */
	enum zlib_direction zlib_dir;	/* stage(s) delegated to zlib */
	unsigned int out_of_space;	/* 1 = force out-of-space error */
};
96 
/* Single shared instance used by setup/teardown and every test case */
static struct comp_testsuite_params testsuite_params = { 0 };
98 
99 static void
100 testsuite_teardown(void)
101 {
102 	struct comp_testsuite_params *ts_params = &testsuite_params;
103 
104 	if (rte_mempool_in_use_count(ts_params->large_mbuf_pool))
105 		RTE_LOG(ERR, USER1, "Large mbuf pool still has unfreed bufs\n");
106 	if (rte_mempool_in_use_count(ts_params->small_mbuf_pool))
107 		RTE_LOG(ERR, USER1, "Small mbuf pool still has unfreed bufs\n");
108 	if (rte_mempool_in_use_count(ts_params->op_pool))
109 		RTE_LOG(ERR, USER1, "op pool still has unfreed ops\n");
110 
111 	rte_mempool_free(ts_params->large_mbuf_pool);
112 	rte_mempool_free(ts_params->small_mbuf_pool);
113 	rte_mempool_free(ts_params->op_pool);
114 	rte_free(ts_params->def_comp_xform);
115 	rte_free(ts_params->def_decomp_xform);
116 }
117 
118 static int
119 testsuite_setup(void)
120 {
121 	struct comp_testsuite_params *ts_params = &testsuite_params;
122 	uint32_t max_buf_size = 0;
123 	unsigned int i;
124 
125 	if (rte_compressdev_count() == 0) {
126 		RTE_LOG(ERR, USER1, "Need at least one compress device\n");
127 		return TEST_FAILED;
128 	}
129 
130 	RTE_LOG(NOTICE, USER1, "Running tests on device %s\n",
131 				rte_compressdev_name_get(0));
132 
133 	for (i = 0; i < RTE_DIM(compress_test_bufs); i++)
134 		max_buf_size = RTE_MAX(max_buf_size,
135 				strlen(compress_test_bufs[i]) + 1);
136 
137 	/*
138 	 * Buffers to be used in compression and decompression.
139 	 * Since decompressed data might be larger than
140 	 * compressed data (due to block header),
141 	 * buffers should be big enough for both cases.
142 	 */
143 	max_buf_size *= COMPRESS_BUF_SIZE_RATIO;
144 	ts_params->large_mbuf_pool = rte_pktmbuf_pool_create("large_mbuf_pool",
145 			NUM_LARGE_MBUFS,
146 			CACHE_SIZE, 0,
147 			max_buf_size + RTE_PKTMBUF_HEADROOM,
148 			rte_socket_id());
149 	if (ts_params->large_mbuf_pool == NULL) {
150 		RTE_LOG(ERR, USER1, "Large mbuf pool could not be created\n");
151 		return TEST_FAILED;
152 	}
153 
154 	/* Create mempool with smaller buffers for SGL testing */
155 	ts_params->small_mbuf_pool = rte_pktmbuf_pool_create("small_mbuf_pool",
156 			NUM_LARGE_MBUFS * MAX_SEGS,
157 			CACHE_SIZE, 0,
158 			SMALL_SEG_SIZE + RTE_PKTMBUF_HEADROOM,
159 			rte_socket_id());
160 	if (ts_params->small_mbuf_pool == NULL) {
161 		RTE_LOG(ERR, USER1, "Small mbuf pool could not be created\n");
162 		goto exit;
163 	}
164 
165 	ts_params->op_pool = rte_comp_op_pool_create("op_pool", NUM_OPS,
166 				0, sizeof(struct priv_op_data),
167 				rte_socket_id());
168 	if (ts_params->op_pool == NULL) {
169 		RTE_LOG(ERR, USER1, "Operation pool could not be created\n");
170 		goto exit;
171 	}
172 
173 	ts_params->def_comp_xform =
174 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
175 	if (ts_params->def_comp_xform == NULL) {
176 		RTE_LOG(ERR, USER1,
177 			"Default compress xform could not be created\n");
178 		goto exit;
179 	}
180 	ts_params->def_decomp_xform =
181 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
182 	if (ts_params->def_decomp_xform == NULL) {
183 		RTE_LOG(ERR, USER1,
184 			"Default decompress xform could not be created\n");
185 		goto exit;
186 	}
187 
188 	/* Initializes default values for compress/decompress xforms */
189 	ts_params->def_comp_xform->type = RTE_COMP_COMPRESS;
190 	ts_params->def_comp_xform->compress.algo = RTE_COMP_ALGO_DEFLATE,
191 	ts_params->def_comp_xform->compress.deflate.huffman =
192 						RTE_COMP_HUFFMAN_DEFAULT;
193 	ts_params->def_comp_xform->compress.level = RTE_COMP_LEVEL_PMD_DEFAULT;
194 	ts_params->def_comp_xform->compress.chksum = RTE_COMP_CHECKSUM_NONE;
195 	ts_params->def_comp_xform->compress.window_size = DEFAULT_WINDOW_SIZE;
196 
197 	ts_params->def_decomp_xform->type = RTE_COMP_DECOMPRESS;
198 	ts_params->def_decomp_xform->decompress.algo = RTE_COMP_ALGO_DEFLATE,
199 	ts_params->def_decomp_xform->decompress.chksum = RTE_COMP_CHECKSUM_NONE;
200 	ts_params->def_decomp_xform->decompress.window_size = DEFAULT_WINDOW_SIZE;
201 
202 	return TEST_SUCCESS;
203 
204 exit:
205 	testsuite_teardown();
206 
207 	return TEST_FAILED;
208 }
209 
210 static int
211 generic_ut_setup(void)
212 {
213 	/* Configure compressdev (one device, one queue pair) */
214 	struct rte_compressdev_config config = {
215 		.socket_id = rte_socket_id(),
216 		.nb_queue_pairs = 1,
217 		.max_nb_priv_xforms = NUM_MAX_XFORMS,
218 		.max_nb_streams = 0
219 	};
220 
221 	if (rte_compressdev_configure(0, &config) < 0) {
222 		RTE_LOG(ERR, USER1, "Device configuration failed\n");
223 		return -1;
224 	}
225 
226 	if (rte_compressdev_queue_pair_setup(0, 0, NUM_MAX_INFLIGHT_OPS,
227 			rte_socket_id()) < 0) {
228 		RTE_LOG(ERR, USER1, "Queue pair setup failed\n");
229 		return -1;
230 	}
231 
232 	if (rte_compressdev_start(0) < 0) {
233 		RTE_LOG(ERR, USER1, "Device could not be started\n");
234 		return -1;
235 	}
236 
237 	return 0;
238 }
239 
240 static void
241 generic_ut_teardown(void)
242 {
243 	rte_compressdev_stop(0);
244 	if (rte_compressdev_close(0) < 0)
245 		RTE_LOG(ERR, USER1, "Device could not be closed\n");
246 }
247 
248 static int
249 test_compressdev_invalid_configuration(void)
250 {
251 	struct rte_compressdev_config invalid_config;
252 	struct rte_compressdev_config valid_config = {
253 		.socket_id = rte_socket_id(),
254 		.nb_queue_pairs = 1,
255 		.max_nb_priv_xforms = NUM_MAX_XFORMS,
256 		.max_nb_streams = 0
257 	};
258 	struct rte_compressdev_info dev_info;
259 
260 	/* Invalid configuration with 0 queue pairs */
261 	memcpy(&invalid_config, &valid_config,
262 			sizeof(struct rte_compressdev_config));
263 	invalid_config.nb_queue_pairs = 0;
264 
265 	TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
266 			"Device configuration was successful "
267 			"with no queue pairs (invalid)\n");
268 
269 	/*
270 	 * Invalid configuration with too many queue pairs
271 	 * (if there is an actual maximum number of queue pairs)
272 	 */
273 	rte_compressdev_info_get(0, &dev_info);
274 	if (dev_info.max_nb_queue_pairs != 0) {
275 		memcpy(&invalid_config, &valid_config,
276 			sizeof(struct rte_compressdev_config));
277 		invalid_config.nb_queue_pairs = dev_info.max_nb_queue_pairs + 1;
278 
279 		TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
280 				"Device configuration was successful "
281 				"with too many queue pairs (invalid)\n");
282 	}
283 
284 	/* Invalid queue pair setup, with no number of queue pairs set */
285 	TEST_ASSERT_FAIL(rte_compressdev_queue_pair_setup(0, 0,
286 				NUM_MAX_INFLIGHT_OPS, rte_socket_id()),
287 			"Queue pair setup was successful "
288 			"with no queue pairs set (invalid)\n");
289 
290 	return TEST_SUCCESS;
291 }
292 
293 static int
294 compare_buffers(const char *buffer1, uint32_t buffer1_len,
295 		const char *buffer2, uint32_t buffer2_len)
296 {
297 	if (buffer1_len != buffer2_len) {
298 		RTE_LOG(ERR, USER1, "Buffer lengths are different\n");
299 		return -1;
300 	}
301 
302 	if (memcmp(buffer1, buffer2, buffer1_len) != 0) {
303 		RTE_LOG(ERR, USER1, "Buffers are different\n");
304 		return -1;
305 	}
306 
307 	return 0;
308 }
309 
/*
 * Translates a compressdev flush flag into the matching zlib flush value.
 * Returns -1 for an unrecognized flag (should never happen).
 */
static int
map_zlib_flush_flag(enum rte_comp_flush_flag flag)
{
	int zlib_flag = -1;

	switch (flag) {
	case RTE_COMP_FLUSH_NONE:
		zlib_flag = Z_NO_FLUSH;
		break;
	case RTE_COMP_FLUSH_SYNC:
		zlib_flag = Z_SYNC_FLUSH;
		break;
	case RTE_COMP_FLUSH_FULL:
		zlib_flag = Z_FULL_FLUSH;
		break;
	case RTE_COMP_FLUSH_FINAL:
		zlib_flag = Z_FINISH;
		break;
	default:
		/* Only the values above are valid flush flags */
		break;
	}

	return zlib_flag;
}
333 
/*
 * Compresses op->m_src into op->m_dst using zlib directly (reference
 * implementation, bypassing the PMD). Assumes a stateless operation:
 * the whole input is deflated with a single deflate() call.
 *
 * Chained (SGL) mbufs are linearized into temporary heap buffers first,
 * and SGL output is copied back segment by segment afterwards. With
 * ADLER32/CRC32 checksums, zlib emits zlib/gzip framing, which is
 * stripped so op->m_dst holds raw deflate data as a PMD would produce.
 *
 * On success, fills in op->consumed, op->produced, op->status and
 * op->output_chksum, and returns 0; returns non-zero on failure.
 */
static int
compress_zlib(struct rte_comp_op *op,
		const struct rte_comp_xform *xform, int mem_level)
{
	z_stream stream;
	int zlib_flush;
	int strategy, window_bits, comp_level;
	int ret = TEST_FAILED;
	uint8_t *single_src_buf = NULL;	/* linearized copy of SGL input */
	uint8_t *single_dst_buf = NULL;	/* staging buffer for SGL output */

	/* initialize zlib stream */
	stream.zalloc = Z_NULL;
	stream.zfree = Z_NULL;
	stream.opaque = Z_NULL;

	if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED)
		strategy = Z_FIXED;
	else
		strategy = Z_DEFAULT_STRATEGY;

	/*
	 * Window bits is the base two logarithm of the window size (in bytes).
	 * When doing raw DEFLATE, this number will be negative.
	 */
	window_bits = -(xform->compress.window_size);
	/* Positive windowBits select zlib framing (Adler-32 trailer) */
	if (xform->compress.chksum == RTE_COMP_CHECKSUM_ADLER32)
		window_bits *= -1;
	/* windowBits of 31 select gzip framing (CRC-32 trailer) */
	else if (xform->compress.chksum == RTE_COMP_CHECKSUM_CRC32)
		window_bits = ZLIB_CRC_CHECKSUM_WINDOW_BITS;

	comp_level = xform->compress.level;

	if (comp_level != RTE_COMP_LEVEL_NONE)
		ret = deflateInit2(&stream, comp_level, Z_DEFLATED,
			window_bits, mem_level, strategy);
	else
		ret = deflateInit(&stream, Z_NO_COMPRESSION);

	if (ret != Z_OK) {
		printf("Zlib deflate could not be initialized\n");
		goto exit;
	}

	/* Assuming stateless operation */
	/* SGL Input: flatten the chain into one contiguous buffer */
	if (op->m_src->nb_segs > 1) {
		single_src_buf = rte_malloc(NULL,
				rte_pktmbuf_pkt_len(op->m_src), 0);
		if (single_src_buf == NULL) {
			RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
			goto exit;
		}

		if (rte_pktmbuf_read(op->m_src, op->src.offset,
					rte_pktmbuf_pkt_len(op->m_src) -
					op->src.offset,
					single_src_buf) == NULL) {
			RTE_LOG(ERR, USER1,
				"Buffer could not be read entirely\n");
			goto exit;
		}

		stream.avail_in = op->src.length;
		stream.next_in = single_src_buf;

	} else {
		stream.avail_in = op->src.length;
		stream.next_in = rte_pktmbuf_mtod_offset(op->m_src, uint8_t *,
				op->src.offset);
	}
	/* SGL output: deflate into a staging buffer, copy back below */
	/* NOTE(review): indentation of the next NULL check is misleading;
	 * the control flow itself is correct (if/goto pairing is intact).
	 */
	if (op->m_dst->nb_segs > 1) {

		single_dst_buf = rte_malloc(NULL,
				rte_pktmbuf_pkt_len(op->m_dst), 0);
			if (single_dst_buf == NULL) {
				RTE_LOG(ERR, USER1,
					"Buffer could not be allocated\n");
			goto exit;
		}

		stream.avail_out = op->m_dst->pkt_len;
		stream.next_out = single_dst_buf;

	} else {/* linear output */
		stream.avail_out = op->m_dst->data_len;
		stream.next_out = rte_pktmbuf_mtod_offset(op->m_dst, uint8_t *,
				op->dst.offset);
	}

	/* Stateless operation, all buffer will be compressed in one go */
	zlib_flush = map_zlib_flush_flag(op->flush_flag);
	ret = deflate(&stream, zlib_flush);

	if (stream.avail_in != 0) {
		RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
		goto exit;
	}

	/* Z_STREAM_END confirms the whole input fit in the output space */
	if (ret != Z_STREAM_END)
		goto exit;

	/* Copy data to destination SGL */
	/* NOTE(review): op->dst.offset is applied to every segment here,
	 * not just the first; callers appear to always use offset 0 —
	 * confirm before reusing with a non-zero destination offset.
	 */
	if (op->m_dst->nb_segs > 1) {
		uint32_t remaining_data = stream.total_out;
		uint8_t *src_data = single_dst_buf;
		struct rte_mbuf *dst_buf = op->m_dst;

		while (remaining_data > 0) {
			uint8_t *dst_data = rte_pktmbuf_mtod_offset(dst_buf,
						uint8_t *, op->dst.offset);
			/* Last segment */
			if (remaining_data < dst_buf->data_len) {
				memcpy(dst_data, src_data, remaining_data);
				remaining_data = 0;
			} else {
				memcpy(dst_data, src_data, dst_buf->data_len);
				remaining_data -= dst_buf->data_len;
				src_data += dst_buf->data_len;
				dst_buf = dst_buf->next;
			}
		}
	}

	op->consumed = stream.total_in;
	/* Strip the zlib/gzip framing so only raw deflate data remains */
	if (xform->compress.chksum == RTE_COMP_CHECKSUM_ADLER32) {
		rte_pktmbuf_adj(op->m_dst, ZLIB_HEADER_SIZE);
		rte_pktmbuf_trim(op->m_dst, ZLIB_TRAILER_SIZE);
		op->produced = stream.total_out - (ZLIB_HEADER_SIZE +
				ZLIB_TRAILER_SIZE);
	} else if (xform->compress.chksum == RTE_COMP_CHECKSUM_CRC32) {
		rte_pktmbuf_adj(op->m_dst, GZIP_HEADER_SIZE);
		rte_pktmbuf_trim(op->m_dst, GZIP_TRAILER_SIZE);
		op->produced = stream.total_out - (GZIP_HEADER_SIZE +
				GZIP_TRAILER_SIZE);
	} else
		op->produced = stream.total_out;

	op->status = RTE_COMP_OP_STATUS_SUCCESS;
	/* zlib keeps the running checksum (adler32 or crc32) in .adler */
	op->output_chksum = stream.adler;

	deflateReset(&stream);

	ret = 0;
exit:
	deflateEnd(&stream);
	rte_free(single_src_buf);
	rte_free(single_dst_buf);

	return ret;
}
486 
/*
 * Decompresses op->m_src into op->m_dst using zlib directly (reference
 * implementation, bypassing the PMD). Assumes a stateless operation:
 * the whole input is inflated with a single inflate() call. Negative
 * windowBits select raw deflate input (no zlib/gzip framing).
 *
 * On success, fills in op->consumed, op->produced and op->status and
 * returns 0; returns non-zero on failure.
 */
static int
decompress_zlib(struct rte_comp_op *op,
		const struct rte_comp_xform *xform)
{
	z_stream stream;
	int window_bits;
	int zlib_flush;
	int ret = TEST_FAILED;
	uint8_t *single_src_buf = NULL;	/* linearized copy of SGL input */
	uint8_t *single_dst_buf = NULL;	/* staging buffer for SGL output */

	/* initialize zlib stream */
	stream.zalloc = Z_NULL;
	stream.zfree = Z_NULL;
	stream.opaque = Z_NULL;

	/*
	 * Window bits is the base two logarithm of the window size (in bytes).
	 * When doing raw DEFLATE, this number will be negative.
	 */
	window_bits = -(xform->decompress.window_size);
	ret = inflateInit2(&stream, window_bits);

	if (ret != Z_OK) {
		/* NOTE(review): message says "deflate" but this is inflate */
		printf("Zlib deflate could not be initialized\n");
		goto exit;
	}

	/* Assuming stateless operation */
	/* SGL */
	/* NOTE(review): the staging buffers and the copy-back below are
	 * both keyed on the SOURCE mbuf being chained; presumably a linear
	 * source is never paired with a chained destination on this zlib
	 * path — confirm against the callers.
	 */
	if (op->m_src->nb_segs > 1) {
		single_src_buf = rte_malloc(NULL,
				rte_pktmbuf_pkt_len(op->m_src), 0);
		if (single_src_buf == NULL) {
			RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
			goto exit;
		}
		single_dst_buf = rte_malloc(NULL,
				rte_pktmbuf_pkt_len(op->m_dst), 0);
		if (single_dst_buf == NULL) {
			RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
			goto exit;
		}
		if (rte_pktmbuf_read(op->m_src, 0,
					rte_pktmbuf_pkt_len(op->m_src),
					single_src_buf) == NULL) {
			RTE_LOG(ERR, USER1,
				"Buffer could not be read entirely\n");
			goto exit;
		}

		stream.avail_in = op->src.length;
		stream.next_in = single_src_buf;
		stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
		stream.next_out = single_dst_buf;

	} else {
		stream.avail_in = op->src.length;
		stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
		stream.avail_out = op->m_dst->data_len;
		stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
	}

	/* Stateless operation, all buffer will be compressed in one go */
	zlib_flush = map_zlib_flush_flag(op->flush_flag);
	ret = inflate(&stream, zlib_flush);

	if (stream.avail_in != 0) {
		RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
		goto exit;
	}

	/* Z_STREAM_END confirms a complete deflate stream was consumed */
	if (ret != Z_STREAM_END)
		goto exit;

	/* Copy the staged output back into the destination chain */
	if (op->m_src->nb_segs > 1) {
		uint32_t remaining_data = stream.total_out;
		uint8_t *src_data = single_dst_buf;
		struct rte_mbuf *dst_buf = op->m_dst;

		while (remaining_data > 0) {
			uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
					uint8_t *);
			/* Last segment */
			if (remaining_data < dst_buf->data_len) {
				memcpy(dst_data, src_data, remaining_data);
				remaining_data = 0;
			} else {
				memcpy(dst_data, src_data, dst_buf->data_len);
				remaining_data -= dst_buf->data_len;
				src_data += dst_buf->data_len;
				dst_buf = dst_buf->next;
			}
		}
	}

	op->consumed = stream.total_in;
	op->produced = stream.total_out;
	op->status = RTE_COMP_OP_STATUS_SUCCESS;

	inflateReset(&stream);

	ret = 0;
exit:
	inflateEnd(&stream);

	return ret;
}
595 
596 static int
597 prepare_sgl_bufs(const char *test_buf, struct rte_mbuf *head_buf,
598 		uint32_t total_data_size,
599 		struct rte_mempool *small_mbuf_pool,
600 		struct rte_mempool *large_mbuf_pool,
601 		uint8_t limit_segs_in_sgl)
602 {
603 	uint32_t remaining_data = total_data_size;
604 	uint16_t num_remaining_segs = DIV_CEIL(remaining_data, SMALL_SEG_SIZE);
605 	struct rte_mempool *pool;
606 	struct rte_mbuf *next_seg;
607 	uint32_t data_size;
608 	char *buf_ptr;
609 	const char *data_ptr = test_buf;
610 	uint16_t i;
611 	int ret;
612 
613 	if (limit_segs_in_sgl != 0 && num_remaining_segs > limit_segs_in_sgl)
614 		num_remaining_segs = limit_segs_in_sgl - 1;
615 
616 	/*
617 	 * Allocate data in the first segment (header) and
618 	 * copy data if test buffer is provided
619 	 */
620 	if (remaining_data < SMALL_SEG_SIZE)
621 		data_size = remaining_data;
622 	else
623 		data_size = SMALL_SEG_SIZE;
624 	buf_ptr = rte_pktmbuf_append(head_buf, data_size);
625 	if (buf_ptr == NULL) {
626 		RTE_LOG(ERR, USER1,
627 			"Not enough space in the 1st buffer\n");
628 		return -1;
629 	}
630 
631 	if (data_ptr != NULL) {
632 		/* Copy characters without NULL terminator */
633 		strncpy(buf_ptr, data_ptr, data_size);
634 		data_ptr += data_size;
635 	}
636 	remaining_data -= data_size;
637 	num_remaining_segs--;
638 
639 	/*
640 	 * Allocate the rest of the segments,
641 	 * copy the rest of the data and chain the segments.
642 	 */
643 	for (i = 0; i < num_remaining_segs; i++) {
644 
645 		if (i == (num_remaining_segs - 1)) {
646 			/* last segment */
647 			if (remaining_data > SMALL_SEG_SIZE)
648 				pool = large_mbuf_pool;
649 			else
650 				pool = small_mbuf_pool;
651 			data_size = remaining_data;
652 		} else {
653 			data_size = SMALL_SEG_SIZE;
654 			pool = small_mbuf_pool;
655 		}
656 
657 		next_seg = rte_pktmbuf_alloc(pool);
658 		if (next_seg == NULL) {
659 			RTE_LOG(ERR, USER1,
660 				"New segment could not be allocated "
661 				"from the mempool\n");
662 			return -1;
663 		}
664 		buf_ptr = rte_pktmbuf_append(next_seg, data_size);
665 		if (buf_ptr == NULL) {
666 			RTE_LOG(ERR, USER1,
667 				"Not enough space in the buffer\n");
668 			rte_pktmbuf_free(next_seg);
669 			return -1;
670 		}
671 		if (data_ptr != NULL) {
672 			/* Copy characters without NULL terminator */
673 			strncpy(buf_ptr, data_ptr, data_size);
674 			data_ptr += data_size;
675 		}
676 		remaining_data -= data_size;
677 
678 		ret = rte_pktmbuf_chain(head_buf, next_seg);
679 		if (ret != 0) {
680 			rte_pktmbuf_free(next_seg);
681 			RTE_LOG(ERR, USER1,
682 				"Segment could not chained\n");
683 			return -1;
684 		}
685 	}
686 
687 	return 0;
688 }
689 
690 /*
691  * Compresses and decompresses buffer with compressdev API and Zlib API
692  */
693 static int
694 test_deflate_comp_decomp(const struct interim_data_params *int_data,
695 		const struct test_data_params *test_data)
696 {
697 	struct comp_testsuite_params *ts_params = &testsuite_params;
698 	const char * const *test_bufs = int_data->test_bufs;
699 	unsigned int num_bufs = int_data->num_bufs;
700 	uint16_t *buf_idx = int_data->buf_idx;
701 	struct rte_comp_xform **compress_xforms = int_data->compress_xforms;
702 	struct rte_comp_xform **decompress_xforms = int_data->decompress_xforms;
703 	unsigned int num_xforms = int_data->num_xforms;
704 	enum rte_comp_op_type state = test_data->state;
705 	unsigned int buff_type = test_data->buff_type;
706 	unsigned int out_of_space = test_data->out_of_space;
707 	enum zlib_direction zlib_dir = test_data->zlib_dir;
708 	int ret_status = -1;
709 	int ret;
710 	struct rte_mbuf *uncomp_bufs[num_bufs];
711 	struct rte_mbuf *comp_bufs[num_bufs];
712 	struct rte_comp_op *ops[num_bufs];
713 	struct rte_comp_op *ops_processed[num_bufs];
714 	void *priv_xforms[num_bufs];
715 	uint16_t num_enqd, num_deqd, num_total_deqd;
716 	uint16_t num_priv_xforms = 0;
717 	unsigned int deqd_retries = 0;
718 	struct priv_op_data *priv_data;
719 	char *buf_ptr;
720 	unsigned int i;
721 	struct rte_mempool *buf_pool;
722 	uint32_t data_size;
723 	/* Compressing with CompressDev */
724 	unsigned int oos_zlib_decompress =
725 			(zlib_dir == ZLIB_NONE || zlib_dir == ZLIB_DECOMPRESS);
726 	/* Decompressing with CompressDev */
727 	unsigned int oos_zlib_compress =
728 			(zlib_dir == ZLIB_NONE || zlib_dir == ZLIB_COMPRESS);
729 	const struct rte_compressdev_capabilities *capa =
730 		rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
731 	char *contig_buf = NULL;
732 	uint64_t compress_checksum[num_bufs];
733 
734 	/* Initialize all arrays to NULL */
735 	memset(uncomp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
736 	memset(comp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
737 	memset(ops, 0, sizeof(struct rte_comp_op *) * num_bufs);
738 	memset(ops_processed, 0, sizeof(struct rte_comp_op *) * num_bufs);
739 	memset(priv_xforms, 0, sizeof(void *) * num_bufs);
740 
741 	if (buff_type == SGL_BOTH)
742 		buf_pool = ts_params->small_mbuf_pool;
743 	else
744 		buf_pool = ts_params->large_mbuf_pool;
745 
746 	/* Prepare the source mbufs with the data */
747 	ret = rte_pktmbuf_alloc_bulk(buf_pool,
748 				uncomp_bufs, num_bufs);
749 	if (ret < 0) {
750 		RTE_LOG(ERR, USER1,
751 			"Source mbufs could not be allocated "
752 			"from the mempool\n");
753 		goto exit;
754 	}
755 
756 	if (buff_type == SGL_BOTH || buff_type == SGL_TO_LB) {
757 		for (i = 0; i < num_bufs; i++) {
758 			data_size = strlen(test_bufs[i]) + 1;
759 			if (prepare_sgl_bufs(test_bufs[i], uncomp_bufs[i],
760 					data_size,
761 					ts_params->small_mbuf_pool,
762 					ts_params->large_mbuf_pool,
763 					MAX_SEGS) < 0)
764 				goto exit;
765 		}
766 	} else {
767 		for (i = 0; i < num_bufs; i++) {
768 			data_size = strlen(test_bufs[i]) + 1;
769 			buf_ptr = rte_pktmbuf_append(uncomp_bufs[i], data_size);
770 			strlcpy(buf_ptr, test_bufs[i], data_size);
771 		}
772 	}
773 
774 	/* Prepare the destination mbufs */
775 	ret = rte_pktmbuf_alloc_bulk(buf_pool, comp_bufs, num_bufs);
776 	if (ret < 0) {
777 		RTE_LOG(ERR, USER1,
778 			"Destination mbufs could not be allocated "
779 			"from the mempool\n");
780 		goto exit;
781 	}
782 
783 	if (buff_type == SGL_BOTH || buff_type == LB_TO_SGL) {
784 		for (i = 0; i < num_bufs; i++) {
785 			if (out_of_space == 1 && oos_zlib_decompress)
786 				data_size = OUT_OF_SPACE_BUF;
787 			else
788 				(data_size = strlen(test_bufs[i]) *
789 					COMPRESS_BUF_SIZE_RATIO);
790 
791 			if (prepare_sgl_bufs(NULL, comp_bufs[i],
792 					data_size,
793 					ts_params->small_mbuf_pool,
794 					ts_params->large_mbuf_pool,
795 					MAX_SEGS) < 0)
796 				goto exit;
797 		}
798 
799 	} else {
800 		for (i = 0; i < num_bufs; i++) {
801 			if (out_of_space == 1 && oos_zlib_decompress)
802 				data_size = OUT_OF_SPACE_BUF;
803 			else
804 				(data_size = strlen(test_bufs[i]) *
805 					COMPRESS_BUF_SIZE_RATIO);
806 
807 			rte_pktmbuf_append(comp_bufs[i], data_size);
808 		}
809 	}
810 
811 	/* Build the compression operations */
812 	ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
813 	if (ret < 0) {
814 		RTE_LOG(ERR, USER1,
815 			"Compress operations could not be allocated "
816 			"from the mempool\n");
817 		goto exit;
818 	}
819 
820 
821 	for (i = 0; i < num_bufs; i++) {
822 		ops[i]->m_src = uncomp_bufs[i];
823 		ops[i]->m_dst = comp_bufs[i];
824 		ops[i]->src.offset = 0;
825 		ops[i]->src.length = rte_pktmbuf_pkt_len(uncomp_bufs[i]);
826 		ops[i]->dst.offset = 0;
827 		if (state == RTE_COMP_OP_STATELESS) {
828 			ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
829 		} else {
830 			RTE_LOG(ERR, USER1,
831 				"Stateful operations are not supported "
832 				"in these tests yet\n");
833 			goto exit;
834 		}
835 		ops[i]->input_chksum = 0;
836 		/*
837 		 * Store original operation index in private data,
838 		 * since ordering does not have to be maintained,
839 		 * when dequeueing from compressdev, so a comparison
840 		 * at the end of the test can be done.
841 		 */
842 		priv_data = (struct priv_op_data *) (ops[i] + 1);
843 		priv_data->orig_idx = i;
844 	}
845 
846 	/* Compress data (either with Zlib API or compressdev API */
847 	if (zlib_dir == ZLIB_COMPRESS || zlib_dir == ZLIB_ALL) {
848 		for (i = 0; i < num_bufs; i++) {
849 			const struct rte_comp_xform *compress_xform =
850 				compress_xforms[i % num_xforms];
851 			ret = compress_zlib(ops[i], compress_xform,
852 					DEFAULT_MEM_LEVEL);
853 			if (ret < 0)
854 				goto exit;
855 
856 			ops_processed[i] = ops[i];
857 		}
858 	} else {
859 		/* Create compress private xform data */
860 		for (i = 0; i < num_xforms; i++) {
861 			ret = rte_compressdev_private_xform_create(0,
862 				(const struct rte_comp_xform *)compress_xforms[i],
863 				&priv_xforms[i]);
864 			if (ret < 0) {
865 				RTE_LOG(ERR, USER1,
866 					"Compression private xform "
867 					"could not be created\n");
868 				goto exit;
869 			}
870 			num_priv_xforms++;
871 		}
872 
873 		if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
874 			/* Attach shareable private xform data to ops */
875 			for (i = 0; i < num_bufs; i++)
876 				ops[i]->private_xform = priv_xforms[i % num_xforms];
877 		} else {
878 			/* Create rest of the private xforms for the other ops */
879 			for (i = num_xforms; i < num_bufs; i++) {
880 				ret = rte_compressdev_private_xform_create(0,
881 					compress_xforms[i % num_xforms],
882 					&priv_xforms[i]);
883 				if (ret < 0) {
884 					RTE_LOG(ERR, USER1,
885 						"Compression private xform "
886 						"could not be created\n");
887 					goto exit;
888 				}
889 				num_priv_xforms++;
890 			}
891 
892 			/* Attach non shareable private xform data to ops */
893 			for (i = 0; i < num_bufs; i++)
894 				ops[i]->private_xform = priv_xforms[i];
895 		}
896 
897 		/* Enqueue and dequeue all operations */
898 		num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
899 		if (num_enqd < num_bufs) {
900 			RTE_LOG(ERR, USER1,
901 				"The operations could not be enqueued\n");
902 			goto exit;
903 		}
904 
905 		num_total_deqd = 0;
906 		do {
907 			/*
908 			 * If retrying a dequeue call, wait for 10 ms to allow
909 			 * enough time to the driver to process the operations
910 			 */
911 			if (deqd_retries != 0) {
912 				/*
913 				 * Avoid infinite loop if not all the
914 				 * operations get out of the device
915 				 */
916 				if (deqd_retries == MAX_DEQD_RETRIES) {
917 					RTE_LOG(ERR, USER1,
918 						"Not all operations could be "
919 						"dequeued\n");
920 					goto exit;
921 				}
922 				usleep(DEQUEUE_WAIT_TIME);
923 			}
924 			num_deqd = rte_compressdev_dequeue_burst(0, 0,
925 					&ops_processed[num_total_deqd], num_bufs);
926 			num_total_deqd += num_deqd;
927 			deqd_retries++;
928 
929 		} while (num_total_deqd < num_enqd);
930 
931 		deqd_retries = 0;
932 
933 		/* Free compress private xforms */
934 		for (i = 0; i < num_priv_xforms; i++) {
935 			rte_compressdev_private_xform_free(0, priv_xforms[i]);
936 			priv_xforms[i] = NULL;
937 		}
938 		num_priv_xforms = 0;
939 	}
940 
941 	for (i = 0; i < num_bufs; i++) {
942 		priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
943 		uint16_t xform_idx = priv_data->orig_idx % num_xforms;
944 		const struct rte_comp_compress_xform *compress_xform =
945 				&compress_xforms[xform_idx]->compress;
946 		enum rte_comp_huffman huffman_type =
947 			compress_xform->deflate.huffman;
948 		char engine[] = "zlib (directly, not PMD)";
949 		if (zlib_dir != ZLIB_COMPRESS || zlib_dir != ZLIB_ALL)
950 			strlcpy(engine, "PMD", sizeof(engine));
951 
952 		RTE_LOG(DEBUG, USER1, "Buffer %u compressed by %s from %u to"
953 			" %u bytes (level = %d, huffman = %s)\n",
954 			buf_idx[priv_data->orig_idx], engine,
955 			ops_processed[i]->consumed, ops_processed[i]->produced,
956 			compress_xform->level,
957 			huffman_type_strings[huffman_type]);
958 		RTE_LOG(DEBUG, USER1, "Compression ratio = %.2f\n",
959 			ops_processed[i]->consumed == 0 ? 0 :
960 			(float)ops_processed[i]->produced /
961 			ops_processed[i]->consumed * 100);
962 		if (compress_xform->chksum != RTE_COMP_CHECKSUM_NONE)
963 			compress_checksum[i] = ops_processed[i]->output_chksum;
964 		ops[i] = NULL;
965 	}
966 
967 	/*
968 	 * Check operation status and free source mbufs (destination mbuf and
969 	 * compress operation information is needed for the decompression stage)
970 	 */
971 	for (i = 0; i < num_bufs; i++) {
972 		if (out_of_space && oos_zlib_decompress) {
973 			if (ops_processed[i]->status !=
974 					RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
975 				ret_status = -1;
976 
977 				RTE_LOG(ERR, USER1,
978 					"Operation without expected out of "
979 					"space status error\n");
980 				goto exit;
981 			} else
982 				continue;
983 		}
984 
985 		if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
986 			RTE_LOG(ERR, USER1,
987 				"Some operations were not successful\n");
988 			goto exit;
989 		}
990 		priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
991 		rte_pktmbuf_free(uncomp_bufs[priv_data->orig_idx]);
992 		uncomp_bufs[priv_data->orig_idx] = NULL;
993 	}
994 
995 	if (out_of_space && oos_zlib_decompress) {
996 		ret_status = 0;
997 		goto exit;
998 	}
999 
1000 	/* Allocate buffers for decompressed data */
1001 	ret = rte_pktmbuf_alloc_bulk(buf_pool, uncomp_bufs, num_bufs);
1002 	if (ret < 0) {
1003 		RTE_LOG(ERR, USER1,
1004 			"Destination mbufs could not be allocated "
1005 			"from the mempool\n");
1006 		goto exit;
1007 	}
1008 
1009 	if (buff_type == SGL_BOTH || buff_type == LB_TO_SGL) {
1010 		for (i = 0; i < num_bufs; i++) {
1011 			priv_data = (struct priv_op_data *)
1012 					(ops_processed[i] + 1);
1013 			if (out_of_space == 1 && oos_zlib_compress)
1014 				data_size = OUT_OF_SPACE_BUF;
1015 			else
1016 				data_size =
1017 				strlen(test_bufs[priv_data->orig_idx]) + 1;
1018 
1019 			if (prepare_sgl_bufs(NULL, uncomp_bufs[i],
1020 					data_size,
1021 					ts_params->small_mbuf_pool,
1022 					ts_params->large_mbuf_pool,
1023 					MAX_SEGS) < 0)
1024 				goto exit;
1025 		}
1026 
1027 	} else {
1028 		for (i = 0; i < num_bufs; i++) {
1029 			priv_data = (struct priv_op_data *)
1030 					(ops_processed[i] + 1);
1031 			if (out_of_space == 1 && oos_zlib_compress)
1032 				data_size = OUT_OF_SPACE_BUF;
1033 			else
1034 				data_size =
1035 				strlen(test_bufs[priv_data->orig_idx]) + 1;
1036 
1037 			rte_pktmbuf_append(uncomp_bufs[i], data_size);
1038 		}
1039 	}
1040 
1041 	/* Build the decompression operations */
1042 	ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
1043 	if (ret < 0) {
1044 		RTE_LOG(ERR, USER1,
1045 			"Decompress operations could not be allocated "
1046 			"from the mempool\n");
1047 		goto exit;
1048 	}
1049 
1050 	/* Source buffer is the compressed data from the previous operations */
1051 	for (i = 0; i < num_bufs; i++) {
1052 		ops[i]->m_src = ops_processed[i]->m_dst;
1053 		ops[i]->m_dst = uncomp_bufs[i];
1054 		ops[i]->src.offset = 0;
1055 		/*
1056 		 * Set the length of the compressed data to the
1057 		 * number of bytes that were produced in the previous stage
1058 		 */
1059 		ops[i]->src.length = ops_processed[i]->produced;
1060 		ops[i]->dst.offset = 0;
1061 		if (state == RTE_COMP_OP_STATELESS) {
1062 			ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
1063 		} else {
1064 			RTE_LOG(ERR, USER1,
1065 				"Stateful operations are not supported "
1066 				"in these tests yet\n");
1067 			goto exit;
1068 		}
1069 		ops[i]->input_chksum = 0;
1070 		/*
1071 		 * Copy private data from previous operations,
1072 		 * to keep the pointer to the original buffer
1073 		 */
1074 		memcpy(ops[i] + 1, ops_processed[i] + 1,
1075 				sizeof(struct priv_op_data));
1076 	}
1077 
1078 	/*
1079 	 * Free the previous compress operations,
1080 	 * as they are not needed anymore
1081 	 */
1082 	rte_comp_op_bulk_free(ops_processed, num_bufs);
1083 
1084 	/* Decompress data (either with Zlib API or compressdev API */
1085 	if (zlib_dir == ZLIB_DECOMPRESS || zlib_dir == ZLIB_ALL) {
1086 		for (i = 0; i < num_bufs; i++) {
1087 			priv_data = (struct priv_op_data *)(ops[i] + 1);
1088 			uint16_t xform_idx = priv_data->orig_idx % num_xforms;
1089 			const struct rte_comp_xform *decompress_xform =
1090 				decompress_xforms[xform_idx];
1091 
1092 			ret = decompress_zlib(ops[i], decompress_xform);
1093 			if (ret < 0)
1094 				goto exit;
1095 
1096 			ops_processed[i] = ops[i];
1097 		}
1098 	} else {
1099 		/* Create decompress private xform data */
1100 		for (i = 0; i < num_xforms; i++) {
1101 			ret = rte_compressdev_private_xform_create(0,
1102 				(const struct rte_comp_xform *)decompress_xforms[i],
1103 				&priv_xforms[i]);
1104 			if (ret < 0) {
1105 				RTE_LOG(ERR, USER1,
1106 					"Decompression private xform "
1107 					"could not be created\n");
1108 				goto exit;
1109 			}
1110 			num_priv_xforms++;
1111 		}
1112 
1113 		if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
1114 			/* Attach shareable private xform data to ops */
1115 			for (i = 0; i < num_bufs; i++) {
1116 				priv_data = (struct priv_op_data *)(ops[i] + 1);
1117 				uint16_t xform_idx = priv_data->orig_idx %
1118 								num_xforms;
1119 				ops[i]->private_xform = priv_xforms[xform_idx];
1120 			}
1121 		} else {
1122 			/* Create rest of the private xforms for the other ops */
1123 			for (i = num_xforms; i < num_bufs; i++) {
1124 				ret = rte_compressdev_private_xform_create(0,
1125 					decompress_xforms[i % num_xforms],
1126 					&priv_xforms[i]);
1127 				if (ret < 0) {
1128 					RTE_LOG(ERR, USER1,
1129 						"Decompression private xform "
1130 						"could not be created\n");
1131 					goto exit;
1132 				}
1133 				num_priv_xforms++;
1134 			}
1135 
1136 			/* Attach non shareable private xform data to ops */
1137 			for (i = 0; i < num_bufs; i++) {
1138 				priv_data = (struct priv_op_data *)(ops[i] + 1);
1139 				uint16_t xform_idx = priv_data->orig_idx;
1140 				ops[i]->private_xform = priv_xforms[xform_idx];
1141 			}
1142 		}
1143 
1144 		/* Enqueue and dequeue all operations */
1145 		num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
1146 		if (num_enqd < num_bufs) {
1147 			RTE_LOG(ERR, USER1,
1148 				"The operations could not be enqueued\n");
1149 			goto exit;
1150 		}
1151 
1152 		num_total_deqd = 0;
1153 		do {
1154 			/*
1155 			 * If retrying a dequeue call, wait for 10 ms to allow
1156 			 * enough time to the driver to process the operations
1157 			 */
1158 			if (deqd_retries != 0) {
1159 				/*
1160 				 * Avoid infinite loop if not all the
1161 				 * operations get out of the device
1162 				 */
1163 				if (deqd_retries == MAX_DEQD_RETRIES) {
1164 					RTE_LOG(ERR, USER1,
1165 						"Not all operations could be "
1166 						"dequeued\n");
1167 					goto exit;
1168 				}
1169 				usleep(DEQUEUE_WAIT_TIME);
1170 			}
1171 			num_deqd = rte_compressdev_dequeue_burst(0, 0,
1172 					&ops_processed[num_total_deqd], num_bufs);
1173 			num_total_deqd += num_deqd;
1174 			deqd_retries++;
1175 		} while (num_total_deqd < num_enqd);
1176 
1177 		deqd_retries = 0;
1178 	}
1179 
1180 	for (i = 0; i < num_bufs; i++) {
1181 		priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1182 		char engine[] = "zlib, (directly, no PMD)";
1183 		if (zlib_dir != ZLIB_DECOMPRESS || zlib_dir != ZLIB_ALL)
1184 			strlcpy(engine, "pmd", sizeof(engine));
1185 		RTE_LOG(DEBUG, USER1,
1186 			"Buffer %u decompressed by %s from %u to %u bytes\n",
1187 			buf_idx[priv_data->orig_idx], engine,
1188 			ops_processed[i]->consumed, ops_processed[i]->produced);
1189 		ops[i] = NULL;
1190 	}
1191 
1192 	/*
1193 	 * Check operation status and free source mbuf (destination mbuf and
1194 	 * compress operation information is still needed)
1195 	 */
1196 	for (i = 0; i < num_bufs; i++) {
1197 		if (out_of_space && oos_zlib_compress) {
1198 			if (ops_processed[i]->status !=
1199 					RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
1200 				ret_status = -1;
1201 
1202 				RTE_LOG(ERR, USER1,
1203 					"Operation without expected out of "
1204 					"space status error\n");
1205 				goto exit;
1206 			} else
1207 				continue;
1208 		}
1209 
1210 		if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
1211 			RTE_LOG(ERR, USER1,
1212 				"Some operations were not successful\n");
1213 			goto exit;
1214 		}
1215 		priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1216 		rte_pktmbuf_free(comp_bufs[priv_data->orig_idx]);
1217 		comp_bufs[priv_data->orig_idx] = NULL;
1218 	}
1219 
1220 	if (out_of_space && oos_zlib_compress) {
1221 		ret_status = 0;
1222 		goto exit;
1223 	}
1224 
1225 	/*
1226 	 * Compare the original stream with the decompressed stream
1227 	 * (in size and the data)
1228 	 */
1229 	for (i = 0; i < num_bufs; i++) {
1230 		priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
1231 		const char *buf1 = test_bufs[priv_data->orig_idx];
1232 		const char *buf2;
1233 		contig_buf = rte_malloc(NULL, ops_processed[i]->produced, 0);
1234 		if (contig_buf == NULL) {
1235 			RTE_LOG(ERR, USER1, "Contiguous buffer could not "
1236 					"be allocated\n");
1237 			goto exit;
1238 		}
1239 
1240 		buf2 = rte_pktmbuf_read(ops_processed[i]->m_dst, 0,
1241 				ops_processed[i]->produced, contig_buf);
1242 		if (compare_buffers(buf1, strlen(buf1) + 1,
1243 				buf2, ops_processed[i]->produced) < 0)
1244 			goto exit;
1245 
1246 		/* Test checksums */
1247 		if (compress_xforms[0]->compress.chksum !=
1248 				RTE_COMP_CHECKSUM_NONE) {
1249 			if (ops_processed[i]->output_chksum !=
1250 					compress_checksum[i]) {
1251 				RTE_LOG(ERR, USER1, "The checksums differ\n"
1252 			"Compression Checksum: %" PRIu64 "\tDecompression "
1253 			"Checksum: %" PRIu64 "\n", compress_checksum[i],
1254 			ops_processed[i]->output_chksum);
1255 				goto exit;
1256 			}
1257 		}
1258 
1259 		rte_free(contig_buf);
1260 		contig_buf = NULL;
1261 	}
1262 
1263 	ret_status = 0;
1264 
1265 exit:
1266 	/* Free resources */
1267 	for (i = 0; i < num_bufs; i++) {
1268 		rte_pktmbuf_free(uncomp_bufs[i]);
1269 		rte_pktmbuf_free(comp_bufs[i]);
1270 		rte_comp_op_free(ops[i]);
1271 		rte_comp_op_free(ops_processed[i]);
1272 	}
1273 	for (i = 0; i < num_priv_xforms; i++) {
1274 		if (priv_xforms[i] != NULL)
1275 			rte_compressdev_private_xform_free(0, priv_xforms[i]);
1276 	}
1277 	rte_free(contig_buf);
1278 
1279 	return ret_status;
1280 }
1281 
1282 static int
1283 test_compressdev_deflate_stateless_fixed(void)
1284 {
1285 	struct comp_testsuite_params *ts_params = &testsuite_params;
1286 	uint16_t i;
1287 	int ret;
1288 	const struct rte_compressdev_capabilities *capab;
1289 
1290 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1291 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1292 
1293 	if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
1294 		return -ENOTSUP;
1295 
1296 	struct rte_comp_xform *compress_xform =
1297 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1298 
1299 	if (compress_xform == NULL) {
1300 		RTE_LOG(ERR, USER1,
1301 			"Compress xform could not be created\n");
1302 		ret = TEST_FAILED;
1303 		goto exit;
1304 	}
1305 
1306 	memcpy(compress_xform, ts_params->def_comp_xform,
1307 			sizeof(struct rte_comp_xform));
1308 	compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_FIXED;
1309 
1310 	struct interim_data_params int_data = {
1311 		NULL,
1312 		1,
1313 		NULL,
1314 		&compress_xform,
1315 		&ts_params->def_decomp_xform,
1316 		1
1317 	};
1318 
1319 	struct test_data_params test_data = {
1320 		RTE_COMP_OP_STATELESS,
1321 		LB_BOTH,
1322 		ZLIB_DECOMPRESS,
1323 		0
1324 	};
1325 
1326 	for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1327 		int_data.test_bufs = &compress_test_bufs[i];
1328 		int_data.buf_idx = &i;
1329 
1330 		/* Compress with compressdev, decompress with Zlib */
1331 		test_data.zlib_dir = ZLIB_DECOMPRESS;
1332 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1333 			ret = TEST_FAILED;
1334 			goto exit;
1335 		}
1336 
1337 		/* Compress with Zlib, decompress with compressdev */
1338 		test_data.zlib_dir = ZLIB_COMPRESS;
1339 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1340 			ret = TEST_FAILED;
1341 			goto exit;
1342 		}
1343 	}
1344 
1345 	ret = TEST_SUCCESS;
1346 
1347 exit:
1348 	rte_free(compress_xform);
1349 	return ret;
1350 }
1351 
1352 static int
1353 test_compressdev_deflate_stateless_dynamic(void)
1354 {
1355 	struct comp_testsuite_params *ts_params = &testsuite_params;
1356 	uint16_t i;
1357 	int ret;
1358 	struct rte_comp_xform *compress_xform =
1359 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1360 
1361 	const struct rte_compressdev_capabilities *capab;
1362 
1363 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1364 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1365 
1366 	if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
1367 		return -ENOTSUP;
1368 
1369 	if (compress_xform == NULL) {
1370 		RTE_LOG(ERR, USER1,
1371 			"Compress xform could not be created\n");
1372 		ret = TEST_FAILED;
1373 		goto exit;
1374 	}
1375 
1376 	memcpy(compress_xform, ts_params->def_comp_xform,
1377 			sizeof(struct rte_comp_xform));
1378 	compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC;
1379 
1380 	struct interim_data_params int_data = {
1381 		NULL,
1382 		1,
1383 		NULL,
1384 		&compress_xform,
1385 		&ts_params->def_decomp_xform,
1386 		1
1387 	};
1388 
1389 	struct test_data_params test_data = {
1390 		RTE_COMP_OP_STATELESS,
1391 		LB_BOTH,
1392 		ZLIB_DECOMPRESS,
1393 		0
1394 	};
1395 
1396 	for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1397 		int_data.test_bufs = &compress_test_bufs[i];
1398 		int_data.buf_idx = &i;
1399 
1400 		/* Compress with compressdev, decompress with Zlib */
1401 		test_data.zlib_dir = ZLIB_DECOMPRESS;
1402 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1403 			ret = TEST_FAILED;
1404 			goto exit;
1405 		}
1406 
1407 		/* Compress with Zlib, decompress with compressdev */
1408 		test_data.zlib_dir = ZLIB_COMPRESS;
1409 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1410 			ret = TEST_FAILED;
1411 			goto exit;
1412 		}
1413 	}
1414 
1415 	ret = TEST_SUCCESS;
1416 
1417 exit:
1418 	rte_free(compress_xform);
1419 	return ret;
1420 }
1421 
1422 static int
1423 test_compressdev_deflate_stateless_multi_op(void)
1424 {
1425 	struct comp_testsuite_params *ts_params = &testsuite_params;
1426 	uint16_t num_bufs = RTE_DIM(compress_test_bufs);
1427 	uint16_t buf_idx[num_bufs];
1428 	uint16_t i;
1429 
1430 	for (i = 0; i < num_bufs; i++)
1431 		buf_idx[i] = i;
1432 
1433 	struct interim_data_params int_data = {
1434 		compress_test_bufs,
1435 		num_bufs,
1436 		buf_idx,
1437 		&ts_params->def_comp_xform,
1438 		&ts_params->def_decomp_xform,
1439 		1
1440 	};
1441 
1442 	struct test_data_params test_data = {
1443 		RTE_COMP_OP_STATELESS,
1444 		LB_BOTH,
1445 		ZLIB_DECOMPRESS,
1446 		0
1447 	};
1448 
1449 	/* Compress with compressdev, decompress with Zlib */
1450 	test_data.zlib_dir = ZLIB_DECOMPRESS;
1451 	if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1452 		return TEST_FAILED;
1453 
1454 	/* Compress with Zlib, decompress with compressdev */
1455 	test_data.zlib_dir = ZLIB_COMPRESS;
1456 	if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1457 		return TEST_FAILED;
1458 
1459 	return TEST_SUCCESS;
1460 }
1461 
1462 static int
1463 test_compressdev_deflate_stateless_multi_level(void)
1464 {
1465 	struct comp_testsuite_params *ts_params = &testsuite_params;
1466 	unsigned int level;
1467 	uint16_t i;
1468 	int ret;
1469 	struct rte_comp_xform *compress_xform =
1470 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1471 
1472 	if (compress_xform == NULL) {
1473 		RTE_LOG(ERR, USER1,
1474 			"Compress xform could not be created\n");
1475 		ret = TEST_FAILED;
1476 		goto exit;
1477 	}
1478 
1479 	memcpy(compress_xform, ts_params->def_comp_xform,
1480 			sizeof(struct rte_comp_xform));
1481 
1482 	struct interim_data_params int_data = {
1483 		NULL,
1484 		1,
1485 		NULL,
1486 		&compress_xform,
1487 		&ts_params->def_decomp_xform,
1488 		1
1489 	};
1490 
1491 	struct test_data_params test_data = {
1492 		RTE_COMP_OP_STATELESS,
1493 		LB_BOTH,
1494 		ZLIB_DECOMPRESS,
1495 		0
1496 	};
1497 
1498 	for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1499 		int_data.test_bufs = &compress_test_bufs[i];
1500 		int_data.buf_idx = &i;
1501 
1502 		for (level = RTE_COMP_LEVEL_MIN; level <= RTE_COMP_LEVEL_MAX;
1503 				level++) {
1504 			compress_xform->compress.level = level;
1505 			/* Compress with compressdev, decompress with Zlib */
1506 			test_data.zlib_dir = ZLIB_DECOMPRESS;
1507 			if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1508 				ret = TEST_FAILED;
1509 				goto exit;
1510 			}
1511 		}
1512 	}
1513 
1514 	ret = TEST_SUCCESS;
1515 
1516 exit:
1517 	rte_free(compress_xform);
1518 	return ret;
1519 }
1520 
#define NUM_XFORMS 3
/*
 * Compress the same input with NUM_XFORMS ops in one burst, each op
 * using a different compression level via its own private xform, then
 * verify every result with zlib decompression.
 */
static int
test_compressdev_deflate_stateless_multi_xform(void)
{
	struct comp_testsuite_params *ts_params = &testsuite_params;
	uint16_t num_bufs = NUM_XFORMS;
	struct rte_comp_xform *compress_xforms[NUM_XFORMS] = {NULL};
	struct rte_comp_xform *decompress_xforms[NUM_XFORMS] = {NULL};
	const char *test_buffers[NUM_XFORMS];
	uint16_t i;
	unsigned int level = RTE_COMP_LEVEL_MIN;
	uint16_t buf_idx[num_bufs];

	int ret;

	/* Create multiple xforms with various levels */
	for (i = 0; i < NUM_XFORMS; i++) {
		compress_xforms[i] = rte_malloc(NULL,
				sizeof(struct rte_comp_xform), 0);
		if (compress_xforms[i] == NULL) {
			RTE_LOG(ERR, USER1,
				"Compress xform could not be created\n");
			ret = TEST_FAILED;
			goto exit;
		}

		/* Clone the default xform, then assign the next level */
		memcpy(compress_xforms[i], ts_params->def_comp_xform,
				sizeof(struct rte_comp_xform));
		compress_xforms[i]->compress.level = level;
		level++;

		decompress_xforms[i] = rte_malloc(NULL,
				sizeof(struct rte_comp_xform), 0);
		if (decompress_xforms[i] == NULL) {
			RTE_LOG(ERR, USER1,
				"Decompress xform could not be created\n");
			ret = TEST_FAILED;
			goto exit;
		}

		memcpy(decompress_xforms[i], ts_params->def_decomp_xform,
				sizeof(struct rte_comp_xform));
	}

	for (i = 0; i < NUM_XFORMS; i++) {
		buf_idx[i] = 0;
		/* Use the same buffer in all sessions */
		test_buffers[i] = compress_test_bufs[0];
	}

	/* One op per xform, all operating on the same source data */
	struct interim_data_params int_data = {
		test_buffers,
		num_bufs,
		buf_idx,
		compress_xforms,
		decompress_xforms,
		NUM_XFORMS
	};

	struct test_data_params test_data = {
		RTE_COMP_OP_STATELESS,
		LB_BOTH,
		ZLIB_DECOMPRESS,
		0
	};

	/* Compress with compressdev, decompress with Zlib */
	if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
		ret = TEST_FAILED;
		goto exit;
	}

	ret = TEST_SUCCESS;
exit:
	/* rte_free(NULL) is a no-op, so partially filled arrays are fine */
	for (i = 0; i < NUM_XFORMS; i++) {
		rte_free(compress_xforms[i]);
		rte_free(decompress_xforms[i]);
	}

	return ret;
}
1602 
1603 static int
1604 test_compressdev_deflate_stateless_sgl(void)
1605 {
1606 	struct comp_testsuite_params *ts_params = &testsuite_params;
1607 	uint16_t i;
1608 	const struct rte_compressdev_capabilities *capab;
1609 
1610 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1611 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1612 
1613 	if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
1614 		return -ENOTSUP;
1615 
1616 	struct interim_data_params int_data = {
1617 		NULL,
1618 		1,
1619 		NULL,
1620 		&ts_params->def_comp_xform,
1621 		&ts_params->def_decomp_xform,
1622 		1
1623 	};
1624 
1625 	struct test_data_params test_data = {
1626 		RTE_COMP_OP_STATELESS,
1627 		SGL_BOTH,
1628 		ZLIB_DECOMPRESS,
1629 		0
1630 	};
1631 
1632 	for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1633 		int_data.test_bufs = &compress_test_bufs[i];
1634 		int_data.buf_idx = &i;
1635 
1636 		/* Compress with compressdev, decompress with Zlib */
1637 		test_data.zlib_dir = ZLIB_DECOMPRESS;
1638 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1639 			return TEST_FAILED;
1640 
1641 		/* Compress with Zlib, decompress with compressdev */
1642 		test_data.zlib_dir = ZLIB_COMPRESS;
1643 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1644 			return TEST_FAILED;
1645 
1646 		if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_LB_OUT) {
1647 			/* Compress with compressdev, decompress with Zlib */
1648 			test_data.zlib_dir = ZLIB_DECOMPRESS;
1649 			test_data.buff_type = SGL_TO_LB;
1650 			if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1651 				return TEST_FAILED;
1652 
1653 			/* Compress with Zlib, decompress with compressdev */
1654 			test_data.zlib_dir = ZLIB_COMPRESS;
1655 			test_data.buff_type = SGL_TO_LB;
1656 			if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1657 				return TEST_FAILED;
1658 		}
1659 
1660 		if (capab->comp_feature_flags & RTE_COMP_FF_OOP_LB_IN_SGL_OUT) {
1661 			/* Compress with compressdev, decompress with Zlib */
1662 			test_data.zlib_dir = ZLIB_DECOMPRESS;
1663 			test_data.buff_type = LB_TO_SGL;
1664 			if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1665 				return TEST_FAILED;
1666 
1667 			/* Compress with Zlib, decompress with compressdev */
1668 			test_data.zlib_dir = ZLIB_COMPRESS;
1669 			test_data.buff_type = LB_TO_SGL;
1670 			if (test_deflate_comp_decomp(&int_data, &test_data) < 0)
1671 				return TEST_FAILED;
1672 		}
1673 
1674 
1675 	}
1676 
1677 	return TEST_SUCCESS;
1678 
1679 }
1680 
1681 static int
1682 test_compressdev_deflate_stateless_checksum(void)
1683 {
1684 	struct comp_testsuite_params *ts_params = &testsuite_params;
1685 	uint16_t i;
1686 	int ret;
1687 	const struct rte_compressdev_capabilities *capab;
1688 
1689 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1690 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1691 
1692 	/* Check if driver supports any checksum */
1693 	if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM) == 0 &&
1694 			(capab->comp_feature_flags &
1695 			RTE_COMP_FF_ADLER32_CHECKSUM) == 0 &&
1696 			(capab->comp_feature_flags &
1697 			RTE_COMP_FF_CRC32_ADLER32_CHECKSUM) == 0)
1698 		return -ENOTSUP;
1699 
1700 	struct rte_comp_xform *compress_xform =
1701 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1702 	if (compress_xform == NULL) {
1703 		RTE_LOG(ERR, USER1, "Compress xform could not be created\n");
1704 		ret = TEST_FAILED;
1705 		return ret;
1706 	}
1707 
1708 	memcpy(compress_xform, ts_params->def_comp_xform,
1709 			sizeof(struct rte_comp_xform));
1710 
1711 	struct rte_comp_xform *decompress_xform =
1712 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1713 	if (decompress_xform == NULL) {
1714 		RTE_LOG(ERR, USER1, "Decompress xform could not be created\n");
1715 		rte_free(compress_xform);
1716 		ret = TEST_FAILED;
1717 		return ret;
1718 	}
1719 
1720 	memcpy(decompress_xform, ts_params->def_decomp_xform,
1721 			sizeof(struct rte_comp_xform));
1722 
1723 	struct interim_data_params int_data = {
1724 		NULL,
1725 		1,
1726 		NULL,
1727 		&compress_xform,
1728 		&decompress_xform,
1729 		1
1730 	};
1731 
1732 	struct test_data_params test_data = {
1733 		RTE_COMP_OP_STATELESS,
1734 		LB_BOTH,
1735 		ZLIB_DECOMPRESS,
1736 		0
1737 	};
1738 
1739 	/* Check if driver supports crc32 checksum and test */
1740 	if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_CHECKSUM)) {
1741 		compress_xform->compress.chksum = RTE_COMP_CHECKSUM_CRC32;
1742 		decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_CRC32;
1743 
1744 		for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1745 			/* Compress with compressdev, decompress with Zlib */
1746 			int_data.test_bufs = &compress_test_bufs[i];
1747 			int_data.buf_idx = &i;
1748 
1749 			/* Generate zlib checksum and test against selected
1750 			 * drivers decompression checksum
1751 			 */
1752 			test_data.zlib_dir = ZLIB_COMPRESS;
1753 			if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1754 				ret = TEST_FAILED;
1755 				goto exit;
1756 			}
1757 
1758 			/* Generate compression and decompression
1759 			 * checksum of selected driver
1760 			 */
1761 			test_data.zlib_dir = ZLIB_NONE;
1762 			if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1763 				ret = TEST_FAILED;
1764 				goto exit;
1765 			}
1766 		}
1767 	}
1768 
1769 	/* Check if driver supports adler32 checksum and test */
1770 	if ((capab->comp_feature_flags & RTE_COMP_FF_ADLER32_CHECKSUM)) {
1771 		compress_xform->compress.chksum = RTE_COMP_CHECKSUM_ADLER32;
1772 		decompress_xform->decompress.chksum = RTE_COMP_CHECKSUM_ADLER32;
1773 
1774 		for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1775 			int_data.test_bufs = &compress_test_bufs[i];
1776 			int_data.buf_idx = &i;
1777 
1778 			/* Generate zlib checksum and test against selected
1779 			 * drivers decompression checksum
1780 			 */
1781 			test_data.zlib_dir = ZLIB_COMPRESS;
1782 			if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1783 				ret = TEST_FAILED;
1784 				goto exit;
1785 			}
1786 			/* Generate compression and decompression
1787 			 * checksum of selected driver
1788 			 */
1789 			test_data.zlib_dir = ZLIB_NONE;
1790 			if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1791 				ret = TEST_FAILED;
1792 				goto exit;
1793 			}
1794 		}
1795 	}
1796 
1797 	/* Check if driver supports combined crc and adler checksum and test */
1798 	if ((capab->comp_feature_flags & RTE_COMP_FF_CRC32_ADLER32_CHECKSUM)) {
1799 		compress_xform->compress.chksum =
1800 				RTE_COMP_CHECKSUM_CRC32_ADLER32;
1801 		decompress_xform->decompress.chksum =
1802 				RTE_COMP_CHECKSUM_CRC32_ADLER32;
1803 
1804 		for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1805 			int_data.test_bufs = &compress_test_bufs[i];
1806 			int_data.buf_idx = &i;
1807 
1808 			/* Generate compression and decompression
1809 			 * checksum of selected driver
1810 			 */
1811 			test_data.zlib_dir = ZLIB_NONE;
1812 			if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1813 				ret = TEST_FAILED;
1814 				goto exit;
1815 			}
1816 		}
1817 	}
1818 
1819 	ret = TEST_SUCCESS;
1820 
1821 exit:
1822 	rte_free(compress_xform);
1823 	rte_free(decompress_xform);
1824 	return ret;
1825 }
1826 
1827 static int
1828 test_compressdev_out_of_space_buffer(void)
1829 {
1830 	struct comp_testsuite_params *ts_params = &testsuite_params;
1831 	int ret;
1832 	uint16_t i;
1833 	const struct rte_compressdev_capabilities *capab;
1834 
1835 	RTE_LOG(INFO, USER1, "This is a negative test errors are expected\n");
1836 
1837 	capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1838 	TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1839 
1840 	if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
1841 		return -ENOTSUP;
1842 
1843 	struct rte_comp_xform *compress_xform =
1844 			rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1845 
1846 	if (compress_xform == NULL) {
1847 		RTE_LOG(ERR, USER1,
1848 			"Compress xform could not be created\n");
1849 		ret = TEST_FAILED;
1850 		goto exit;
1851 	}
1852 
1853 	struct interim_data_params int_data = {
1854 		&compress_test_bufs[0],
1855 		1,
1856 		&i,
1857 		&ts_params->def_comp_xform,
1858 		&ts_params->def_decomp_xform,
1859 		1
1860 	};
1861 
1862 	struct test_data_params test_data = {
1863 		RTE_COMP_OP_STATELESS,
1864 		LB_BOTH,
1865 		ZLIB_DECOMPRESS,
1866 		1
1867 	};
1868 	/* Compress with compressdev, decompress with Zlib */
1869 	test_data.zlib_dir = ZLIB_DECOMPRESS;
1870 	if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1871 		ret = TEST_FAILED;
1872 		goto exit;
1873 	}
1874 
1875 	/* Compress with Zlib, decompress with compressdev */
1876 	test_data.zlib_dir = ZLIB_COMPRESS;
1877 	if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1878 		ret = TEST_FAILED;
1879 		goto exit;
1880 	}
1881 
1882 	if (capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) {
1883 		/* Compress with compressdev, decompress with Zlib */
1884 		test_data.zlib_dir = ZLIB_DECOMPRESS;
1885 		test_data.buff_type = SGL_BOTH;
1886 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1887 			ret = TEST_FAILED;
1888 			goto exit;
1889 		}
1890 
1891 		/* Compress with Zlib, decompress with compressdev */
1892 		test_data.zlib_dir = ZLIB_COMPRESS;
1893 		test_data.buff_type = SGL_BOTH;
1894 		if (test_deflate_comp_decomp(&int_data, &test_data) < 0) {
1895 			ret = TEST_FAILED;
1896 			goto exit;
1897 		}
1898 	}
1899 
1900 	ret  = TEST_SUCCESS;
1901 
1902 exit:
1903 	rte_free(compress_xform);
1904 	return ret;
1905 }
1906 
1907 
/*
 * Suite definition: one negative configuration test followed by the
 * stateless DEFLATE tests. Each case gets a fresh device configuration
 * through generic_ut_setup()/generic_ut_teardown().
 */
static struct unit_test_suite compressdev_testsuite  = {
	.suite_name = "compressdev unit test suite",
	.setup = testsuite_setup,
	.teardown = testsuite_teardown,
	.unit_test_cases = {
		TEST_CASE_ST(NULL, NULL,
			test_compressdev_invalid_configuration),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_fixed),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_dynamic),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_multi_op),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_multi_level),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_multi_xform),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_sgl),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_checksum),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_out_of_space_buffer),
		TEST_CASES_END() /**< NULL terminate unit test array */
	}
};
1934 
/* Entry point: run the whole compressdev unit test suite. */
static int
test_compressdev(void)
{
	return unit_test_suite_runner(&compressdev_testsuite);
}

/* Register the suite as the "compressdev_autotest" test command. */
REGISTER_TEST_COMMAND(compressdev_autotest, test_compressdev);
1942