xref: /dpdk/drivers/compress/zlib/zlib_pmd.c (revision 3d26a70ae33853ac4116b135e55f3d475f148939)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Cavium Networks
3  */
4 
5 #include <rte_bus_vdev.h>
6 #include <rte_common.h>
7 
8 #include "zlib_pmd_private.h"
9 
/* Log type id for this PMD; registered at init time in zlib_init_log() */
int zlib_logtype_driver;
11 
/** Advance to the next mbuf segment in a chain.
 *
 *  Assigns mbuf = mbuf->next; when a next segment exists, points data at
 *  that segment's payload and sets len to its data length, evaluating to
 *  len (the comma expression's value). Evaluates to 0 when the chain is
 *  exhausted, so it can directly terminate a while loop.
 *
 *  NOTE(review): each argument is assigned, so callers must pass plain
 *  lvalues with no side effects. A zero-length middle segment also makes
 *  the expression 0 and ends the loop early — presumably chains here
 *  never contain empty segments; confirm against mbuf construction.
 */
#define COMPUTE_BUF(mbuf, data, len)		\
		((mbuf = mbuf->next) ?		\
		(data = rte_pktmbuf_mtod(mbuf, uint8_t *)),	\
		(len = rte_pktmbuf_data_len(mbuf)) : 0)
19 
/** Run one stateless deflate (compress) operation over chained mbufs.
 *
 *  Feeds the source mbuf chain segment by segment into zlib's deflate(),
 *  writing output into the destination chain and advancing to the next
 *  destination segment whenever the current one fills. On return,
 *  op->status is set and, for successful/out-of-space outcomes,
 *  op->consumed/op->produced are updated; the stream is reset so the
 *  same xform can service the next operation.
 *
 *  @param op    operation descriptor (src/dst mbufs, offsets, flush flag)
 *  @param strm  zlib stream previously configured by deflateInit2()
 */
static void
process_zlib_deflate(struct rte_comp_op *op, z_stream *strm)
{
	int ret, flush, fin_flush;
	struct rte_mbuf *mbuf_src = op->m_src;
	struct rte_mbuf *mbuf_dst = op->m_dst;

	/* Only stateless flush modes are accepted; both map to Z_FINISH
	 * because each op carries a complete, self-contained stream.
	 */
	switch (op->flush_flag) {
	case RTE_COMP_FLUSH_FULL:
	case RTE_COMP_FLUSH_FINAL:
		fin_flush = Z_FINISH;
		break;
	default:
		op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
		ZLIB_PMD_ERR("Invalid flush value\n");
		return;
	}

	if (unlikely(!strm)) {
		op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
		ZLIB_PMD_ERR("Invalid z_stream\n");
		return;
	}
	/* Update z_stream with the inputs provided by application */
	strm->next_in = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
			op->src.offset);

	strm->avail_in = rte_pktmbuf_data_len(mbuf_src) - op->src.offset;

	strm->next_out = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
			op->dst.offset);

	strm->avail_out = rte_pktmbuf_data_len(mbuf_dst) - op->dst.offset;

	/* Set flush value to NO_FLUSH unless it is last mbuf */
	flush = Z_NO_FLUSH;
	/* Initialize status to SUCCESS */
	op->status = RTE_COMP_OP_STATUS_SUCCESS;

	do {
		/* Set flush value to Z_FINISH for last block: clamp
		 * avail_in so no more than op->src.length total bytes are
		 * consumed even if the segment holds extra data.
		 */
		if ((op->src.length - strm->total_in) <= strm->avail_in) {
			strm->avail_in = (op->src.length - strm->total_in);
			flush = fin_flush;
		}
		do {
			ret = deflate(strm, flush);
			if (unlikely(ret == Z_STREAM_ERROR)) {
				/* error return, do not process further */
				op->status =  RTE_COMP_OP_STATUS_ERROR;
				goto def_end;
			}
			/* Break if Z_STREAM_END is encountered */
			if (ret == Z_STREAM_END)
				goto def_end;

		/* Keep looping until input mbuf is consumed.
		 * Exit if destination mbuf gets exhausted.
		 */
		} while ((strm->avail_out == 0) &&
			COMPUTE_BUF(mbuf_dst, strm->next_out, strm->avail_out));

		if (!strm->avail_out) {
			/* there is no space for compressed output */
			op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
			break;
		}

	/* Update source buffer to next mbuf
	 * Exit if input buffers are fully consumed
	 */
	} while (COMPUTE_BUF(mbuf_src, strm->next_in, strm->avail_in));

def_end:
	/* Update op stats; totals come from zlib's running counters */
	switch (op->status) {
	case RTE_COMP_OP_STATUS_SUCCESS:
		op->consumed += strm->total_in;
	/* Fall-through */
	case RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED:
		op->produced += strm->total_out;
		break;
	default:
		ZLIB_PMD_ERR("stats not updated for status:%d\n",
				op->status);
	}

	/* Reset (not re-init) so the stream is reusable for the next op */
	deflateReset(strm);
}
109 
/** Run one stateless inflate (decompress) operation over chained mbufs.
 *
 *  Feeds the source mbuf chain into zlib's inflate(), writing decompressed
 *  data into the destination chain and moving to the next destination
 *  segment when the current one fills. On return, op->status is set and,
 *  for successful/out-of-space outcomes, op->consumed/op->produced are
 *  updated; the stream is reset for reuse.
 *
 *  @param op    operation descriptor (src/dst mbufs and offsets)
 *  @param strm  zlib stream previously configured by inflateInit2()
 */
static void
process_zlib_inflate(struct rte_comp_op *op, z_stream *strm)
{
	int ret, flush;
	struct rte_mbuf *mbuf_src = op->m_src;
	struct rte_mbuf *mbuf_dst = op->m_dst;

	if (unlikely(!strm)) {
		op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
		ZLIB_PMD_ERR("Invalid z_stream\n");
		return;
	}
	/* Point the stream at the application-supplied buffers */
	strm->next_in = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
			op->src.offset);

	strm->avail_in = rte_pktmbuf_data_len(mbuf_src) - op->src.offset;

	strm->next_out = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
			op->dst.offset);

	strm->avail_out = rte_pktmbuf_data_len(mbuf_dst) - op->dst.offset;

	/** Ignoring flush value provided from application for decompression */
	flush = Z_NO_FLUSH;
	/* initialize status to SUCCESS */
	op->status = RTE_COMP_OP_STATUS_SUCCESS;

	do {
		do {
			ret = inflate(strm, flush);

			/* Z_NEED_DICT is folded into the error path (zlib
			 * treats it as unrecoverable without a dictionary);
			 * Z_STREAM_END ends processing with SUCCESS intact.
			 */
			switch (ret) {
			/* Fall-through */
			case Z_NEED_DICT:
				ret = Z_DATA_ERROR;
			/* Fall-through */
			case Z_DATA_ERROR:
			/* Fall-through */
			case Z_MEM_ERROR:
			/* Fall-through */
			case Z_STREAM_ERROR:
				op->status = RTE_COMP_OP_STATUS_ERROR;
			/* Fall-through */
			case Z_STREAM_END:
				/* no further computation needed if
				 * Z_STREAM_END is encountered
				 */
				goto inf_end;
			default:
				/* success */
				break;

			}
		/* Keep looping until input mbuf is consumed.
		 * Exit if destination mbuf gets exhausted.
		 */
		} while ((strm->avail_out == 0) &&
			COMPUTE_BUF(mbuf_dst, strm->next_out, strm->avail_out));

		if (!strm->avail_out) {
			/* there is no more space for decompressed output */
			op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
			break;
		}
	/* Read next input buffer to be processed, exit if compressed
	 * blocks are fully read
	 */
	} while (COMPUTE_BUF(mbuf_src, strm->next_in, strm->avail_in));

inf_end:
	/* Update op stats; totals come from zlib's running counters */
	switch (op->status) {
	case RTE_COMP_OP_STATUS_SUCCESS:
		op->consumed += strm->total_in;
	/* Fall-through */
	case RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED:
		op->produced += strm->total_out;
		break;
	default:
		ZLIB_PMD_ERR("stats not produced for status:%d\n",
				op->status);
	}

	/* Reset (not re-init) so the stream is reusable for the next op */
	inflateReset(strm);
}
195 
196 /** Process comp operation for mbuf */
197 static inline int
198 process_zlib_op(struct zlib_qp *qp, struct rte_comp_op *op)
199 {
200 	struct zlib_stream *stream;
201 	struct zlib_priv_xform *private_xform;
202 
203 	if ((op->op_type == RTE_COMP_OP_STATEFUL) ||
204 			(op->src.offset > rte_pktmbuf_data_len(op->m_src)) ||
205 			(op->dst.offset > rte_pktmbuf_data_len(op->m_dst))) {
206 		op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
207 		ZLIB_PMD_ERR("Invalid source or destination buffers or "
208 			     "invalid Operation requested\n");
209 	} else {
210 		private_xform = (struct zlib_priv_xform *)op->private_xform;
211 		stream = &private_xform->stream;
212 		stream->comp(op, &stream->strm);
213 	}
214 	/* whatever is out of op, put it into completion queue with
215 	 * its status
216 	 */
217 	return rte_ring_enqueue(qp->processed_pkts, (void *)op);
218 }
219 
220 /** Parse comp xform and set private xform/Stream parameters */
221 int
222 zlib_set_stream_parameters(const struct rte_comp_xform *xform,
223 		struct zlib_stream *stream)
224 {
225 	int strategy, level, wbits;
226 	z_stream *strm = &stream->strm;
227 
228 	/* allocate deflate state */
229 	strm->zalloc = Z_NULL;
230 	strm->zfree = Z_NULL;
231 	strm->opaque = Z_NULL;
232 
233 	switch (xform->type) {
234 	case RTE_COMP_COMPRESS:
235 		stream->comp = process_zlib_deflate;
236 		stream->free = deflateEnd;
237 		/** Compression window bits */
238 		switch (xform->compress.algo) {
239 		case RTE_COMP_ALGO_DEFLATE:
240 			wbits = -(xform->compress.window_size);
241 			break;
242 		default:
243 			ZLIB_PMD_ERR("Compression algorithm not supported\n");
244 			return -1;
245 		}
246 		/** Compression Level */
247 		switch (xform->compress.level) {
248 		case RTE_COMP_LEVEL_PMD_DEFAULT:
249 			level = Z_DEFAULT_COMPRESSION;
250 			break;
251 		case RTE_COMP_LEVEL_NONE:
252 			level = Z_NO_COMPRESSION;
253 			break;
254 		case RTE_COMP_LEVEL_MIN:
255 			level = Z_BEST_SPEED;
256 			break;
257 		case RTE_COMP_LEVEL_MAX:
258 			level = Z_BEST_COMPRESSION;
259 			break;
260 		default:
261 			level = xform->compress.level;
262 			if (level < RTE_COMP_LEVEL_MIN ||
263 					level > RTE_COMP_LEVEL_MAX) {
264 				ZLIB_PMD_ERR("Compression level %d "
265 						"not supported\n",
266 						level);
267 				return -1;
268 			}
269 			break;
270 		}
271 		/** Compression strategy */
272 		switch (xform->compress.deflate.huffman) {
273 		case RTE_COMP_HUFFMAN_DEFAULT:
274 			strategy = Z_DEFAULT_STRATEGY;
275 			break;
276 		case RTE_COMP_HUFFMAN_FIXED:
277 			strategy = Z_FIXED;
278 			break;
279 		case RTE_COMP_HUFFMAN_DYNAMIC:
280 			strategy = Z_DEFAULT_STRATEGY;
281 			break;
282 		default:
283 			ZLIB_PMD_ERR("Compression strategy not supported\n");
284 			return -1;
285 		}
286 		if (deflateInit2(strm, level,
287 					Z_DEFLATED, wbits,
288 					DEF_MEM_LEVEL, strategy) != Z_OK) {
289 			ZLIB_PMD_ERR("Deflate init failed\n");
290 			return -1;
291 		}
292 		break;
293 
294 	case RTE_COMP_DECOMPRESS:
295 		stream->comp = process_zlib_inflate;
296 		stream->free = inflateEnd;
297 		/** window bits */
298 		switch (xform->decompress.algo) {
299 		case RTE_COMP_ALGO_DEFLATE:
300 			wbits = -(xform->decompress.window_size);
301 			break;
302 		default:
303 			ZLIB_PMD_ERR("Compression algorithm not supported\n");
304 			return -1;
305 		}
306 
307 		if (inflateInit2(strm, wbits) != Z_OK) {
308 			ZLIB_PMD_ERR("Inflate init failed\n");
309 			return -1;
310 		}
311 		break;
312 	default:
313 		return -1;
314 	}
315 	return 0;
316 }
317 
318 static uint16_t
319 zlib_pmd_enqueue_burst(void *queue_pair,
320 			struct rte_comp_op **ops, uint16_t nb_ops)
321 {
322 	struct zlib_qp *qp = queue_pair;
323 	int ret;
324 	uint16_t i;
325 	uint16_t enqd = 0;
326 	for (i = 0; i < nb_ops; i++) {
327 		ret = process_zlib_op(qp, ops[i]);
328 		if (unlikely(ret < 0)) {
329 			/* increment count if failed to push to completion
330 			 * queue
331 			 */
332 			qp->qp_stats.enqueue_err_count++;
333 		} else {
334 			qp->qp_stats.enqueued_count++;
335 			enqd++;
336 		}
337 	}
338 	return enqd;
339 }
340 
341 static uint16_t
342 zlib_pmd_dequeue_burst(void *queue_pair,
343 			struct rte_comp_op **ops, uint16_t nb_ops)
344 {
345 	struct zlib_qp *qp = queue_pair;
346 
347 	unsigned int nb_dequeued = 0;
348 
349 	nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
350 			(void **)ops, nb_ops, NULL);
351 	qp->qp_stats.dequeued_count += nb_dequeued;
352 
353 	return nb_dequeued;
354 }
355 
356 static int
357 zlib_create(const char *name,
358 		struct rte_vdev_device *vdev,
359 		struct rte_compressdev_pmd_init_params *init_params)
360 {
361 	struct rte_compressdev *dev;
362 
363 	dev = rte_compressdev_pmd_create(name, &vdev->device,
364 			sizeof(struct zlib_private), init_params);
365 	if (dev == NULL) {
366 		ZLIB_PMD_ERR("driver %s: create failed", init_params->name);
367 		return -ENODEV;
368 	}
369 
370 	dev->dev_ops = rte_zlib_pmd_ops;
371 
372 	/* register rx/tx burst functions for data path */
373 	dev->dequeue_burst = zlib_pmd_dequeue_burst;
374 	dev->enqueue_burst = zlib_pmd_enqueue_burst;
375 
376 	return 0;
377 }
378 
379 static int
380 zlib_probe(struct rte_vdev_device *vdev)
381 {
382 	struct rte_compressdev_pmd_init_params init_params = {
383 		"",
384 		rte_socket_id()
385 	};
386 	const char *name;
387 	const char *input_args;
388 	int retval;
389 
390 	name = rte_vdev_device_name(vdev);
391 
392 	if (name == NULL)
393 		return -EINVAL;
394 
395 	input_args = rte_vdev_device_args(vdev);
396 
397 	retval = rte_compressdev_pmd_parse_input_args(&init_params, input_args);
398 	if (retval < 0) {
399 		ZLIB_PMD_LOG(ERR,
400 			"Failed to parse initialisation arguments[%s]\n",
401 			input_args);
402 		return -EINVAL;
403 	}
404 
405 	return zlib_create(name, vdev, &init_params);
406 }
407 
408 static int
409 zlib_remove(struct rte_vdev_device *vdev)
410 {
411 	struct rte_compressdev *compressdev;
412 	const char *name;
413 
414 	name = rte_vdev_device_name(vdev);
415 	if (name == NULL)
416 		return -EINVAL;
417 
418 	compressdev = rte_compressdev_pmd_get_named_dev(name);
419 	if (compressdev == NULL)
420 		return -ENODEV;
421 
422 	return rte_compressdev_pmd_destroy(compressdev);
423 }
424 
/** Virtual device driver entry points for the zlib PMD */
static struct rte_vdev_driver zlib_pmd_drv = {
	.probe = zlib_probe,
	.remove = zlib_remove
};

/* Register the driver under the canonical compressdev zlib device name */
RTE_PMD_REGISTER_VDEV(COMPRESSDEV_NAME_ZLIB_PMD, zlib_pmd_drv);
431 
/* Constructor: register this PMD's log type and default it to INFO level */
RTE_INIT(zlib_init_log)
{
	zlib_logtype_driver = rte_log_register("pmd.compress.zlib");
	if (zlib_logtype_driver >= 0)
		rte_log_set_level(zlib_logtype_driver, RTE_LOG_INFO);
}
438