xref: /dpdk/drivers/compress/zlib/zlib_pmd.c (revision 25d11a86c56d50947af33d0b79ede622809bd8b9)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Cavium Networks
3  */
4 
5 #include <rte_bus_vdev.h>
6 #include <rte_common.h>
7 
8 #include "zlib_pmd_private.h"
9 
/** Advance to the next mbuf in a chain and retarget the working buffer.
 *
 *  Sets @mbuf to mbuf->next; if a next segment exists, points @data at its
 *  payload and @len at its data length (via the comma operator, so both
 *  assignments happen), and the macro's value is the new length.  If the
 *  chain is exhausted the macro evaluates to 0, terminating the do/while
 *  loops below.  NOTE(review): a next segment with zero data_len also yields
 *  0 and stops the walk — presumably chains here never contain empty
 *  segments; confirm against the mbuf producers.
 */
#define COMPUTE_BUF(mbuf, data, len)		\
		((mbuf = mbuf->next) ?		\
		(data = rte_pktmbuf_mtod(mbuf, uint8_t *)),	\
		(len = rte_pktmbuf_data_len(mbuf)) : 0)
17 
18 static void
19 process_zlib_deflate(struct rte_comp_op *op, z_stream *strm)
20 {
21 	int ret, flush, fin_flush;
22 	struct rte_mbuf *mbuf_src = op->m_src;
23 	struct rte_mbuf *mbuf_dst = op->m_dst;
24 
25 	switch (op->flush_flag) {
26 	case RTE_COMP_FLUSH_FULL:
27 	case RTE_COMP_FLUSH_FINAL:
28 		fin_flush = Z_FINISH;
29 		break;
30 	default:
31 		op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
32 		ZLIB_PMD_ERR("Invalid flush value\n");
33 	}
34 
35 	if (unlikely(!strm)) {
36 		op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
37 		ZLIB_PMD_ERR("Invalid z_stream\n");
38 		return;
39 	}
40 	/* Update z_stream with the inputs provided by application */
41 	strm->next_in = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
42 			op->src.offset);
43 
44 	strm->avail_in = rte_pktmbuf_data_len(mbuf_src) - op->src.offset;
45 
46 	strm->next_out = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
47 			op->dst.offset);
48 
49 	strm->avail_out = rte_pktmbuf_data_len(mbuf_dst) - op->dst.offset;
50 
51 	/* Set flush value to NO_FLUSH unless it is last mbuf */
52 	flush = Z_NO_FLUSH;
53 	/* Initialize status to SUCCESS */
54 	op->status = RTE_COMP_OP_STATUS_SUCCESS;
55 
56 	do {
57 		/* Set flush value to Z_FINISH for last block */
58 		if ((op->src.length - strm->total_in) <= strm->avail_in) {
59 			strm->avail_in = (op->src.length - strm->total_in);
60 			flush = fin_flush;
61 		}
62 		do {
63 			ret = deflate(strm, flush);
64 			if (unlikely(ret == Z_STREAM_ERROR)) {
65 				/* error return, do not process further */
66 				op->status =  RTE_COMP_OP_STATUS_ERROR;
67 				goto def_end;
68 			}
69 			/* Break if Z_STREAM_END is encountered */
70 			if (ret == Z_STREAM_END)
71 				goto def_end;
72 
73 		/* Keep looping until input mbuf is consumed.
74 		 * Exit if destination mbuf gets exhausted.
75 		 */
76 		} while ((strm->avail_out == 0) &&
77 			COMPUTE_BUF(mbuf_dst, strm->next_out, strm->avail_out));
78 
79 		if (!strm->avail_out) {
80 			/* there is no space for compressed output */
81 			op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
82 			break;
83 		}
84 
85 	/* Update source buffer to next mbuf
86 	 * Exit if input buffers are fully consumed
87 	 */
88 	} while (COMPUTE_BUF(mbuf_src, strm->next_in, strm->avail_in));
89 
90 def_end:
91 	/* Update op stats */
92 	switch (op->status) {
93 	case RTE_COMP_OP_STATUS_SUCCESS:
94 		op->consumed += strm->total_in;
95 	/* Fall-through */
96 	case RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED:
97 		op->produced += strm->total_out;
98 		break;
99 	default:
100 		ZLIB_PMD_ERR("stats not updated for status:%d\n",
101 				op->status);
102 	}
103 
104 	deflateReset(strm);
105 }
106 
/** Decompress op->m_src into op->m_dst using zlib inflate.
 *
 * Walks both mbuf chains segment by segment.  The application-supplied flush
 * flag is ignored for decompression; Z_NO_FLUSH is always used.  On return,
 * op->status is set and, for SUCCESS / OUT_OF_SPACE, op->consumed /
 * op->produced are updated from the stream totals.  The stream is reset
 * afterwards for reuse.
 */
static void
process_zlib_inflate(struct rte_comp_op *op, z_stream *strm)
{
	int ret, flush;
	struct rte_mbuf *mbuf_src = op->m_src;
	struct rte_mbuf *mbuf_dst = op->m_dst;

	if (unlikely(!strm)) {
		op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
		ZLIB_PMD_ERR("Invalid z_stream\n");
		return;
	}
	/* Point the stream at the first source/destination segment,
	 * honouring the application-provided offsets.
	 */
	strm->next_in = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
			op->src.offset);

	strm->avail_in = rte_pktmbuf_data_len(mbuf_src) - op->src.offset;

	strm->next_out = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
			op->dst.offset);

	strm->avail_out = rte_pktmbuf_data_len(mbuf_dst) - op->dst.offset;

	/** Ignoring flush value provided from application for decompression */
	flush = Z_NO_FLUSH;
	/* initialize status to SUCCESS */
	op->status = RTE_COMP_OP_STATUS_SUCCESS;

	do {
		do {
			ret = inflate(strm, flush);

			/* Z_NEED_DICT is folded into Z_DATA_ERROR; all error
			 * codes set STATUS_ERROR, then fall through with
			 * Z_STREAM_END to stop processing.  Any other return
			 * (Z_OK, Z_BUF_ERROR) continues the loop.
			 */
			switch (ret) {
			/* Fall-through */
			case Z_NEED_DICT:
				ret = Z_DATA_ERROR;
			/* Fall-through */
			case Z_DATA_ERROR:
			/* Fall-through */
			case Z_MEM_ERROR:
			/* Fall-through */
			case Z_STREAM_ERROR:
				op->status = RTE_COMP_OP_STATUS_ERROR;
			/* Fall-through */
			case Z_STREAM_END:
				/* no further computation needed if
				 * Z_STREAM_END is encountered
				 */
				goto inf_end;
			default:
				/* success */
				break;

			}
		/* Keep looping until input mbuf is consumed.
		 * Exit if destination mbuf gets exhausted.
		 */
		} while ((strm->avail_out == 0) &&
			COMPUTE_BUF(mbuf_dst, strm->next_out, strm->avail_out));

		if (!strm->avail_out) {
			/* there is no more space for decompressed output */
			op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
			break;
		}
	/* Read next input buffer to be processed, exit if compressed
	 * blocks are fully read
	 */
	} while (COMPUTE_BUF(mbuf_src, strm->next_in, strm->avail_in));

inf_end:
	/* Update op stats */
	switch (op->status) {
	case RTE_COMP_OP_STATUS_SUCCESS:
		op->consumed += strm->total_in;
	/* Fall-through */
	case RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED:
		op->produced += strm->total_out;
		break;
	default:
		ZLIB_PMD_ERR("stats not produced for status:%d\n",
				op->status);
	}

	/* Reset stream state so the same z_stream can serve the next op */
	inflateReset(strm);
}
192 
193 /** Process comp operation for mbuf */
194 static inline int
195 process_zlib_op(struct zlib_qp *qp, struct rte_comp_op *op)
196 {
197 	struct zlib_stream *stream;
198 	struct zlib_priv_xform *private_xform;
199 
200 	if ((op->op_type == RTE_COMP_OP_STATEFUL) ||
201 			(op->src.offset > rte_pktmbuf_data_len(op->m_src)) ||
202 			(op->dst.offset > rte_pktmbuf_data_len(op->m_dst))) {
203 		op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
204 		ZLIB_PMD_ERR("Invalid source or destination buffers or "
205 			     "invalid Operation requested\n");
206 	} else {
207 		private_xform = (struct zlib_priv_xform *)op->private_xform;
208 		stream = &private_xform->stream;
209 		stream->comp(op, &stream->strm);
210 	}
211 	/* whatever is out of op, put it into completion queue with
212 	 * its status
213 	 */
214 	return rte_ring_enqueue(qp->processed_pkts, (void *)op);
215 }
216 
217 /** Parse comp xform and set private xform/Stream parameters */
218 int
219 zlib_set_stream_parameters(const struct rte_comp_xform *xform,
220 		struct zlib_stream *stream)
221 {
222 	int strategy, level, wbits;
223 	z_stream *strm = &stream->strm;
224 
225 	/* allocate deflate state */
226 	strm->zalloc = Z_NULL;
227 	strm->zfree = Z_NULL;
228 	strm->opaque = Z_NULL;
229 
230 	switch (xform->type) {
231 	case RTE_COMP_COMPRESS:
232 		stream->comp = process_zlib_deflate;
233 		stream->free = deflateEnd;
234 		/** Compression window bits */
235 		switch (xform->compress.algo) {
236 		case RTE_COMP_ALGO_DEFLATE:
237 			wbits = -(xform->compress.window_size);
238 			break;
239 		default:
240 			ZLIB_PMD_ERR("Compression algorithm not supported\n");
241 			return -1;
242 		}
243 		/** Compression Level */
244 		switch (xform->compress.level) {
245 		case RTE_COMP_LEVEL_PMD_DEFAULT:
246 			level = Z_DEFAULT_COMPRESSION;
247 			break;
248 		case RTE_COMP_LEVEL_NONE:
249 			level = Z_NO_COMPRESSION;
250 			break;
251 		case RTE_COMP_LEVEL_MIN:
252 			level = Z_BEST_SPEED;
253 			break;
254 		case RTE_COMP_LEVEL_MAX:
255 			level = Z_BEST_COMPRESSION;
256 			break;
257 		default:
258 			level = xform->compress.level;
259 			if (level < RTE_COMP_LEVEL_MIN ||
260 					level > RTE_COMP_LEVEL_MAX) {
261 				ZLIB_PMD_ERR("Compression level %d "
262 						"not supported\n",
263 						level);
264 				return -1;
265 			}
266 			break;
267 		}
268 		/** Compression strategy */
269 		switch (xform->compress.deflate.huffman) {
270 		case RTE_COMP_HUFFMAN_DEFAULT:
271 			strategy = Z_DEFAULT_STRATEGY;
272 			break;
273 		case RTE_COMP_HUFFMAN_FIXED:
274 			strategy = Z_FIXED;
275 			break;
276 		case RTE_COMP_HUFFMAN_DYNAMIC:
277 			strategy = Z_DEFAULT_STRATEGY;
278 			break;
279 		default:
280 			ZLIB_PMD_ERR("Compression strategy not supported\n");
281 			return -1;
282 		}
283 		if (deflateInit2(strm, level,
284 					Z_DEFLATED, wbits,
285 					DEF_MEM_LEVEL, strategy) != Z_OK) {
286 			ZLIB_PMD_ERR("Deflate init failed\n");
287 			return -1;
288 		}
289 		break;
290 
291 	case RTE_COMP_DECOMPRESS:
292 		stream->comp = process_zlib_inflate;
293 		stream->free = inflateEnd;
294 		/** window bits */
295 		switch (xform->decompress.algo) {
296 		case RTE_COMP_ALGO_DEFLATE:
297 			wbits = -(xform->decompress.window_size);
298 			break;
299 		default:
300 			ZLIB_PMD_ERR("Compression algorithm not supported\n");
301 			return -1;
302 		}
303 
304 		if (inflateInit2(strm, wbits) != Z_OK) {
305 			ZLIB_PMD_ERR("Inflate init failed\n");
306 			return -1;
307 		}
308 		break;
309 	default:
310 		return -1;
311 	}
312 	return 0;
313 }
314 
315 static uint16_t
316 zlib_pmd_enqueue_burst(void *queue_pair,
317 			struct rte_comp_op **ops, uint16_t nb_ops)
318 {
319 	struct zlib_qp *qp = queue_pair;
320 	int ret;
321 	uint16_t i;
322 	uint16_t enqd = 0;
323 	for (i = 0; i < nb_ops; i++) {
324 		ret = process_zlib_op(qp, ops[i]);
325 		if (unlikely(ret < 0)) {
326 			/* increment count if failed to push to completion
327 			 * queue
328 			 */
329 			qp->qp_stats.enqueue_err_count++;
330 		} else {
331 			qp->qp_stats.enqueued_count++;
332 			enqd++;
333 		}
334 	}
335 	return enqd;
336 }
337 
338 static uint16_t
339 zlib_pmd_dequeue_burst(void *queue_pair,
340 			struct rte_comp_op **ops, uint16_t nb_ops)
341 {
342 	struct zlib_qp *qp = queue_pair;
343 
344 	unsigned int nb_dequeued = 0;
345 
346 	nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
347 			(void **)ops, nb_ops, NULL);
348 	qp->qp_stats.dequeued_count += nb_dequeued;
349 
350 	return nb_dequeued;
351 }
352 
353 static int
354 zlib_create(const char *name,
355 		struct rte_vdev_device *vdev,
356 		struct rte_compressdev_pmd_init_params *init_params)
357 {
358 	struct rte_compressdev *dev;
359 
360 	dev = rte_compressdev_pmd_create(name, &vdev->device,
361 			sizeof(struct zlib_private), init_params);
362 	if (dev == NULL) {
363 		ZLIB_PMD_ERR("driver %s: create failed", init_params->name);
364 		return -ENODEV;
365 	}
366 
367 	dev->dev_ops = rte_zlib_pmd_ops;
368 
369 	/* register rx/tx burst functions for data path */
370 	dev->dequeue_burst = zlib_pmd_dequeue_burst;
371 	dev->enqueue_burst = zlib_pmd_enqueue_burst;
372 
373 	return 0;
374 }
375 
376 static int
377 zlib_probe(struct rte_vdev_device *vdev)
378 {
379 	struct rte_compressdev_pmd_init_params init_params = {
380 		"",
381 		rte_socket_id()
382 	};
383 	const char *name;
384 	const char *input_args;
385 	int retval;
386 
387 	name = rte_vdev_device_name(vdev);
388 
389 	if (name == NULL)
390 		return -EINVAL;
391 
392 	input_args = rte_vdev_device_args(vdev);
393 
394 	retval = rte_compressdev_pmd_parse_input_args(&init_params, input_args);
395 	if (retval < 0) {
396 		ZLIB_PMD_LOG(ERR,
397 			"Failed to parse initialisation arguments[%s]\n",
398 			input_args);
399 		return -EINVAL;
400 	}
401 
402 	return zlib_create(name, vdev, &init_params);
403 }
404 
405 static int
406 zlib_remove(struct rte_vdev_device *vdev)
407 {
408 	struct rte_compressdev *compressdev;
409 	const char *name;
410 
411 	name = rte_vdev_device_name(vdev);
412 	if (name == NULL)
413 		return -EINVAL;
414 
415 	compressdev = rte_compressdev_pmd_get_named_dev(name);
416 	if (compressdev == NULL)
417 		return -ENODEV;
418 
419 	return rte_compressdev_pmd_destroy(compressdev);
420 }
421 
/* Virtual-device driver descriptor: ties the probe/remove callbacks above
 * to the "compress_zlib" vdev name via the registration macro below.
 */
static struct rte_vdev_driver zlib_pmd_drv = {
	.probe = zlib_probe,
	.remove = zlib_remove
};

RTE_PMD_REGISTER_VDEV(COMPRESSDEV_NAME_ZLIB_PMD, zlib_pmd_drv);
428 
/* Constructor: register the PMD's log type and default it to INFO level.
 * Runs automatically at shared-object load via RTE_INIT.
 */
RTE_INIT(zlib_init_log)
{
	zlib_logtype_driver = rte_log_register("pmd.compress.zlib");
	if (zlib_logtype_driver >= 0)
		rte_log_set_level(zlib_logtype_driver, RTE_LOG_INFO);
}
435