xref: /dpdk/drivers/compress/octeontx/otx_zip_pmd.c (revision f4eac3a09c51a1a2dab1f2fd3a10fe0619286a0d)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Cavium, Inc
 */

#include <string.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cpuflags.h>
#include <rte_malloc.h>

#include "otx_zip.h"

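/*
 * Driver overview (a summary of the code below): the PMD exposes a
 * synchronous DEFLATE engine. Each enqueued op is executed against the
 * hardware immediately, and its result is parked on a per-queue-pair
 * completion ring until the application dequeues it.
 */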
static const struct rte_compressdev_capabilities
				octtx_zip_pmd_capabilities[] = {
	{	.algo = RTE_COMP_ALGO_DEFLATE,
		/* Deflate */
		.comp_feature_flags =	RTE_COMP_FF_HUFFMAN_FIXED |
					RTE_COMP_FF_HUFFMAN_DYNAMIC,
		/* Non-shareable priv xform, stateless only */
		.window_size = {
				.min = 1,
				.max = 14,
				.increment = 1
				/* window sizes of 2^1 to 2^14 supported */
		},
	},
	RTE_COMP_END_OF_CAPABILITIES_LIST()
};

/*
 * Reset the stream to its default state (begin-of-file set, end-of-file
 * cleared) for the next stateless operation.
 */
static inline void
reset_stream(struct zip_stream *z_stream)
{
	union zip_inst_s *inst = (union zip_inst_s *)(z_stream->inst);

	inst->s.bf = 1;
	inst->s.ef = 0;
}

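/**
 * Process one stateless op synchronously: build and submit the hardware
 * instruction, then busy-poll the result buffer until the engine writes
 * back a completion code.
 */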
int
zip_process_op(struct rte_comp_op *op,
		struct zipvf_qp *qp,
		struct zip_stream *zstrm)
{
	union zip_inst_s *inst = zstrm->inst;
	volatile union zip_zres_s *zresult = NULL;

	if ((op->m_src->nb_segs > 1) || (op->m_dst->nb_segs > 1) ||
			(op->src.offset > rte_pktmbuf_pkt_len(op->m_src)) ||
			(op->dst.offset > rte_pktmbuf_pkt_len(op->m_dst))) {
		op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
		ZIP_PMD_ERR("Segmented packets and out-of-range offsets are not supported\n");
		return 0;
	}

	zipvf_prepare_cmd_stateless(op, zstrm);

	zresult = (union zip_zres_s *)zstrm->bufs[RES_BUF];
	zresult->s.compcode = 0;

#ifdef ZIP_DBG
	zip_dump_instruction(inst);
#endif

	/* Submit zip command */
	zipvf_push_command(qp, (void *)inst);

	/* Busy-poll for the result in sync mode */
	do {
	} while (!zresult->s.compcode);

	if (zresult->s.compcode == ZIP_COMP_E_SUCCESS) {
		op->status = RTE_COMP_OP_STATUS_SUCCESS;
	} else {
		/* Fatal error, nothing can be done */
		ZIP_PMD_ERR("operation failed with error code:%d\n",
			zresult->s.compcode);
		if (zresult->s.compcode == ZIP_COMP_E_DSTOP)
			op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
		else
			op->status = RTE_COMP_OP_STATUS_ERROR;
	}

	ZIP_PMD_INFO("written %d\n", zresult->s.totalbyteswritten);

	/* Update op stats */
	switch (op->status) {
	case RTE_COMP_OP_STATUS_SUCCESS:
		op->consumed = zresult->s.totalbytesread;
	/* Fall-through */
	case RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED:
		op->produced = zresult->s.totalbyteswritten;
		break;
	default:
		ZIP_PMD_ERR("stats not updated for status:%d\n",
				op->status);
		break;
	}

	/* The stream is reset irrespective of the result */
	reset_stream(zstrm);

	zresult->s.compcode = ZIP_COMP_E_NOTDONE;
	return 0;
}

/** Parse xform parameters and set up a stream */
static int
zip_set_stream_parameters(struct rte_compressdev *dev,
			const struct rte_comp_xform *xform,
			struct zip_stream *z_stream)
{
	int ret;
	union zip_inst_s *inst;
	struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
	void *res;

	/* Allocate resources required by a stream */
	ret = rte_mempool_get_bulk(vf->zip_mp,
			z_stream->bufs, MAX_BUFS_PER_STREAM);
	if (ret < 0)
		return -1;

	/* Get one command buffer from the pool and set it up */
	inst = (union zip_inst_s *)z_stream->bufs[CMD_BUF];
	res = z_stream->bufs[RES_BUF];

	memset(inst->u, 0, sizeof(inst->u));

	/* Set BF only for the first op of the stream */
	inst->s.bf = 1;

	if (xform->type == RTE_COMP_COMPRESS) {
		inst->s.op = ZIP_OP_E_COMP;

		switch (xform->compress.deflate.huffman) {
		case RTE_COMP_HUFFMAN_DEFAULT:
			inst->s.cc = ZIP_CC_DEFAULT;
			break;
		case RTE_COMP_HUFFMAN_FIXED:
			inst->s.cc = ZIP_CC_FIXED_HUFF;
			break;
		case RTE_COMP_HUFFMAN_DYNAMIC:
			inst->s.cc = ZIP_CC_DYN_HUFF;
			break;
		default:
			ret = -1;
			goto err;
		}

		switch (xform->compress.level) {
		case RTE_COMP_LEVEL_MIN:
			inst->s.ss = ZIP_COMP_E_LEVEL_MIN;
			break;
		case RTE_COMP_LEVEL_MAX:
			inst->s.ss = ZIP_COMP_E_LEVEL_MAX;
			break;
		case RTE_COMP_LEVEL_NONE:
			ZIP_PMD_ERR("Compression level not supported");
			ret = -1;
			goto err;
		default:
			/* For any value between min and max, choose
			 * the PMD default.
			 */
			inst->s.ss = ZIP_COMP_E_LEVEL_MED;
			break;
		}
	} else if (xform->type == RTE_COMP_DECOMPRESS) {
		inst->s.op = ZIP_OP_E_DECOMP;
		/* From the HRM:
		 * for DEFLATE decompression, [CC] must be 0x0 and
		 * [SS] must be 0x0.
		 */
		inst->s.cc = 0;
		/* Speed bit should not be set for decompression */
		inst->s.ss = 0;
		/* A decompression context is needed only for STATEFUL
		 * operations. Only STATELESS is currently supported, so
		 * skip setting the ctx pointer.
		 */
	} else {
		ZIP_PMD_ERR("xform type not supported");
		ret = -1;
		goto err;
	}

	inst->s.res_ptr_addr.s.addr = rte_mempool_virt2iova(res);
	inst->s.res_ptr_ctl.s.length = 0;

	z_stream->inst = inst;
	z_stream->func = zip_process_op;

	return 0;

err:
	rte_mempool_put_bulk(vf->zip_mp,
			     (void *)&(z_stream->bufs[0]),
			     MAX_BUFS_PER_STREAM);

	return ret;
}

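/*
 * Sizing note: a single mempool backs every stream. Each stream draws
 * MAX_BUFS_PER_STREAM fixed-size buffers (the command and result buffers
 * among them) from it, so the pool below is dimensioned from the
 * configured priv_xform and stream counts.
 */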
/** Configure device */
static int
zip_pmd_config(struct rte_compressdev *dev,
		struct rte_compressdev_config *config)
{
	int nb_streams;
	char res_pool[RTE_MEMZONE_NAMESIZE];
	struct zip_vf *vf;
	struct rte_mempool *zip_buf_mp;

	if (!config || !dev)
		return -EIO;

	vf = (struct zip_vf *)(dev->data->dev_private);

	/* Create a pool with the maximum number of resources
	 * required by streams.
	 */

	/* Use a common pool for non-shareable priv_xforms and streams */
	nb_streams = config->max_nb_priv_xforms + config->max_nb_streams;

	snprintf(res_pool, RTE_MEMZONE_NAMESIZE, "octtx_zip_res_pool%u",
		 dev->data->dev_id);

	/* TBD: should we use the per-core object cache for stream resources? */
	zip_buf_mp = rte_mempool_create(
			res_pool,
			nb_streams * MAX_BUFS_PER_STREAM,
			ZIP_BUF_SIZE,
			0,
			0,
			NULL,
			NULL,
			NULL,
			NULL,
			SOCKET_ID_ANY,
			0);

	if (zip_buf_mp == NULL) {
		ZIP_PMD_ERR(
			"Failed to create buf mempool octtx_zip_res_pool%u",
			dev->data->dev_id);
		return -1;
	}

	vf->zip_mp = zip_buf_mp;

	return 0;
}

/** Start device */
static int
zip_pmd_start(__rte_unused struct rte_compressdev *dev)
{
	return 0;
}

/** Stop device */
static void
zip_pmd_stop(__rte_unused struct rte_compressdev *dev)
{
}

/** Close device */
static int
zip_pmd_close(struct rte_compressdev *dev)
{
	if (dev == NULL)
		return -1;

	struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
	rte_mempool_free(vf->zip_mp);

	return 0;
}

/** Get device statistics */
static void
zip_pmd_stats_get(struct rte_compressdev *dev,
		struct rte_compressdev_stats *stats)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];

		stats->enqueued_count += qp->qp_stats.enqueued_count;
		stats->dequeued_count += qp->qp_stats.dequeued_count;

		stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
		stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
	}
}

/** Reset device statistics */
static void
zip_pmd_stats_reset(struct rte_compressdev *dev)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];
		memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
	}
}

/** Get device info */
static void
zip_pmd_info_get(struct rte_compressdev *dev,
		struct rte_compressdev_info *dev_info)
{
	struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;

	if (dev_info != NULL) {
		dev_info->driver_name = dev->device->driver->name;
		dev_info->feature_flags = dev->feature_flags;
		dev_info->capabilities = octtx_zip_pmd_capabilities;
		dev_info->max_nb_queue_pairs = vf->max_nb_queue_pairs;
	}
}

/** Release queue pair */
static int
zip_pmd_qp_release(struct rte_compressdev *dev, uint16_t qp_id)
{
	struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];

	if (qp != NULL) {
		zipvf_q_term(qp);

		rte_ring_free(qp->processed_pkts);

		/* qp->name was allocated separately in qp_setup */
		rte_free(qp->name);
		rte_free(qp);
		dev->data->queue_pairs[qp_id] = NULL;
	}
	return 0;
}

/** Create a ring on which to place processed packets */
static struct rte_ring *
zip_pmd_qp_create_processed_pkts_ring(struct zipvf_qp *qp,
		unsigned int ring_size, int socket_id)
{
	struct rte_ring *r;

	r = rte_ring_lookup(qp->name);
	if (r) {
		if (rte_ring_get_size(r) >= ring_size) {
			ZIP_PMD_INFO("Reusing existing ring %s for processed"
					" packets", qp->name);
			return r;
		}

		ZIP_PMD_ERR("Unable to reuse existing ring %s for processed"
				" packets", qp->name);
		return NULL;
	}

	return rte_ring_create(qp->name, ring_size, socket_id,
						RING_F_EXACT_SZ);
}

/** Set up a queue pair */
static int
zip_pmd_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
		uint32_t max_inflight_ops, int socket_id)
{
	struct zipvf_qp *qp = NULL;
	struct zip_vf *vf;
	char *name;
	int ret;

	if (!dev)
		return -1;

	vf = (struct zip_vf *)(dev->data->dev_private);

	/* Reuse the queue pair if it has already been set up */
	if (dev->data->queue_pairs[qp_id] != NULL) {
		ZIP_PMD_INFO("Using existing queue pair %d ", qp_id);
		return 0;
	}

	name = rte_malloc(NULL, RTE_COMPRESSDEV_NAME_MAX_LEN, 0);
	if (name == NULL)
		return -ENOMEM;
	snprintf(name, RTE_COMPRESSDEV_NAME_MAX_LEN,
		 "zip_pmd_%u_qp_%u",
		 dev->data->dev_id, qp_id);

	/* Allocate the queue pair data structure. */
	qp = rte_zmalloc_socket(name, sizeof(*qp),
				RTE_CACHE_LINE_SIZE, socket_id);
	if (qp == NULL) {
		rte_free(name);
		return -ENOMEM;
	}

	qp->name = name;

	/* Create a completion queue up to max_inflight_ops deep */
	qp->processed_pkts = zip_pmd_qp_create_processed_pkts_ring(qp,
						max_inflight_ops, socket_id);
	if (qp->processed_pkts == NULL)
		goto qp_setup_cleanup;

	qp->id = qp_id;
	qp->vf = vf;

	ret = zipvf_q_init(qp);
	if (ret < 0)
		goto qp_setup_cleanup;

	dev->data->queue_pairs[qp_id] = qp;

	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
	return 0;

qp_setup_cleanup:
	rte_ring_free(qp->processed_pkts);
	rte_free(qp->name);
	rte_free(qp);
	return -1;
}

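/** Create a private xform: allocate a zip_stream and program its hardware
 *  instruction from the xform parameters.
 */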
static int
zip_pmd_stream_create(struct rte_compressdev *dev,
		const struct rte_comp_xform *xform, void **stream)
{
	int ret;
	struct zip_stream *strm = NULL;

	strm = rte_malloc(NULL,
			sizeof(struct zip_stream), 0);
	if (strm == NULL)
		return -ENOMEM;

	ret = zip_set_stream_parameters(dev, xform, strm);
	if (ret < 0) {
		ZIP_PMD_ERR("failed to configure xform parameters");
		rte_free(strm);
		return ret;
	}
	*stream = strm;
	return 0;
}

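/** Free a private xform and return its buffers to the stream pool. */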
static int
zip_pmd_stream_free(struct rte_compressdev *dev, void *stream)
{
	struct zip_vf *vf = (struct zip_vf *)(dev->data->dev_private);
	struct zip_stream *z_stream;

	if (stream == NULL)
		return 0;

	z_stream = (struct zip_stream *)stream;

	/* Free resources back to pool */
	rte_mempool_put_bulk(vf->zip_mp,
				(void *)&(z_stream->bufs[0]),
				MAX_BUFS_PER_STREAM);

	/* Zero out the whole structure */
	memset(stream, 0, sizeof(struct zip_stream));
	rte_free(stream);

	return 0;
}

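/**
 * Enqueue burst: each stateless op is executed synchronously against the
 * hardware and then placed, together with its final status, on the
 * per-queue-pair completion ring for a later dequeue.
 */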
static uint16_t
zip_pmd_enqueue_burst_sync(void *queue_pair,
		struct rte_comp_op **ops, uint16_t nb_ops)
{
	struct zipvf_qp *qp = queue_pair;
	struct rte_comp_op *op;
	struct zip_stream *zstrm;
	int i, ret = 0;
	uint16_t enqd = 0;

	for (i = 0; i < nb_ops; i++) {
		op = ops[i];

		if (op->op_type == RTE_COMP_OP_STATEFUL) {
			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
		} else {
			/* Process stateless ops */
			zstrm = (struct zip_stream *)op->private_xform;
			if (unlikely(zstrm == NULL))
				op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
			else
				ret = zstrm->func(op, qp, zstrm);
		}

		/* Whatever came out of the op, put it on the completion
		 * queue with its status.
		 */
		if (!ret)
			ret = rte_ring_enqueue(qp->processed_pkts, (void *)op);

		if (unlikely(ret < 0)) {
			/* Increment the count if the op failed to enqueue */
			qp->qp_stats.enqueue_err_count++;
		} else {
			qp->qp_stats.enqueued_count++;
			enqd++;
		}
	}
	return enqd;
}

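/** Dequeue burst: drain completed ops from the completion ring. */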
static uint16_t
zip_pmd_dequeue_burst_sync(void *queue_pair,
		struct rte_comp_op **ops, uint16_t nb_ops)
{
	struct zipvf_qp *qp = queue_pair;
	unsigned int nb_dequeued = 0;

	nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
			(void **)ops, nb_ops, NULL);
	qp->qp_stats.dequeued_count += nb_dequeued;

	return nb_dequeued;
}

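/*
 * Only stateless operation is supported: the private_xform hooks map onto
 * the internal stream helpers, while the stateful stream hooks stay NULL.
 */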
static struct rte_compressdev_ops octtx_zip_pmd_ops = {
		.dev_configure		= zip_pmd_config,
		.dev_start		= zip_pmd_start,
		.dev_stop		= zip_pmd_stop,
		.dev_close		= zip_pmd_close,

		.stats_get		= zip_pmd_stats_get,
		.stats_reset		= zip_pmd_stats_reset,

		.dev_infos_get		= zip_pmd_info_get,

		.queue_pair_setup	= zip_pmd_qp_setup,
		.queue_pair_release	= zip_pmd_qp_release,

		.private_xform_create	= zip_pmd_stream_create,
		.private_xform_free	= zip_pmd_stream_free,
		.stream_create		= NULL,
		.stream_free		= NULL
};

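/** Probe: create a compressdev instance for a matching ZIP VF PCI device. */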
static int
zip_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	int ret = 0;
	char compressdev_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
	struct rte_compressdev *compressdev;
	struct rte_compressdev_pmd_init_params init_params = {
		"",
		rte_socket_id(),
	};

	ZIP_PMD_INFO("vendor_id=0x%x device_id=0x%x",
			(unsigned int)pci_dev->id.vendor_id,
			(unsigned int)pci_dev->id.device_id);

	rte_pci_device_name(&pci_dev->addr, compressdev_name,
			    sizeof(compressdev_name));

	compressdev = rte_compressdev_pmd_create(compressdev_name,
		&pci_dev->device, sizeof(struct zip_vf), &init_params);
	if (compressdev == NULL) {
		ZIP_PMD_ERR("driver %s: create failed", init_params.name);
		return -ENODEV;
	}

	/* Create the VF device only in the primary process */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		/* Create the VF device with the given PMD device id */
		ret = zipvf_create(compressdev);
		if (ret < 0) {
			ZIP_PMD_ERR("Device creation failed");
			rte_compressdev_pmd_destroy(compressdev);
			return ret;
		}
	}

	compressdev->dev_ops = &octtx_zip_pmd_ops;
	/* Register enqueue/dequeue burst functions for the data path */
	compressdev->dequeue_burst = zip_pmd_dequeue_burst_sync;
	compressdev->enqueue_burst = zip_pmd_enqueue_burst_sync;
	compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
	return ret;
}

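/** Remove: tear down the VF (primary process only) and destroy the
 *  compressdev.
 */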
static int
zip_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_compressdev *compressdev;
	char compressdev_name[RTE_COMPRESSDEV_NAME_MAX_LEN];

	if (pci_dev == NULL) {
		ZIP_PMD_ERR("Invalid PCI device\n");
		return -EINVAL;
	}

	rte_pci_device_name(&pci_dev->addr, compressdev_name,
			sizeof(compressdev_name));

	compressdev = rte_compressdev_pmd_get_named_dev(compressdev_name);
	if (compressdev == NULL)
		return -ENODEV;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		if (zipvf_destroy(compressdev) < 0)
			return -ENODEV;
	}
	return rte_compressdev_pmd_destroy(compressdev);
}

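/** PCI device IDs matched by this driver: the Cavium OCTEON TX ZIP VF */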
static struct rte_pci_id pci_id_octtx_zipvf_table[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
			PCI_DEVICE_ID_OCTEONTX_ZIPVF),
	},
	{
		.device_id = 0
	},
};

/**
 * Structure that represents a PCI driver
 */
static struct rte_pci_driver octtx_zip_pmd = {
	.id_table    = pci_id_octtx_zipvf_table,
	.drv_flags   = RTE_PCI_DRV_NEED_MAPPING,
	.probe       = zip_pci_probe,
	.remove      = zip_pci_remove,
};

RTE_PMD_REGISTER_PCI(COMPRESSDEV_NAME_ZIP_PMD, octtx_zip_pmd);
RTE_PMD_REGISTER_PCI_TABLE(COMPRESSDEV_NAME_ZIP_PMD, pci_id_octtx_zipvf_table);
RTE_LOG_REGISTER_DEFAULT(octtx_zip_logtype_driver, INFO);