/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Cavium, Inc
 */

#include <string.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cpuflags.h>
#include <rte_malloc.h>

#include "otx_zip.h"

int octtx_zip_logtype_driver;

static const struct rte_compressdev_capabilities
				octtx_zip_pmd_capabilities[] = {
	{	.algo = RTE_COMP_ALGO_DEFLATE,
		/* Deflate */
		.comp_feature_flags =	RTE_COMP_FF_HUFFMAN_FIXED |
					RTE_COMP_FF_HUFFMAN_DYNAMIC,
		/* Non-shareable Priv XFORM and Stateless */
		.window_size = {
				.min = 1,
				.max = 14,
				.increment = 1
				/* size supported 2^1 to 2^14 */
		},
	},
	RTE_COMP_END_OF_CAPABILITIES_LIST()
};

/*
 * Reset session to default state for next set of stateless operations
 */
static inline void
reset_stream(struct zip_stream *z_stream)
{
	union zip_inst_s *inst = (union zip_inst_s *)(z_stream->inst);

	inst->s.bf = 1;
	inst->s.ef = 0;
}

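/*
 * Process one stateless op synchronously on the given queue pair.
 * The prepared instruction is pushed to the ZIP engine and the result
 * buffer is polled until the engine reports a completion code; the op's
 * status, consumed and produced counters are updated in place. The
 * function always returns 0 so the caller can move the op (with its
 * status) to the completion ring.
 */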
int
zip_process_op(struct rte_comp_op *op,
		struct zipvf_qp *qp,
		struct zip_stream *zstrm)
{
	union zip_inst_s *inst = zstrm->inst;
	volatile union zip_zres_s *zresult = NULL;


	if ((op->m_src->nb_segs > 1) || (op->m_dst->nb_segs > 1) ||
			(op->src.offset > rte_pktmbuf_pkt_len(op->m_src)) ||
			(op->dst.offset > rte_pktmbuf_pkt_len(op->m_dst))) {
		op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
		ZIP_PMD_ERR("Segmented packets or invalid src/dst offsets "
			    "are not supported\n");
		return 0;
	}

	zipvf_prepare_cmd_stateless(op, zstrm);

	zresult = (union zip_zres_s *)zstrm->bufs[RES_BUF];
	zresult->s.compcode = 0;

#ifdef ZIP_DBG
	zip_dump_instruction(inst);
#endif

	/* Submit zip command */
	zipvf_push_command(qp, (void *)inst);

	/* Process results in sync mode: busy-poll until the engine
	 * writes a non-zero completion code into the result buffer.
	 */
	do {
	} while (!zresult->s.compcode);

	if (zresult->s.compcode == ZIP_COMP_E_SUCCESS) {
		op->status = RTE_COMP_OP_STATUS_SUCCESS;
	} else {
		/* Fatal error, nothing can be done except report it */
		ZIP_PMD_ERR("operation failed with error code:%d\n",
			zresult->s.compcode);
		if (zresult->s.compcode == ZIP_COMP_E_DSTOP)
			op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
		else
			op->status = RTE_COMP_OP_STATUS_ERROR;
	}

	ZIP_PMD_INFO("written %d\n", zresult->s.totalbyteswritten);

	/* Update op stats */
	switch (op->status) {
	case RTE_COMP_OP_STATUS_SUCCESS:
		op->consumed = zresult->s.totalbytesread;
	/* Fall-through */
	case RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED:
		op->produced = zresult->s.totalbyteswritten;
		break;
	default:
		ZIP_PMD_ERR("stats not updated for status:%d\n",
				op->status);
		break;
	}
	/* zstream is reset irrespective of result */
	reset_stream(zstrm);

	zresult->s.compcode = ZIP_COMP_E_NOTDONE;
	return 0;
}

/** Parse xform parameters and setup a stream */
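/*
 * For illustration only (not part of the driver): a typical xform that this
 * function accepts could look roughly like the sketch below, requesting
 * DEFLATE compression with dynamic Huffman codes at the PMD default level
 * (any level between min and max maps to the PMD default speed setting):
 *
 *	struct rte_comp_xform xform = {
 *		.type = RTE_COMP_COMPRESS,
 *		.compress = {
 *			.algo = RTE_COMP_ALGO_DEFLATE,
 *			.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC,
 *			.level = RTE_COMP_LEVEL_PMD_DEFAULT,
 *		},
 *	};
 */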
static int
zip_set_stream_parameters(struct rte_compressdev *dev,
			const struct rte_comp_xform *xform,
			struct zip_stream *z_stream)
{
	int ret;
	union zip_inst_s *inst;
	struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
	void *res;

	/* Allocate resources required by a stream */
	ret = rte_mempool_get_bulk(vf->zip_mp,
			z_stream->bufs, MAX_BUFS_PER_STREAM);
	if (ret < 0)
		return -1;

	/* get one command buffer from pool and set up */
	inst = (union zip_inst_s *)z_stream->bufs[CMD_BUF];
	res = z_stream->bufs[RES_BUF];

	memset(inst->u, 0, sizeof(inst->u));

	/* set bf only for the first op of a stream */
	inst->s.bf = 1;

	if (xform->type == RTE_COMP_COMPRESS) {
		inst->s.op = ZIP_OP_E_COMP;

		switch (xform->compress.deflate.huffman) {
		case RTE_COMP_HUFFMAN_DEFAULT:
			inst->s.cc = ZIP_CC_DEFAULT;
			break;
		case RTE_COMP_HUFFMAN_FIXED:
			inst->s.cc = ZIP_CC_FIXED_HUFF;
			break;
		case RTE_COMP_HUFFMAN_DYNAMIC:
			inst->s.cc = ZIP_CC_DYN_HUFF;
			break;
		default:
			ret = -1;
			goto err;
		}

		switch (xform->compress.level) {
		case RTE_COMP_LEVEL_MIN:
			inst->s.ss = ZIP_COMP_E_LEVEL_MIN;
			break;
		case RTE_COMP_LEVEL_MAX:
			inst->s.ss = ZIP_COMP_E_LEVEL_MAX;
			break;
		case RTE_COMP_LEVEL_NONE:
			ZIP_PMD_ERR("Compression level not supported");
			ret = -1;
			goto err;
		default:
			/* for any value between min and max, choose
			 * the PMD default.
			 */
			inst->s.ss = ZIP_COMP_E_LEVEL_MED; /* PMD default */
			break;
		}
	} else if (xform->type == RTE_COMP_DECOMPRESS) {
		inst->s.op = ZIP_OP_E_DECOMP;
		/* from HRM,
		 * For DEFLATE decompression, [CC] must be 0x0.
		 * For decompression, [SS] must be 0x0
		 */
		inst->s.cc = 0;
		/* Speed bit should not be set for decompression */
		inst->s.ss = 0;
		/* decompression context is supported only for STATEFUL
		 * operations. Currently we support STATELESS ONLY so
		 * skip setting of ctx pointer
		 */

	} else {
		ZIP_PMD_ERR("xform type not supported");
		ret = -1;
		goto err;
	}

	inst->s.res_ptr_addr.s.addr = rte_mempool_virt2iova(res);
	inst->s.res_ptr_ctl.s.length = 0;

	z_stream->inst = inst;
	z_stream->func = zip_process_op;

	return 0;

err:
	rte_mempool_put_bulk(vf->zip_mp,
			     (void *)&(z_stream->bufs[0]),
			     MAX_BUFS_PER_STREAM);

	return ret;
}

/** Configure device */
static int
zip_pmd_config(struct rte_compressdev *dev,
		struct rte_compressdev_config *config)
{
	int nb_streams;
	char res_pool[RTE_MEMZONE_NAMESIZE];
	struct zip_vf *vf;
	struct rte_mempool *zip_buf_mp;

	if (!config || !dev)
		return -EIO;

	vf = (struct zip_vf *)(dev->data->dev_private);

	/* Create a pool with the maximum number of resources
	 * required by the streams.
	 */

	/* use common pool for non-shareable priv_xform and stream */
	nb_streams = config->max_nb_priv_xforms + config->max_nb_streams;

	snprintf(res_pool, RTE_MEMZONE_NAMESIZE, "octtx_zip_res_pool%u",
		 dev->data->dev_id);

	/* TBD: Should we use the per-core object cache for stream resources? */
	zip_buf_mp = rte_mempool_create(
			res_pool,
			nb_streams * MAX_BUFS_PER_STREAM,
			ZIP_BUF_SIZE,
			0,
			0,
			NULL,
			NULL,
			NULL,
			NULL,
			SOCKET_ID_ANY,
			0);

	if (zip_buf_mp == NULL) {
		ZIP_PMD_ERR(
			"Failed to create buf mempool octtx_zip_res_pool%u",
			dev->data->dev_id);
		return -1;
	}

	vf->zip_mp = zip_buf_mp;

	return 0;
}

/** Start device */
static int
zip_pmd_start(__rte_unused struct rte_compressdev *dev)
{
	return 0;
}

/** Stop device */
static void
zip_pmd_stop(__rte_unused struct rte_compressdev *dev)
{

}

/** Close device */
static int
zip_pmd_close(struct rte_compressdev *dev)
{
	if (dev == NULL)
		return -1;

	struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
	rte_mempool_free(vf->zip_mp);

	return 0;
}

/** Get device statistics */
static void
zip_pmd_stats_get(struct rte_compressdev *dev,
		struct rte_compressdev_stats *stats)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];

		stats->enqueued_count += qp->qp_stats.enqueued_count;
		stats->dequeued_count += qp->qp_stats.dequeued_count;

		stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
		stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
	}
}

/** Reset device statistics */
static void
zip_pmd_stats_reset(struct rte_compressdev *dev)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];
		memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
	}
}

/** Get device info */
static void
zip_pmd_info_get(struct rte_compressdev *dev,
		struct rte_compressdev_info *dev_info)
{
	struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;

	if (dev_info != NULL) {
		dev_info->driver_name = dev->device->driver->name;
		dev_info->feature_flags = dev->feature_flags;
		dev_info->capabilities = octtx_zip_pmd_capabilities;
		dev_info->max_nb_queue_pairs = vf->max_nb_queue_pairs;
	}
}

/** Release queue pair */
static int
zip_pmd_qp_release(struct rte_compressdev *dev, uint16_t qp_id)
{
	struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];

	if (qp != NULL) {
		zipvf_q_term(qp);

		if (qp->processed_pkts)
			rte_ring_free(qp->processed_pkts);

		rte_free(qp);
		dev->data->queue_pairs[qp_id] = NULL;
	}
	return 0;
}

/** Create a ring to place processed packets on */
static struct rte_ring *
zip_pmd_qp_create_processed_pkts_ring(struct zipvf_qp *qp,
		unsigned int ring_size, int socket_id)
{
	struct rte_ring *r;

	r = rte_ring_lookup(qp->name);
	if (r) {
		if (rte_ring_get_size(r) >= ring_size) {
			ZIP_PMD_INFO("Reusing existing ring %s for processed"
					" packets", qp->name);
			return r;
		}

		ZIP_PMD_ERR("Unable to reuse existing ring %s for processed"
				" packets", qp->name);
		return NULL;
	}

	return rte_ring_create(qp->name, ring_size, socket_id,
						RING_F_EXACT_SZ);
}

/** Setup a queue pair */
static int
zip_pmd_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
		uint32_t max_inflight_ops, int socket_id)
{
	struct zipvf_qp *qp = NULL;
	struct zip_vf *vf;
	char *name;
	int ret;

	if (!dev)
		return -1;

	vf = (struct zip_vf *) (dev->data->dev_private);

	/* Reuse the existing queue pair if it has already been set up. */
	if (dev->data->queue_pairs[qp_id] != NULL) {
		ZIP_PMD_INFO("Using existing queue pair %d ", qp_id);
		return 0;
	}

	name = rte_malloc(NULL, RTE_COMPRESSDEV_NAME_MAX_LEN, 0);
	if (name == NULL)
		return (-ENOMEM);
	snprintf(name, RTE_COMPRESSDEV_NAME_MAX_LEN,
		 "zip_pmd_%u_qp_%u",
		 dev->data->dev_id, qp_id);

	/* Allocate the queue pair data structure. */
	qp = rte_zmalloc_socket(name, sizeof(*qp),
				RTE_CACHE_LINE_SIZE, socket_id);
	if (qp == NULL) {
		rte_free(name);
		return (-ENOMEM);
	}

	qp->name = name;

	/* Create completion queue up to max_inflight_ops */
	qp->processed_pkts = zip_pmd_qp_create_processed_pkts_ring(qp,
						max_inflight_ops, socket_id);
	if (qp->processed_pkts == NULL)
		goto qp_setup_cleanup;

	qp->id = qp_id;
	qp->vf = vf;

	ret = zipvf_q_init(qp);
	if (ret < 0)
		goto qp_setup_cleanup;

	dev->data->queue_pairs[qp_id] = qp;

	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
	return 0;

qp_setup_cleanup:
	if (qp->processed_pkts)
		rte_ring_free(qp->processed_pkts);
	rte_free(qp->name);
	rte_free(qp);
	return -1;
}

static int
zip_pmd_stream_create(struct rte_compressdev *dev,
		const struct rte_comp_xform *xform, void **stream)
{
	int ret;
	struct zip_stream *strm = NULL;

	strm = rte_malloc(NULL,
			sizeof(struct zip_stream), 0);

	if (strm == NULL)
		return (-ENOMEM);

	ret = zip_set_stream_parameters(dev, xform, strm);
	if (ret < 0) {
		ZIP_PMD_ERR("failed to configure xform parameters");
		rte_free(strm);
		return ret;
	}
	*stream = strm;
	return 0;
}

static int
zip_pmd_stream_free(struct rte_compressdev *dev, void *stream)
{
	struct zip_vf *vf = (struct zip_vf *) (dev->data->dev_private);
	struct zip_stream *z_stream;

	if (stream == NULL)
		return 0;

	z_stream = (struct zip_stream *)stream;

	/* Free resources back to pool */
	rte_mempool_put_bulk(vf->zip_mp,
				(void *)&(z_stream->bufs[0]),
				MAX_BUFS_PER_STREAM);

	/* Zero out the whole structure */
	memset(stream, 0, sizeof(struct zip_stream));
	rte_free(stream);

	return 0;
}

483 zip_pmd_enqueue_burst_sync(void *queue_pair,
484 		struct rte_comp_op **ops, uint16_t nb_ops)
485 {
486 	struct zipvf_qp *qp = queue_pair;
487 	struct rte_comp_op *op;
488 	struct zip_stream *zstrm;
489 	int i, ret = 0;
490 	uint16_t enqd = 0;
491 
492 	for (i = 0; i < nb_ops; i++) {
493 		op = ops[i];
494 
495 		if (op->op_type == RTE_COMP_OP_STATEFUL) {
496 			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
497 		} else {
498 			/* process stateless ops */
499 			zstrm = (struct zip_stream *)op->private_xform;
500 			if (unlikely(zstrm == NULL))
501 				op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
502 			else
503 				ret = zstrm->func(op, qp, zstrm);
504 		}
505 
506 		/* Whatever is out of op, put it into completion queue with
507 		 * its status
508 		 */
509 		if (!ret)
510 			ret = rte_ring_enqueue(qp->processed_pkts, (void *)op);
511 
512 		if (unlikely(ret < 0)) {
513 			/* increment count if failed to enqueue op*/
514 			qp->qp_stats.enqueue_err_count++;
515 		} else {
516 			qp->qp_stats.enqueued_count++;
517 			enqd++;
518 		}
519 	}
520 	return enqd;
521 }
522 
static uint16_t
zip_pmd_dequeue_burst_sync(void *queue_pair,
		struct rte_comp_op **ops, uint16_t nb_ops)
{
	struct zipvf_qp *qp = queue_pair;

	unsigned int nb_dequeued = 0;

	nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
			(void **)ops, nb_ops, NULL);
	qp->qp_stats.dequeued_count += nb_dequeued;

	return nb_dequeued;
}

static struct rte_compressdev_ops octtx_zip_pmd_ops = {
		.dev_configure		= zip_pmd_config,
		.dev_start		= zip_pmd_start,
		.dev_stop		= zip_pmd_stop,
		.dev_close		= zip_pmd_close,

		.stats_get		= zip_pmd_stats_get,
		.stats_reset		= zip_pmd_stats_reset,

		.dev_infos_get		= zip_pmd_info_get,

		.queue_pair_setup	= zip_pmd_qp_setup,
		.queue_pair_release	= zip_pmd_qp_release,

		.private_xform_create	= zip_pmd_stream_create,
		.private_xform_free	= zip_pmd_stream_free,
		.stream_create		= NULL,
		.stream_free		= NULL
};
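
/*
 * For reference only (not part of the driver): an application exercises the
 * ops above roughly as follows, assuming dev_id, qp_id, config, xform and
 * ops[] are prepared by the caller:
 *
 *	rte_compressdev_configure(dev_id, &config);        -> zip_pmd_config
 *	rte_compressdev_queue_pair_setup(dev_id, qp_id,
 *			max_inflight_ops, socket_id);       -> zip_pmd_qp_setup
 *	rte_compressdev_start(dev_id);                      -> zip_pmd_start
 *	rte_compressdev_private_xform_create(dev_id,
 *			&xform, &priv_xform);               -> zip_pmd_stream_create
 *	op->private_xform = priv_xform;
 *	rte_compressdev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
 *	rte_compressdev_dequeue_burst(dev_id, qp_id, ops, nb_ops);
 */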

static int
zip_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	int ret = 0;
	char compressdev_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
	struct rte_compressdev *compressdev;
	struct rte_compressdev_pmd_init_params init_params = {
		"",
		rte_socket_id(),
	};

	ZIP_PMD_INFO("vendor_id=0x%x device_id=0x%x",
			(unsigned int)pci_dev->id.vendor_id,
			(unsigned int)pci_dev->id.device_id);

	rte_pci_device_name(&pci_dev->addr, compressdev_name,
			    sizeof(compressdev_name));

	compressdev = rte_compressdev_pmd_create(compressdev_name,
		&pci_dev->device, sizeof(struct zip_vf), &init_params);
	if (compressdev == NULL) {
		ZIP_PMD_ERR("driver %s: create failed", init_params.name);
		return -ENODEV;
	}

	/*
	 * create only if proc_type is primary.
	 */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		/* create vf dev with given pmd dev id */
		ret = zipvf_create(compressdev);
		if (ret < 0) {
			ZIP_PMD_ERR("Device creation failed");
			rte_compressdev_pmd_destroy(compressdev);
			return ret;
		}
	}

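	/* Ops and burst function pointers are assigned in every process so
	 * that secondary processes can also drive the device.
	 */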
	compressdev->dev_ops = &octtx_zip_pmd_ops;
	/* register rx/tx burst functions for data path */
	compressdev->dequeue_burst = zip_pmd_dequeue_burst_sync;
	compressdev->enqueue_burst = zip_pmd_enqueue_burst_sync;
	compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
	return ret;
}

static int
zip_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_compressdev *compressdev;
	char compressdev_name[RTE_COMPRESSDEV_NAME_MAX_LEN];

	if (pci_dev == NULL) {
		ZIP_PMD_ERR("Invalid PCI device\n");
		return -EINVAL;
	}
	rte_pci_device_name(&pci_dev->addr, compressdev_name,
			sizeof(compressdev_name));

	compressdev = rte_compressdev_pmd_get_named_dev(compressdev_name);
	if (compressdev == NULL)
		return -ENODEV;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		if (zipvf_destroy(compressdev) < 0)
			return -ENODEV;
	}
	return rte_compressdev_pmd_destroy(compressdev);
}

static struct rte_pci_id pci_id_octtx_zipvf_table[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
			PCI_DEVICE_ID_OCTEONTX_ZIPVF),
	},
	{
		.device_id = 0
	},
};

/**
 * Structure that represents a PCI driver
 */
static struct rte_pci_driver octtx_zip_pmd = {
	.id_table    = pci_id_octtx_zipvf_table,
	.drv_flags   = RTE_PCI_DRV_NEED_MAPPING,
	.probe       = zip_pci_probe,
	.remove      = zip_pci_remove,
};

RTE_PMD_REGISTER_PCI(COMPRESSDEV_NAME_ZIP_PMD, octtx_zip_pmd);
RTE_PMD_REGISTER_PCI_TABLE(COMPRESSDEV_NAME_ZIP_PMD, pci_id_octtx_zipvf_table);

RTE_INIT(octtx_zip_init_log)
{
	octtx_zip_logtype_driver = rte_log_register("pmd.compress.octeontx");
	if (octtx_zip_logtype_driver >= 0)
		rte_log_set_level(octtx_zip_logtype_driver, RTE_LOG_INFO);
}