xref: /dpdk/drivers/compress/octeontx/otx_zip_pmd.c (revision 52048f8f89aff15b90d6dbf81ec665e5fc53d24d)
143e610bbSSunila Sahu /* SPDX-License-Identifier: BSD-3-Clause
243e610bbSSunila Sahu  * Copyright(c) 2018 Cavium, Inc
343e610bbSSunila Sahu  */
443e610bbSSunila Sahu 
543e610bbSSunila Sahu #include <string.h>
643e610bbSSunila Sahu 
743e610bbSSunila Sahu #include <rte_byteorder.h>
843e610bbSSunila Sahu #include <rte_common.h>
943e610bbSSunila Sahu #include <rte_cpuflags.h>
1043e610bbSSunila Sahu #include <rte_malloc.h>
1143e610bbSSunila Sahu 
1243e610bbSSunila Sahu #include "otx_zip.h"
1343e610bbSSunila Sahu 
/**
 * Capabilities advertised to the compressdev framework: DEFLATE only,
 * fixed and dynamic Huffman coding, window sizes 2^1..2^14.
 */
static const struct rte_compressdev_capabilities
				octtx_zip_pmd_capabilities[] = {
	{	.algo = RTE_COMP_ALGO_DEFLATE,
		/* Deflate */
		.comp_feature_flags =	RTE_COMP_FF_HUFFMAN_FIXED |
					RTE_COMP_FF_HUFFMAN_DYNAMIC,
		/* Non sharable Priv XFORM and Stateless */
		.window_size = {
				.min = 1,
				.max = 14,
				.increment = 1
				/* size supported 2^1 to 2^14 */
		},
	},
	RTE_COMP_END_OF_CAPABILITIES_LIST()
};
3043e610bbSSunila Sahu 
31*52048f8fSAshish Gupta /*
32*52048f8fSAshish Gupta  * Reset session to default state for next set of stateless operation
33*52048f8fSAshish Gupta  */
34*52048f8fSAshish Gupta static inline void
35*52048f8fSAshish Gupta reset_stream(struct zip_stream *z_stream)
36*52048f8fSAshish Gupta {
37*52048f8fSAshish Gupta 	union zip_inst_s *inst = (union zip_inst_s *)(z_stream->inst);
38*52048f8fSAshish Gupta 
39*52048f8fSAshish Gupta 	inst->s.bf = 1;
40*52048f8fSAshish Gupta 	inst->s.ef = 0;
41*52048f8fSAshish Gupta }
42*52048f8fSAshish Gupta 
/*
 * Process one stateless (de)compression operation synchronously.
 *
 * Builds the hardware instruction from @op, pushes it on queue pair @qp
 * and busy-polls the result buffer until the engine writes a completion
 * code. On return, op->status, op->consumed and op->produced reflect the
 * outcome and the stream command is reset for the next stateless op.
 *
 * Always returns 0; errors are reported only through op->status.
 */
int
zip_process_op(struct rte_comp_op *op,
		struct zipvf_qp *qp,
		struct zip_stream *zstrm)
{
	union zip_inst_s *inst = zstrm->inst;
	volatile union zip_zres_s *zresult = NULL;


	/* Only direct single-segment mbufs with in-range offsets are
	 * supported by this data path; reject anything else up front.
	 */
	if ((op->m_src->nb_segs > 1) || (op->m_dst->nb_segs > 1) ||
			(op->src.offset > rte_pktmbuf_pkt_len(op->m_src)) ||
			(op->dst.offset > rte_pktmbuf_pkt_len(op->m_dst))) {
		op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
		ZIP_PMD_ERR("Segmented packet is not supported\n");
		return 0;
	}

	/* Fill src/dst pointers and lengths of the instruction from op */
	zipvf_prepare_cmd_stateless(op, zstrm);

	/* Clear the (volatile) completion code so the poll below only
	 * observes the engine's write for this submission.
	 */
	zresult = (union zip_zres_s *)zstrm->bufs[RES_BUF];
	zresult->s.compcode = 0;

#ifdef ZIP_DBG
	zip_dump_instruction(inst);
#endif

	/* Submit zip command */
	zipvf_push_command(qp, (void *)inst);

	/* Check and Process results in sync mode.
	 * NOTE(review): untimed busy-wait — a hung engine would spin
	 * forever here; consider a bounded poll. TODO confirm HW contract.
	 */
	do {
	} while (!zresult->s.compcode);

	if (zresult->s.compcode == ZIP_COMP_E_SUCCESS) {
		op->status = RTE_COMP_OP_STATUS_SUCCESS;
	} else {
		/* FATAL error cannot do anything */
		ZIP_PMD_ERR("operation failed with error code:%d\n",
			zresult->s.compcode);
		if (zresult->s.compcode == ZIP_COMP_E_DSTOP)
			op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
		else
			op->status = RTE_COMP_OP_STATUS_ERROR;
	}

	ZIP_PMD_INFO("written %d\n", zresult->s.totalbyteswritten);

	/* Update op stats */
	switch (op->status) {
	case RTE_COMP_OP_STATUS_SUCCESS:
		op->consumed = zresult->s.totalbytesread;
	/* Fall-through */
	case RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED:
		op->produced = zresult->s.totalbyteswritten;
		break;
	default:
		ZIP_PMD_ERR("stats not updated for status:%d\n",
				op->status);
		break;
	}
	/* zstream is reset irrespective of result */
	reset_stream(zstrm);

	/* Re-arm the result buffer for the next submission */
	zresult->s.compcode = ZIP_COMP_E_NOTDONE;
	return 0;
}
109*52048f8fSAshish Gupta 
/** Parse xform parameters and setup a stream.
 *
 * Allocates the per-stream buffers from the VF's common mempool, builds
 * the base hardware instruction (compression codec/level or decompression
 * defaults) and wires the result buffer and process callback into
 * @z_stream. Returns 0 on success, negative on failure (buffers are
 * returned to the pool on the error path).
 */
static int
zip_set_stream_parameters(struct rte_compressdev *dev,
			const struct rte_comp_xform *xform,
			struct zip_stream *z_stream)
{
	int ret;
	union zip_inst_s *inst;
	struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
	void *res;

	/* Allocate resources required by a stream */
	ret = rte_mempool_get_bulk(vf->zip_mp,
			z_stream->bufs, MAX_BUFS_PER_STREAM);
	if (ret < 0)
		return -1;

	/* get one command buffer from pool and set up */
	inst = (union zip_inst_s *)z_stream->bufs[CMD_BUF];
	res = z_stream->bufs[RES_BUF];

	memset(inst->u, 0, sizeof(inst->u));

	/* set bf for only first ops of stream */
	inst->s.bf = 1;

	if (xform->type == RTE_COMP_COMPRESS) {
		inst->s.op = ZIP_OP_E_COMP;

		/* Map API Huffman choice onto the engine's coding control */
		switch (xform->compress.deflate.huffman) {
		case RTE_COMP_HUFFMAN_DEFAULT:
			inst->s.cc = ZIP_CC_DEFAULT;
			break;
		case RTE_COMP_HUFFMAN_FIXED:
			inst->s.cc = ZIP_CC_FIXED_HUFF;
			break;
		case RTE_COMP_HUFFMAN_DYNAMIC:
			inst->s.cc = ZIP_CC_DYN_HUFF;
			break;
		default:
			ret = -1;
			goto err;
		}

		/* Map the API compression level onto the engine speed/size
		 * setting; LEVEL_NONE (store-only) is not supported.
		 */
		switch (xform->compress.level) {
		case RTE_COMP_LEVEL_MIN:
			inst->s.ss = ZIP_COMP_E_LEVEL_MIN;
			break;
		case RTE_COMP_LEVEL_MAX:
			inst->s.ss = ZIP_COMP_E_LEVEL_MAX;
			break;
		case RTE_COMP_LEVEL_NONE:
			ZIP_PMD_ERR("Compression level not supported");
			ret = -1;
			goto err;
		default:
			/* for any value between min and max , choose
			 * PMD default.
			 */
			inst->s.ss = ZIP_COMP_E_LEVEL_MED; /** PMD default **/
			break;
		}
	} else if (xform->type == RTE_COMP_DECOMPRESS) {
		inst->s.op = ZIP_OP_E_DECOMP;
		/* from HRM,
		 * For DEFLATE decompression, [CC] must be 0x0.
		 * For decompression, [SS] must be 0x0
		 */
		inst->s.cc = 0;
		/* Speed bit should not be set for decompression */
		inst->s.ss = 0;
		/* decompression context is supported only for STATEFUL
		 * operations. Currently we support STATELESS ONLY so
		 * skip setting of ctx pointer
		 */

	} else {
		ZIP_PMD_ERR("\nxform type not supported");
		ret = -1;
		goto err;
	}

	/* Point the instruction at the IOVA of the result buffer */
	inst->s.res_ptr_addr.s.addr = rte_mempool_virt2iova(res);
	inst->s.res_ptr_ctl.s.length = 0;

	z_stream->inst = inst;
	z_stream->func = zip_process_op;

	return 0;

err:
	/* Return all stream buffers to the pool on failure */
	rte_mempool_put_bulk(vf->zip_mp,
			     (void *)&(z_stream->bufs[0]),
			     MAX_BUFS_PER_STREAM);

	return ret;
}
207b43ebc65SAshish Gupta 
208c378f084SAshish Gupta /** Configure device */
209c378f084SAshish Gupta static int
210c378f084SAshish Gupta zip_pmd_config(struct rte_compressdev *dev,
211c378f084SAshish Gupta 		struct rte_compressdev_config *config)
212c378f084SAshish Gupta {
213c378f084SAshish Gupta 	int nb_streams;
214c378f084SAshish Gupta 	char res_pool[RTE_MEMZONE_NAMESIZE];
215c378f084SAshish Gupta 	struct zip_vf *vf;
216c378f084SAshish Gupta 	struct rte_mempool *zip_buf_mp;
217c378f084SAshish Gupta 
218c378f084SAshish Gupta 	if (!config || !dev)
219c378f084SAshish Gupta 		return -EIO;
220c378f084SAshish Gupta 
221c378f084SAshish Gupta 	vf = (struct zip_vf *)(dev->data->dev_private);
222c378f084SAshish Gupta 
223c378f084SAshish Gupta 	/* create pool with maximum numbers of resources
224c378f084SAshish Gupta 	 * required by streams
225c378f084SAshish Gupta 	 */
226c378f084SAshish Gupta 
227c378f084SAshish Gupta 	/* use common pool for non-shareable priv_xform and stream */
228c378f084SAshish Gupta 	nb_streams = config->max_nb_priv_xforms + config->max_nb_streams;
229c378f084SAshish Gupta 
230c378f084SAshish Gupta 	snprintf(res_pool, RTE_MEMZONE_NAMESIZE, "octtx_zip_res_pool%u",
231c378f084SAshish Gupta 		 dev->data->dev_id);
232c378f084SAshish Gupta 
233c378f084SAshish Gupta 	/** TBD Should we use the per core object cache for stream resources */
234c378f084SAshish Gupta 	zip_buf_mp = rte_mempool_create(
235c378f084SAshish Gupta 			res_pool,
236c378f084SAshish Gupta 			nb_streams * MAX_BUFS_PER_STREAM,
237c378f084SAshish Gupta 			ZIP_BUF_SIZE,
238c378f084SAshish Gupta 			0,
239c378f084SAshish Gupta 			0,
240c378f084SAshish Gupta 			NULL,
241c378f084SAshish Gupta 			NULL,
242c378f084SAshish Gupta 			NULL,
243c378f084SAshish Gupta 			NULL,
244c378f084SAshish Gupta 			SOCKET_ID_ANY,
245c378f084SAshish Gupta 			0);
246c378f084SAshish Gupta 
247c378f084SAshish Gupta 	if (zip_buf_mp == NULL) {
248c378f084SAshish Gupta 		ZIP_PMD_ERR(
249c378f084SAshish Gupta 			"Failed to create buf mempool octtx_zip_res_pool%u",
250c378f084SAshish Gupta 			dev->data->dev_id);
251c378f084SAshish Gupta 		return -1;
252c378f084SAshish Gupta 	}
253c378f084SAshish Gupta 
254c378f084SAshish Gupta 	vf->zip_mp = zip_buf_mp;
255c378f084SAshish Gupta 
256c378f084SAshish Gupta 	return 0;
257c378f084SAshish Gupta }
258c378f084SAshish Gupta 
259c378f084SAshish Gupta /** Start device */
260c378f084SAshish Gupta static int
261c378f084SAshish Gupta zip_pmd_start(__rte_unused struct rte_compressdev *dev)
262c378f084SAshish Gupta {
263c378f084SAshish Gupta 	return 0;
264c378f084SAshish Gupta }
265c378f084SAshish Gupta 
266c378f084SAshish Gupta /** Stop device */
267c378f084SAshish Gupta static void
268c378f084SAshish Gupta zip_pmd_stop(__rte_unused struct rte_compressdev *dev)
269c378f084SAshish Gupta {
270c378f084SAshish Gupta 
271c378f084SAshish Gupta }
272c378f084SAshish Gupta 
273c378f084SAshish Gupta /** Close device */
274c378f084SAshish Gupta static int
275c378f084SAshish Gupta zip_pmd_close(struct rte_compressdev *dev)
276c378f084SAshish Gupta {
277c378f084SAshish Gupta 	if (dev == NULL)
278c378f084SAshish Gupta 		return -1;
279c378f084SAshish Gupta 
280c378f084SAshish Gupta 	struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
281c378f084SAshish Gupta 	rte_mempool_free(vf->zip_mp);
282c378f084SAshish Gupta 
283c378f084SAshish Gupta 	return 0;
284c378f084SAshish Gupta }
285c378f084SAshish Gupta 
286c378f084SAshish Gupta /** Get device statistics */
287c378f084SAshish Gupta static void
288c378f084SAshish Gupta zip_pmd_stats_get(struct rte_compressdev *dev,
289c378f084SAshish Gupta 		struct rte_compressdev_stats *stats)
290c378f084SAshish Gupta {
291c378f084SAshish Gupta 	int qp_id;
292c378f084SAshish Gupta 
293c378f084SAshish Gupta 	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
294c378f084SAshish Gupta 		struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];
295c378f084SAshish Gupta 
296c378f084SAshish Gupta 		stats->enqueued_count += qp->qp_stats.enqueued_count;
297c378f084SAshish Gupta 		stats->dequeued_count += qp->qp_stats.dequeued_count;
298c378f084SAshish Gupta 
299c378f084SAshish Gupta 		stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
300c378f084SAshish Gupta 		stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
301c378f084SAshish Gupta 	}
302c378f084SAshish Gupta }
303c378f084SAshish Gupta 
304c378f084SAshish Gupta /** Reset device statistics */
305c378f084SAshish Gupta static void
306c378f084SAshish Gupta zip_pmd_stats_reset(struct rte_compressdev *dev)
307c378f084SAshish Gupta {
308c378f084SAshish Gupta 	int qp_id;
309c378f084SAshish Gupta 
310c378f084SAshish Gupta 	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
311c378f084SAshish Gupta 		struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];
312c378f084SAshish Gupta 		memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
313c378f084SAshish Gupta 	}
314c378f084SAshish Gupta }
315c378f084SAshish Gupta 
316c378f084SAshish Gupta /** Get device info */
317c378f084SAshish Gupta static void
318c378f084SAshish Gupta zip_pmd_info_get(struct rte_compressdev *dev,
319c378f084SAshish Gupta 		struct rte_compressdev_info *dev_info)
320c378f084SAshish Gupta {
321c378f084SAshish Gupta 	struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
322c378f084SAshish Gupta 
323c378f084SAshish Gupta 	if (dev_info != NULL) {
324c378f084SAshish Gupta 		dev_info->driver_name = dev->device->driver->name;
325c378f084SAshish Gupta 		dev_info->feature_flags = dev->feature_flags;
326c378f084SAshish Gupta 		dev_info->capabilities = octtx_zip_pmd_capabilities;
327c378f084SAshish Gupta 		dev_info->max_nb_queue_pairs = vf->max_nb_queue_pairs;
328c378f084SAshish Gupta 	}
329c378f084SAshish Gupta }
330c378f084SAshish Gupta 
331c378f084SAshish Gupta /** Release queue pair */
332c378f084SAshish Gupta static int
333c378f084SAshish Gupta zip_pmd_qp_release(struct rte_compressdev *dev, uint16_t qp_id)
334c378f084SAshish Gupta {
335c378f084SAshish Gupta 	struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];
336c378f084SAshish Gupta 
337c378f084SAshish Gupta 	if (qp != NULL) {
338c378f084SAshish Gupta 		zipvf_q_term(qp);
339c378f084SAshish Gupta 
340c378f084SAshish Gupta 		if (qp->processed_pkts)
341c378f084SAshish Gupta 			rte_ring_free(qp->processed_pkts);
342c378f084SAshish Gupta 
343c378f084SAshish Gupta 		rte_free(qp);
344c378f084SAshish Gupta 		dev->data->queue_pairs[qp_id] = NULL;
345c378f084SAshish Gupta 	}
346c378f084SAshish Gupta 	return 0;
347c378f084SAshish Gupta }
348c378f084SAshish Gupta 
349c378f084SAshish Gupta /** Create a ring to place process packets on */
350c378f084SAshish Gupta static struct rte_ring *
351c378f084SAshish Gupta zip_pmd_qp_create_processed_pkts_ring(struct zipvf_qp *qp,
352c378f084SAshish Gupta 		unsigned int ring_size, int socket_id)
353c378f084SAshish Gupta {
354c378f084SAshish Gupta 	struct rte_ring *r;
355c378f084SAshish Gupta 
356c378f084SAshish Gupta 	r = rte_ring_lookup(qp->name);
357c378f084SAshish Gupta 	if (r) {
358c378f084SAshish Gupta 		if (rte_ring_get_size(r) >= ring_size) {
359c378f084SAshish Gupta 			ZIP_PMD_INFO("Reusing existing ring %s for processed"
360c378f084SAshish Gupta 					" packets", qp->name);
361c378f084SAshish Gupta 			return r;
362c378f084SAshish Gupta 		}
363c378f084SAshish Gupta 
364c378f084SAshish Gupta 		ZIP_PMD_ERR("Unable to reuse existing ring %s for processed"
365c378f084SAshish Gupta 				" packets", qp->name);
366c378f084SAshish Gupta 		return NULL;
367c378f084SAshish Gupta 	}
368c378f084SAshish Gupta 
369c378f084SAshish Gupta 	return rte_ring_create(qp->name, ring_size, socket_id,
370c378f084SAshish Gupta 						RING_F_EXACT_SZ);
371c378f084SAshish Gupta }
372c378f084SAshish Gupta 
373c378f084SAshish Gupta /** Setup a queue pair */
374c378f084SAshish Gupta static int
375c378f084SAshish Gupta zip_pmd_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
376c378f084SAshish Gupta 		uint32_t max_inflight_ops, int socket_id)
377c378f084SAshish Gupta {
378c378f084SAshish Gupta 	struct zipvf_qp *qp = NULL;
379c378f084SAshish Gupta 	struct zip_vf *vf;
380c378f084SAshish Gupta 	char *name;
381c378f084SAshish Gupta 	int ret;
382c378f084SAshish Gupta 
383c378f084SAshish Gupta 	if (!dev)
384c378f084SAshish Gupta 		return -1;
385c378f084SAshish Gupta 
386c378f084SAshish Gupta 	vf = (struct zip_vf *) (dev->data->dev_private);
387c378f084SAshish Gupta 
388c378f084SAshish Gupta 	/* Free memory prior to re-allocation if needed. */
389c378f084SAshish Gupta 	if (dev->data->queue_pairs[qp_id] != NULL) {
390c378f084SAshish Gupta 		ZIP_PMD_INFO("Using existing queue pair %d ", qp_id);
391c378f084SAshish Gupta 		return 0;
392c378f084SAshish Gupta 	}
393c378f084SAshish Gupta 
394c378f084SAshish Gupta 	name =  rte_malloc(NULL, RTE_COMPRESSDEV_NAME_MAX_LEN, 0);
395c378f084SAshish Gupta 	snprintf(name, RTE_COMPRESSDEV_NAME_MAX_LEN,
396c378f084SAshish Gupta 		 "zip_pmd_%u_qp_%u",
397c378f084SAshish Gupta 		 dev->data->dev_id, qp_id);
398c378f084SAshish Gupta 
399c378f084SAshish Gupta 	/* Allocate the queue pair data structure. */
400c378f084SAshish Gupta 	qp = rte_zmalloc_socket(name, sizeof(*qp),
401c378f084SAshish Gupta 				RTE_CACHE_LINE_SIZE, socket_id);
402c378f084SAshish Gupta 	if (qp == NULL)
403c378f084SAshish Gupta 		return (-ENOMEM);
404c378f084SAshish Gupta 
405c378f084SAshish Gupta 	qp->name = name;
406c378f084SAshish Gupta 
407c378f084SAshish Gupta 	/* Create completion queue upto max_inflight_ops */
408c378f084SAshish Gupta 	qp->processed_pkts = zip_pmd_qp_create_processed_pkts_ring(qp,
409c378f084SAshish Gupta 						max_inflight_ops, socket_id);
410c378f084SAshish Gupta 	if (qp->processed_pkts == NULL)
411c378f084SAshish Gupta 		goto qp_setup_cleanup;
412c378f084SAshish Gupta 
413c378f084SAshish Gupta 	qp->id = qp_id;
414c378f084SAshish Gupta 	qp->vf = vf;
415c378f084SAshish Gupta 
416c378f084SAshish Gupta 	ret = zipvf_q_init(qp);
417c378f084SAshish Gupta 	if (ret < 0)
418c378f084SAshish Gupta 		goto qp_setup_cleanup;
419c378f084SAshish Gupta 
420c378f084SAshish Gupta 	dev->data->queue_pairs[qp_id] = qp;
421c378f084SAshish Gupta 
422c378f084SAshish Gupta 	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
423c378f084SAshish Gupta 	return 0;
424c378f084SAshish Gupta 
425c378f084SAshish Gupta qp_setup_cleanup:
426c378f084SAshish Gupta 	if (qp->processed_pkts)
427c378f084SAshish Gupta 		rte_ring_free(qp->processed_pkts);
428c378f084SAshish Gupta 	if (qp)
429c378f084SAshish Gupta 		rte_free(qp);
430c378f084SAshish Gupta 	return -1;
431c378f084SAshish Gupta }
432c378f084SAshish Gupta 
433b43ebc65SAshish Gupta static int
434b43ebc65SAshish Gupta zip_pmd_stream_create(struct rte_compressdev *dev,
435b43ebc65SAshish Gupta 		const struct rte_comp_xform *xform, void **stream)
436b43ebc65SAshish Gupta {
437b43ebc65SAshish Gupta 	int ret;
438b43ebc65SAshish Gupta 	struct zip_stream *strm = NULL;
439b43ebc65SAshish Gupta 
440b43ebc65SAshish Gupta 	strm = rte_malloc(NULL,
441b43ebc65SAshish Gupta 			sizeof(struct zip_stream), 0);
442b43ebc65SAshish Gupta 
443b43ebc65SAshish Gupta 	if (strm == NULL)
444b43ebc65SAshish Gupta 		return (-ENOMEM);
445b43ebc65SAshish Gupta 
446b43ebc65SAshish Gupta 	ret = zip_set_stream_parameters(dev, xform, strm);
447b43ebc65SAshish Gupta 	if (ret < 0) {
448b43ebc65SAshish Gupta 		ZIP_PMD_ERR("failed configure xform parameters");
449b43ebc65SAshish Gupta 		rte_free(strm);
450b43ebc65SAshish Gupta 		return ret;
451b43ebc65SAshish Gupta 	}
452b43ebc65SAshish Gupta 	*stream = strm;
453b43ebc65SAshish Gupta 	return 0;
454b43ebc65SAshish Gupta }
455b43ebc65SAshish Gupta 
456b43ebc65SAshish Gupta static int
457b43ebc65SAshish Gupta zip_pmd_stream_free(struct rte_compressdev *dev, void *stream)
458b43ebc65SAshish Gupta {
459b43ebc65SAshish Gupta 	struct zip_vf *vf = (struct zip_vf *) (dev->data->dev_private);
460b43ebc65SAshish Gupta 	struct zip_stream *z_stream;
461b43ebc65SAshish Gupta 
462b43ebc65SAshish Gupta 	if (stream == NULL)
463b43ebc65SAshish Gupta 		return 0;
464b43ebc65SAshish Gupta 
465b43ebc65SAshish Gupta 	z_stream = (struct zip_stream *)stream;
466b43ebc65SAshish Gupta 
467b43ebc65SAshish Gupta 	/* Free resources back to pool */
468b43ebc65SAshish Gupta 	rte_mempool_put_bulk(vf->zip_mp,
469b43ebc65SAshish Gupta 				(void *)&(z_stream->bufs[0]),
470b43ebc65SAshish Gupta 				MAX_BUFS_PER_STREAM);
471b43ebc65SAshish Gupta 
472b43ebc65SAshish Gupta 	/* Zero out the whole structure */
473b43ebc65SAshish Gupta 	memset(stream, 0, sizeof(struct zip_stream));
474b43ebc65SAshish Gupta 	rte_free(stream);
475b43ebc65SAshish Gupta 
476b43ebc65SAshish Gupta 	return 0;
477b43ebc65SAshish Gupta }
478b43ebc65SAshish Gupta 
479b43ebc65SAshish Gupta 
480*52048f8fSAshish Gupta static uint16_t
481*52048f8fSAshish Gupta zip_pmd_enqueue_burst_sync(void *queue_pair,
482*52048f8fSAshish Gupta 		struct rte_comp_op **ops, uint16_t nb_ops)
483*52048f8fSAshish Gupta {
484*52048f8fSAshish Gupta 	struct zipvf_qp *qp = queue_pair;
485*52048f8fSAshish Gupta 	struct rte_comp_op *op;
486*52048f8fSAshish Gupta 	struct zip_stream *zstrm;
487*52048f8fSAshish Gupta 	int i, ret = 0;
488*52048f8fSAshish Gupta 	uint16_t enqd = 0;
489*52048f8fSAshish Gupta 
490*52048f8fSAshish Gupta 	for (i = 0; i < nb_ops; i++) {
491*52048f8fSAshish Gupta 		op = ops[i];
492*52048f8fSAshish Gupta 
493*52048f8fSAshish Gupta 		if (op->op_type == RTE_COMP_OP_STATEFUL) {
494*52048f8fSAshish Gupta 			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
495*52048f8fSAshish Gupta 		} else {
496*52048f8fSAshish Gupta 			/* process stateless ops */
497*52048f8fSAshish Gupta 			zstrm = (struct zip_stream *)op->private_xform;
498*52048f8fSAshish Gupta 			if (unlikely(zstrm == NULL))
499*52048f8fSAshish Gupta 				op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
500*52048f8fSAshish Gupta 			else
501*52048f8fSAshish Gupta 				ret = zstrm->func(op, qp, zstrm);
502*52048f8fSAshish Gupta 		}
503*52048f8fSAshish Gupta 
504*52048f8fSAshish Gupta 		/* Whatever is out of op, put it into completion queue with
505*52048f8fSAshish Gupta 		 * its status
506*52048f8fSAshish Gupta 		 */
507*52048f8fSAshish Gupta 		if (!ret)
508*52048f8fSAshish Gupta 			ret = rte_ring_enqueue(qp->processed_pkts, (void *)op);
509*52048f8fSAshish Gupta 
510*52048f8fSAshish Gupta 		if (unlikely(ret < 0)) {
511*52048f8fSAshish Gupta 			/* increment count if failed to enqueue op*/
512*52048f8fSAshish Gupta 			qp->qp_stats.enqueue_err_count++;
513*52048f8fSAshish Gupta 		} else {
514*52048f8fSAshish Gupta 			qp->qp_stats.enqueued_count++;
515*52048f8fSAshish Gupta 			enqd++;
516*52048f8fSAshish Gupta 		}
517*52048f8fSAshish Gupta 	}
518*52048f8fSAshish Gupta 	return enqd;
519*52048f8fSAshish Gupta }
520*52048f8fSAshish Gupta 
521*52048f8fSAshish Gupta static uint16_t
522*52048f8fSAshish Gupta zip_pmd_dequeue_burst_sync(void *queue_pair,
523*52048f8fSAshish Gupta 		struct rte_comp_op **ops, uint16_t nb_ops)
524*52048f8fSAshish Gupta {
525*52048f8fSAshish Gupta 	struct zipvf_qp *qp = queue_pair;
526*52048f8fSAshish Gupta 
527*52048f8fSAshish Gupta 	unsigned int nb_dequeued = 0;
528*52048f8fSAshish Gupta 
529*52048f8fSAshish Gupta 	nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
530*52048f8fSAshish Gupta 			(void **)ops, nb_ops, NULL);
531*52048f8fSAshish Gupta 	qp->qp_stats.dequeued_count += nb_dequeued;
532*52048f8fSAshish Gupta 
533*52048f8fSAshish Gupta 	return nb_dequeued;
534*52048f8fSAshish Gupta }
535*52048f8fSAshish Gupta 
/** Operation table registered with the compressdev framework.
 * Stateless only: the non-shareable priv_xform hooks are reused for
 * streams and the stateful stream hooks are left NULL.
 */
struct rte_compressdev_ops octtx_zip_pmd_ops = {
		.dev_configure		= zip_pmd_config,
		.dev_start		= zip_pmd_start,
		.dev_stop		= zip_pmd_stop,
		.dev_close		= zip_pmd_close,

		.stats_get		= zip_pmd_stats_get,
		.stats_reset		= zip_pmd_stats_reset,

		.dev_infos_get		= zip_pmd_info_get,

		.queue_pair_setup	= zip_pmd_qp_setup,
		.queue_pair_release	= zip_pmd_qp_release,

		.private_xform_create	= zip_pmd_stream_create,
		.private_xform_free	= zip_pmd_stream_free,
		.stream_create		= NULL,
		.stream_free		= NULL
};
55543e610bbSSunila Sahu 
/* PCI probe: create the compressdev for a matched ZIP VF.
 *
 * The compressdev shell is created in every process; the hardware VF is
 * initialised only in the primary process. Ops and burst functions are
 * wired in both, so secondaries share the primary's device state.
 */
static int
zip_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	int ret = 0;
	char compressdev_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
	struct rte_compressdev *compressdev;
	struct rte_compressdev_pmd_init_params init_params = {
		"",
		rte_socket_id(),
	};

	ZIP_PMD_INFO("vendor_id=0x%x device_id=0x%x",
			(unsigned int)pci_dev->id.vendor_id,
			(unsigned int)pci_dev->id.device_id);

	/* Device name is derived from the PCI address */
	rte_pci_device_name(&pci_dev->addr, compressdev_name,
			    sizeof(compressdev_name));

	compressdev = rte_compressdev_pmd_create(compressdev_name,
		&pci_dev->device, sizeof(struct zip_vf), &init_params);
	if (compressdev == NULL) {
		ZIP_PMD_ERR("driver %s: create failed", init_params.name);
		return -ENODEV;
	}

	/*
	 * create only if proc_type is primary.
	 */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		/*  create vf dev with given pmd dev id */
		ret = zipvf_create(compressdev);
		if (ret < 0) {
			ZIP_PMD_ERR("Device creation failed");
			rte_compressdev_pmd_destroy(compressdev);
			return ret;
		}
	}

	compressdev->dev_ops = &octtx_zip_pmd_ops;
	/* register rx/tx burst functions for data path */
	compressdev->dequeue_burst = zip_pmd_dequeue_burst_sync;
	compressdev->enqueue_burst = zip_pmd_enqueue_burst_sync;
	compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
	return ret;
}
60243e610bbSSunila Sahu 
60343e610bbSSunila Sahu static int
60443e610bbSSunila Sahu zip_pci_remove(struct rte_pci_device *pci_dev)
60543e610bbSSunila Sahu {
60643e610bbSSunila Sahu 	struct rte_compressdev *compressdev;
60743e610bbSSunila Sahu 	char compressdev_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
60843e610bbSSunila Sahu 
60943e610bbSSunila Sahu 	if (pci_dev == NULL) {
61043e610bbSSunila Sahu 		ZIP_PMD_ERR(" Invalid PCI Device\n");
61143e610bbSSunila Sahu 		return -EINVAL;
61243e610bbSSunila Sahu 	}
61343e610bbSSunila Sahu 	rte_pci_device_name(&pci_dev->addr, compressdev_name,
61443e610bbSSunila Sahu 			sizeof(compressdev_name));
61543e610bbSSunila Sahu 
61643e610bbSSunila Sahu 	compressdev = rte_compressdev_pmd_get_named_dev(compressdev_name);
61743e610bbSSunila Sahu 	if (compressdev == NULL)
61843e610bbSSunila Sahu 		return -ENODEV;
61943e610bbSSunila Sahu 
62043e610bbSSunila Sahu 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
62143e610bbSSunila Sahu 		if (zipvf_destroy(compressdev) < 0)
62243e610bbSSunila Sahu 			return -ENODEV;
62343e610bbSSunila Sahu 	}
62443e610bbSSunila Sahu 	return rte_compressdev_pmd_destroy(compressdev);
62543e610bbSSunila Sahu }
62643e610bbSSunila Sahu 
/** PCI IDs matched by this driver: Cavium OCTEON TX ZIP virtual
 * function, terminated by a zeroed sentinel entry.
 */
static struct rte_pci_id pci_id_octtx_zipvf_table[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
			PCI_DEVICE_ID_OCTEONTX_ZIPVF),
	},
	{
		.device_id = 0
	},
};
63643e610bbSSunila Sahu 
/**
 * Structure that represents a PCI driver.
 * NEED_MAPPING requests BAR mapping before probe is invoked.
 */
static struct rte_pci_driver octtx_zip_pmd = {
	.id_table    = pci_id_octtx_zipvf_table,
	.drv_flags   = RTE_PCI_DRV_NEED_MAPPING,
	.probe       = zip_pci_probe,
	.remove      = zip_pci_remove,
};
64643e610bbSSunila Sahu 
64743e610bbSSunila Sahu RTE_PMD_REGISTER_PCI(COMPRESSDEV_NAME_ZIP_PMD, octtx_zip_pmd);
64843e610bbSSunila Sahu RTE_PMD_REGISTER_PCI_TABLE(COMPRESSDEV_NAME_ZIP_PMD, pci_id_octtx_zipvf_table);
64943e610bbSSunila Sahu 
65043e610bbSSunila Sahu RTE_INIT(octtx_zip_init_log);
65143e610bbSSunila Sahu 
65243e610bbSSunila Sahu static void
65343e610bbSSunila Sahu octtx_zip_init_log(void)
65443e610bbSSunila Sahu {
65543e610bbSSunila Sahu 	octtx_zip_logtype_driver = rte_log_register("pmd.compress.octeontx");
65643e610bbSSunila Sahu 	if (octtx_zip_logtype_driver >= 0)
65743e610bbSSunila Sahu 		rte_log_set_level(octtx_zip_logtype_driver, RTE_LOG_INFO);
65843e610bbSSunila Sahu }
659