xref: /dpdk/drivers/compress/isal/isal_compress_pmd_ops.c (revision e977e4199a8d6bab72cf94e154adcad1fb964e5e)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation
3  */
4 #include <isa-l.h>
5 
6 #include <rte_common.h>
7 #include <rte_compressdev_pmd.h>
8 #include <rte_malloc.h>
9 
10 #include "isal_compress_pmd_private.h"
11 
/* Capabilities advertised to the compressdev framework: DEFLATE only,
 * with a fixed 32 KB (2^15) window and shareable private xforms.
 */
static const struct rte_compressdev_capabilities isal_pmd_capabilities[] = {
	{
		.algo = RTE_COMP_ALGO_DEFLATE,
		.comp_feature_flags =	RTE_COMP_FF_SHAREABLE_PRIV_XFORM,
		.window_size = {
			.min = 15,	/* log2 of window size, i.e. 32 KB */
			.max = 15,	/* only one supported window size */
			.increment = 0
		},
	},
	RTE_COMP_END_OF_CAPABILITIES_LIST()
};
24 
25 /** Configure device */
26 static int
27 isal_comp_pmd_config(struct rte_compressdev *dev,
28 		struct rte_compressdev_config *config)
29 {
30 	int ret = 0;
31 	unsigned int n;
32 	char mp_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
33 	unsigned int elt_size = sizeof(struct isal_priv_xform);
34 	struct isal_comp_private *internals = dev->data->dev_private;
35 
36 	n = snprintf(mp_name, sizeof(mp_name), "compdev_%d_xform_mp",
37 			dev->data->dev_id);
38 	if (n > sizeof(mp_name)) {
39 		ISAL_PMD_LOG(ERR,
40 			"Unable to create unique name for xform mempool");
41 		return -ENOMEM;
42 	}
43 
44 	internals->priv_xform_mp = rte_mempool_lookup(mp_name);
45 
46 	if (internals->priv_xform_mp != NULL) {
47 		if (((internals->priv_xform_mp)->elt_size != elt_size) ||
48 				((internals->priv_xform_mp)->size <
49 					config->max_nb_priv_xforms)) {
50 
51 			ISAL_PMD_LOG(ERR, "%s mempool already exists with different"
52 				" initialization parameters", mp_name);
53 			internals->priv_xform_mp = NULL;
54 			return -ENOMEM;
55 		}
56 	} else { /* First time configuration */
57 		internals->priv_xform_mp = rte_mempool_create(
58 				mp_name, /* mempool name */
59 				/* number of elements*/
60 				config->max_nb_priv_xforms,
61 				elt_size, /* element size*/
62 				0, /* Cache size*/
63 				0, /* private data size */
64 				NULL, /* obj initialization constructor */
65 				NULL, /* obj initialization constructor arg */
66 				NULL, /**< obj constructor*/
67 				NULL, /* obj constructor arg */
68 				config->socket_id, /* socket id */
69 				0); /* flags */
70 	}
71 
72 	if (internals->priv_xform_mp == NULL) {
73 		ISAL_PMD_LOG(ERR, "%s mempool allocation failed", mp_name);
74 		return -ENOMEM;
75 	}
76 
77 	dev->data->dev_private = internals;
78 
79 	return ret;
80 }
81 
82 /** Start device */
/** Start device: nothing to do for this PMD, always succeeds. */
static int
isal_comp_pmd_start(__rte_unused struct rte_compressdev *dev)
{
	return 0;
}
88 
89 /** Stop device */
/** Stop device: nothing to do for this PMD. */
static void
isal_comp_pmd_stop(__rte_unused struct rte_compressdev *dev)
{
}
94 
95 /** Close device */
96 static int
97 isal_comp_pmd_close(struct rte_compressdev *dev)
98 {
99 	/* Free private data */
100 	struct isal_comp_private *internals = dev->data->dev_private;
101 
102 	rte_mempool_free(internals->priv_xform_mp);
103 	return 0;
104 }
105 
106 /** Get device statistics */
107 static void
108 isal_comp_pmd_stats_get(struct rte_compressdev *dev,
109 		struct rte_compressdev_stats *stats)
110 {
111 	uint16_t qp_id;
112 
113 	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
114 		struct isal_comp_qp *qp = dev->data->queue_pairs[qp_id];
115 
116 		stats->enqueued_count += qp->qp_stats.enqueued_count;
117 		stats->dequeued_count += qp->qp_stats.dequeued_count;
118 
119 		stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
120 		stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
121 	}
122 }
123 
124 /** Get device info */
125 static void
126 isal_comp_pmd_info_get(struct rte_compressdev *dev __rte_unused,
127 		struct rte_compressdev_info *dev_info)
128 {
129 	if (dev_info != NULL) {
130 		dev_info->capabilities = isal_pmd_capabilities;
131 		dev_info->feature_flags = RTE_COMPDEV_FF_CPU_AVX512 |
132 				RTE_COMPDEV_FF_CPU_AVX2 |
133 				RTE_COMPDEV_FF_CPU_AVX |
134 				RTE_COMPDEV_FF_CPU_SSE;
135 	}
136 }
137 
138 /** Reset device statistics */
139 static void
140 isal_comp_pmd_stats_reset(struct rte_compressdev *dev)
141 {
142 	uint16_t qp_id;
143 
144 	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
145 		struct isal_comp_qp *qp = dev->data->queue_pairs[qp_id];
146 		memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
147 	}
148 }
149 
150 /** Release queue pair */
151 static int
152 isal_comp_pmd_qp_release(struct rte_compressdev *dev, uint16_t qp_id)
153 {
154 	struct isal_comp_qp *qp = dev->data->queue_pairs[qp_id];
155 
156 	if (qp == NULL)
157 		return -EINVAL;
158 
159 	if (qp->stream != NULL)
160 		rte_free(qp->stream);
161 
162 	if (qp->stream->level_buf != NULL)
163 		rte_free(qp->stream->level_buf);
164 
165 	if (qp->state != NULL)
166 		rte_free(qp->state);
167 
168 	if (dev->data->queue_pairs[qp_id] != NULL)
169 		rte_free(dev->data->queue_pairs[qp_id]);
170 
171 	return 0;
172 }
173 
174 /** Create a ring to place process packets on */
175 static struct rte_ring *
176 isal_comp_pmd_qp_create_processed_pkts_ring(struct isal_comp_qp *qp,
177 		unsigned int ring_size, int socket_id)
178 {
179 	struct rte_ring *r;
180 
181 	r = rte_ring_lookup(qp->name);
182 	if (r) {
183 		if (rte_ring_get_size(r) >= ring_size) {
184 			ISAL_PMD_LOG(DEBUG,
185 				"Reusing existing ring %s for processed packets",
186 				qp->name);
187 			return r;
188 		}
189 
190 			ISAL_PMD_LOG(ERR,
191 				"Unable to reuse existing ring %s"
192 				" for processed packets",
193 			 qp->name);
194 		return NULL;
195 	}
196 
197 	return rte_ring_create(qp->name, ring_size, socket_id,
198 			RING_F_SP_ENQ | RING_F_SC_DEQ);
199 }
200 
201 /** set a unique name for the queue pair based on its name, dev_id and qp_id */
202 static int
203 isal_comp_pmd_qp_set_unique_name(struct rte_compressdev *dev,
204 struct isal_comp_qp *qp)
205 {
206 	unsigned int n = snprintf(qp->name, sizeof(qp->name),
207 			"isal_compression_pmd_%u_qp_%u",
208 			dev->data->dev_id, qp->id);
209 
210 	if (n >= sizeof(qp->name))
211 		return -1;
212 
213 	return 0;
214 }
215 
216 /* Setup a queue pair */
217 static int
218 isal_comp_pmd_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
219 		uint32_t max_inflight_ops, int socket_id)
220 {
221 	struct isal_comp_qp *qp = NULL;
222 	int retval;
223 
224 	/* Free memory prior to re-allocation if needed. */
225 	if (dev->data->queue_pairs[qp_id] != NULL)
226 		isal_comp_pmd_qp_release(dev, qp_id);
227 
228 	/* Allocate the queue pair data structure. */
229 	qp = rte_zmalloc_socket("Isa-l compression PMD Queue Pair", sizeof(*qp),
230 					RTE_CACHE_LINE_SIZE, socket_id);
231 	if (qp == NULL) {
232 		ISAL_PMD_LOG(ERR, "Failed to allocate queue pair memory");
233 		return (-ENOMEM);
234 	}
235 
236 	/* Initialize memory for compression stream structure */
237 	qp->stream = rte_zmalloc_socket("Isa-l compression stream ",
238 			sizeof(struct isal_zstream),  RTE_CACHE_LINE_SIZE,
239 			socket_id);
240 
241 	/* Initialize memory for compression level buffer */
242 	qp->stream->level_buf = rte_zmalloc_socket("Isa-l compression lev_buf",
243 			ISAL_DEF_LVL3_DEFAULT, RTE_CACHE_LINE_SIZE,
244 			socket_id);
245 
246 	/* Initialize memory for decompression state structure */
247 	qp->state = rte_zmalloc_socket("Isa-l decompression state",
248 			sizeof(struct inflate_state), RTE_CACHE_LINE_SIZE,
249 			socket_id);
250 
251 	qp->id = qp_id;
252 	dev->data->queue_pairs[qp_id] = qp;
253 
254 	retval = isal_comp_pmd_qp_set_unique_name(dev, qp);
255 	if (retval) {
256 		ISAL_PMD_LOG(ERR, "Failed to create unique name for isal "
257 				"compression device");
258 		goto qp_setup_cleanup;
259 	}
260 
261 	qp->processed_pkts = isal_comp_pmd_qp_create_processed_pkts_ring(qp,
262 			max_inflight_ops, socket_id);
263 	if (qp->processed_pkts == NULL) {
264 		ISAL_PMD_LOG(ERR, "Failed to create unique name for isal "
265 				"compression device");
266 		goto qp_setup_cleanup;
267 	}
268 
269 	qp->num_free_elements = rte_ring_free_count(qp->processed_pkts);
270 
271 	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
272 	return 0;
273 
274 qp_setup_cleanup:
275 	if (qp)
276 		rte_free(qp);
277 
278 	return -1;
279 }
280 
281 /** Set private xform data*/
282 static int
283 isal_comp_pmd_priv_xform_create(struct rte_compressdev *dev,
284 			const struct rte_comp_xform *xform, void **priv_xform)
285 {
286 	int ret;
287 	struct isal_comp_private *internals = dev->data->dev_private;
288 
289 	if (xform == NULL) {
290 		ISAL_PMD_LOG(ERR, "Invalid Xform struct");
291 		return -EINVAL;
292 	}
293 
294 	if (rte_mempool_get(internals->priv_xform_mp, priv_xform)) {
295 		ISAL_PMD_LOG(ERR,
296 			"Couldn't get object from private xform mempool");
297 		return -ENOMEM;
298 	}
299 
300 	ret = isal_comp_set_priv_xform_parameters(*priv_xform, xform);
301 	if (ret != 0) {
302 		ISAL_PMD_LOG(ERR, "Failed to configure private xform parameters");
303 
304 		/* Return private xform to mempool */
305 		rte_mempool_put(internals->priv_xform_mp, priv_xform);
306 		return ret;
307 	}
308 	return 0;
309 }
310 
311 /** Clear memory of the private xform so it doesn't leave key material behind */
312 static int
313 isal_comp_pmd_priv_xform_free(struct rte_compressdev *dev, void *priv_xform)
314 {
315 	struct isal_comp_private *internals = dev->data->dev_private;
316 
317 	/* Zero out the whole structure */
318 	if (priv_xform) {
319 		memset(priv_xform, 0, sizeof(struct isal_priv_xform));
320 		rte_mempool_put(internals->priv_xform_mp, priv_xform);
321 	}
322 	return 0;
323 }
324 
/* Ops vtable handed to the compressdev framework: wires the static
 * handlers above into the generic rte_compressdev API.
 */
struct rte_compressdev_ops isal_pmd_ops = {
		.dev_configure		= isal_comp_pmd_config,
		.dev_start		= isal_comp_pmd_start,
		.dev_stop		= isal_comp_pmd_stop,
		.dev_close		= isal_comp_pmd_close,

		.stats_get		= isal_comp_pmd_stats_get,
		.stats_reset		= isal_comp_pmd_stats_reset,

		.dev_infos_get		= isal_comp_pmd_info_get,

		.queue_pair_setup	= isal_comp_pmd_qp_setup,
		.queue_pair_release	= isal_comp_pmd_qp_release,

		.private_xform_create	= isal_comp_pmd_priv_xform_create,
		.private_xform_free	= isal_comp_pmd_priv_xform_free,
};
342 
/* Exported handle to the ops table, referenced by the PMD's probe code. */
struct rte_compressdev_ops *isal_compress_pmd_ops = &isal_pmd_ops;
344