/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */
#include <isa-l.h>

#include <rte_common.h>
#include <rte_compressdev_pmd.h>
#include <rte_malloc.h>

#include "isal_compress_pmd_private.h"

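/*
 * Single DEFLATE capability advertised by this PMD: out-of-place operation
 * with SGL and linear buffers in either direction, shareable private xforms,
 * fixed and dynamic Huffman encoding, CRC32 and Adler-32 checksums, and a
 * fixed 2^15 (32 kB) window.
 */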
static const struct rte_compressdev_capabilities isal_pmd_capabilities[] = {
	{
		.algo = RTE_COMP_ALGO_DEFLATE,
		.comp_feature_flags =	RTE_COMP_FF_OOP_SGL_IN_SGL_OUT |
					RTE_COMP_FF_OOP_SGL_IN_LB_OUT |
					RTE_COMP_FF_OOP_LB_IN_SGL_OUT |
					RTE_COMP_FF_SHAREABLE_PRIV_XFORM |
					RTE_COMP_FF_HUFFMAN_FIXED |
					RTE_COMP_FF_HUFFMAN_DYNAMIC |
					RTE_COMP_FF_CRC32_CHECKSUM |
					RTE_COMP_FF_ADLER32_CHECKSUM,
		.window_size = {
			.min = 15,
			.max = 15,
			.increment = 0
		},
	},
	RTE_COMP_END_OF_CAPABILITIES_LIST()
};

/** Configure device */
static int
isal_comp_pmd_config(struct rte_compressdev *dev,
		struct rte_compressdev_config *config)
{
	int ret = 0;
	unsigned int n;
	char mp_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
	unsigned int elt_size = sizeof(struct isal_priv_xform);
	struct isal_comp_private *internals = dev->data->dev_private;

	n = snprintf(mp_name, sizeof(mp_name), "compdev_%d_xform_mp",
			dev->data->dev_id);
	if (n >= sizeof(mp_name)) {
		ISAL_PMD_LOG(ERR,
			"Unable to create unique name for xform mempool");
		return -ENOMEM;
	}

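	/*
	 * The xform mempool is named per device, so a pool left over from an
	 * earlier configure of the same device is found by the lookup below
	 * and reused, provided its element size and depth still match the
	 * requested configuration.
	 */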
	internals->priv_xform_mp = rte_mempool_lookup(mp_name);

	if (internals->priv_xform_mp != NULL) {
		if (((internals->priv_xform_mp)->elt_size != elt_size) ||
				((internals->priv_xform_mp)->size <
					config->max_nb_priv_xforms)) {
			ISAL_PMD_LOG(ERR,
				"%s mempool already exists with different initialization parameters",
				mp_name);
			internals->priv_xform_mp = NULL;
			return -ENOMEM;
		}
	} else { /* First time configuration */
		internals->priv_xform_mp = rte_mempool_create(
				mp_name, /* mempool name */
				config->max_nb_priv_xforms, /* number of elements */
				elt_size, /* element size */
				0, /* cache size */
				0, /* private data size */
				NULL, /* mempool constructor */
				NULL, /* mempool constructor arg */
				NULL, /* object constructor */
				NULL, /* object constructor arg */
				config->socket_id, /* socket id */
				0); /* flags */
	}

	if (internals->priv_xform_mp == NULL) {
		ISAL_PMD_LOG(ERR, "%s mempool allocation failed", mp_name);
		return -ENOMEM;
	}

	dev->data->dev_private = internals;

	return ret;
}

/** Start device */
static int
isal_comp_pmd_start(__rte_unused struct rte_compressdev *dev)
{
	return 0;
}

/** Stop device */
static void
isal_comp_pmd_stop(__rte_unused struct rte_compressdev *dev)
{
}

/** Close device */
static int
isal_comp_pmd_close(struct rte_compressdev *dev)
{
	/* Free private data */
	struct isal_comp_private *internals = dev->data->dev_private;

	rte_mempool_free(internals->priv_xform_mp);
	return 0;
}

/** Get device statistics */
static void
isal_comp_pmd_stats_get(struct rte_compressdev *dev,
		struct rte_compressdev_stats *stats)
{
	uint16_t qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct isal_comp_qp *qp = dev->data->queue_pairs[qp_id];

		stats->enqueued_count += qp->qp_stats.enqueued_count;
		stats->dequeued_count += qp->qp_stats.dequeued_count;

		stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
		stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
	}
}

/** Get device info */
static void
isal_comp_pmd_info_get(struct rte_compressdev *dev __rte_unused,
		struct rte_compressdev_info *dev_info)
{
	if (dev_info != NULL) {
		dev_info->capabilities = isal_pmd_capabilities;

		/* Check the CPU for supported vector instructions and set
		 * feature_flags accordingly
		 */
		if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
			dev_info->feature_flags |= RTE_COMPDEV_FF_CPU_AVX512;
		else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
			dev_info->feature_flags |= RTE_COMPDEV_FF_CPU_AVX2;
		else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
			dev_info->feature_flags |= RTE_COMPDEV_FF_CPU_AVX;
		else
			dev_info->feature_flags |= RTE_COMPDEV_FF_CPU_SSE;
	}
}

/** Reset device statistics */
static void
isal_comp_pmd_stats_reset(struct rte_compressdev *dev)
{
	uint16_t qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct isal_comp_qp *qp = dev->data->queue_pairs[qp_id];
		memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
	}
}

/** Release queue pair */
static int
isal_comp_pmd_qp_release(struct rte_compressdev *dev, uint16_t qp_id)
{
	struct isal_comp_qp *qp = dev->data->queue_pairs[qp_id];

	if (qp == NULL)
		return -EINVAL;

	if (qp->stream)
		rte_free(qp->stream->level_buf);

	rte_free(qp->state);
	rte_ring_free(qp->processed_pkts);
	rte_free(qp->stream);
	rte_free(qp);
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}

/** Create a ring to place processed packets on */
static struct rte_ring *
isal_comp_pmd_qp_create_processed_pkts_ring(struct isal_comp_qp *qp,
		unsigned int ring_size, int socket_id)
{
	struct rte_ring *r;

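	/*
	 * The qp name is unique per device and queue pair, so any ring found
	 * by the lookup below was left behind by a previous setup of this same
	 * qp; it is only reused if it is at least as large as requested.
	 */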
	r = rte_ring_lookup(qp->name);
	if (r) {
		if (rte_ring_get_size(r) >= ring_size) {
			ISAL_PMD_LOG(DEBUG,
				"Reusing existing ring %s for processed packets",
				qp->name);
			return r;
		}

		ISAL_PMD_LOG(ERR,
			"Unable to reuse existing ring %s for processed packets",
			qp->name);
		return NULL;
	}

	return rte_ring_create(qp->name, ring_size, socket_id,
			RING_F_SP_ENQ | RING_F_SC_DEQ);
}

/** Set a unique name for the queue pair based on the dev_id and qp_id */
static int
isal_comp_pmd_qp_set_unique_name(struct rte_compressdev *dev,
		struct isal_comp_qp *qp)
{
	unsigned int n = snprintf(qp->name, sizeof(qp->name),
			"isal_comp_pmd_%u_qp_%u",
			dev->data->dev_id, qp->id);

	if (n >= sizeof(qp->name))
		return -1;

	return 0;
}

/* Setup a queue pair */
static int
isal_comp_pmd_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
		uint32_t max_inflight_ops, int socket_id)
{
	struct isal_comp_qp *qp = NULL;
	int retval;

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->queue_pairs[qp_id] != NULL)
		isal_comp_pmd_qp_release(dev, qp_id);

	/* Allocate the queue pair data structure. */
	qp = rte_zmalloc_socket("Isa-l compression PMD Queue Pair", sizeof(*qp),
					RTE_CACHE_LINE_SIZE, socket_id);
	if (qp == NULL) {
		ISAL_PMD_LOG(ERR, "Failed to allocate queue pair memory");
		return -ENOMEM;
	}

	/* Initialize memory for compression stream structure */
	qp->stream = rte_zmalloc_socket("Isa-l compression stream ",
			sizeof(struct isal_zstream), RTE_CACHE_LINE_SIZE,
			socket_id);
	if (qp->stream == NULL) {
		ISAL_PMD_LOG(ERR, "Failed to allocate compression stream memory");
		goto qp_setup_cleanup;
	}
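	/*
	 * Note: the level buffer below is sized ISAL_DEF_LVL3_DEFAULT, which
	 * should be the largest per-level working buffer ISA-L requires, so a
	 * single allocation covers every compression level this PMD accepts.
	 */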
	/* Initialize memory for compression level buffer */
	qp->stream->level_buf = rte_zmalloc_socket("Isa-l compression lev_buf",
			ISAL_DEF_LVL3_DEFAULT, RTE_CACHE_LINE_SIZE,
			socket_id);
	if (qp->stream->level_buf == NULL) {
		ISAL_PMD_LOG(ERR, "Failed to allocate compression level_buf memory");
		goto qp_setup_cleanup;
	}

	/* Initialize memory for decompression state structure */
	qp->state = rte_zmalloc_socket("Isa-l decompression state",
			sizeof(struct inflate_state), RTE_CACHE_LINE_SIZE,
			socket_id);
	if (qp->state == NULL) {
		ISAL_PMD_LOG(ERR, "Failed to allocate decompression state memory");
		goto qp_setup_cleanup;
	}

	qp->id = qp_id;
	dev->data->queue_pairs[qp_id] = qp;

	retval = isal_comp_pmd_qp_set_unique_name(dev, qp);
	if (retval) {
		ISAL_PMD_LOG(ERR, "Failed to create unique name for isal "
				"compression device");
		goto qp_setup_cleanup;
	}

	qp->processed_pkts = isal_comp_pmd_qp_create_processed_pkts_ring(qp,
			max_inflight_ops, socket_id);
	if (qp->processed_pkts == NULL) {
		ISAL_PMD_LOG(ERR, "Failed to create processed packets ring for isal "
				"compression device");
		goto qp_setup_cleanup;
	}

	qp->num_free_elements = rte_ring_free_count(qp->processed_pkts);

	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
	return 0;

qp_setup_cleanup:
	if (qp->stream)
		rte_free(qp->stream->level_buf);
	rte_free(qp->stream);
	rte_free(qp->state);
	rte_free(qp);

	return -1;
}

/** Set private xform data */
static int
isal_comp_pmd_priv_xform_create(struct rte_compressdev *dev,
			const struct rte_comp_xform *xform, void **priv_xform)
{
	int ret;
	struct isal_comp_private *internals = dev->data->dev_private;

	if (xform == NULL) {
		ISAL_PMD_LOG(ERR, "Invalid Xform struct");
		return -EINVAL;
	}

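	/*
	 * Private xforms are drawn from the per-device mempool created in
	 * dev_configure; the capability table above advertises them as
	 * shareable (RTE_COMP_FF_SHAREABLE_PRIV_XFORM), so the same xform may
	 * be referenced by operations on different queue pairs.
	 */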
	if (rte_mempool_get(internals->priv_xform_mp, priv_xform)) {
		ISAL_PMD_LOG(ERR,
			"Couldn't get object from private xform mempool");
		return -ENOMEM;
	}

	ret = isal_comp_set_priv_xform_parameters(*priv_xform, xform);
	if (ret != 0) {
		ISAL_PMD_LOG(ERR, "Failed to configure private xform parameters");

		/* Return private xform to mempool */
		rte_mempool_put(internals->priv_xform_mp, *priv_xform);
		return ret;
	}
	return 0;
}

/** Clear the private xform memory so no stale data is left behind */
static int
isal_comp_pmd_priv_xform_free(struct rte_compressdev *dev, void *priv_xform)
{
	struct isal_comp_private *internals = dev->data->dev_private;

	/* Zero out the whole structure */
	if (priv_xform) {
		memset(priv_xform, 0, sizeof(struct isal_priv_xform));
		rte_mempool_put(internals->priv_xform_mp, priv_xform);
	}
	return 0;
}

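/*
 * Ops table exported through the isal_compress_pmd_ops pointer below;
 * presumably installed as the device's dev_ops when the ISA-L compressdev is
 * created during probe.
 */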
struct rte_compressdev_ops isal_pmd_ops = {
		.dev_configure		= isal_comp_pmd_config,
		.dev_start		= isal_comp_pmd_start,
		.dev_stop		= isal_comp_pmd_stop,
		.dev_close		= isal_comp_pmd_close,

		.stats_get		= isal_comp_pmd_stats_get,
		.stats_reset		= isal_comp_pmd_stats_reset,

		.dev_infos_get		= isal_comp_pmd_info_get,

		.queue_pair_setup	= isal_comp_pmd_qp_setup,
		.queue_pair_release	= isal_comp_pmd_qp_release,

		.private_xform_create	= isal_comp_pmd_priv_xform_create,
		.private_xform_free	= isal_comp_pmd_priv_xform_free,
};

struct rte_compressdev_ops *isal_compress_pmd_ops = &isal_pmd_ops;