/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2024-2025 Huawei Technologies Co.,Ltd. All rights reserved.
 * Copyright 2024-2025 Linaro ltd.
 */

#include <bus_vdev_driver.h>
#include <rte_compressdev_pmd.h>
#include <rte_malloc.h>

#include <uadk/wd_comp.h>
#include <uadk/wd_sched.h>

#include "uadk_compress_pmd_private.h"

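/*
 * Deflate is the only supported algorithm; private xforms are shareable
 * and both fixed and dynamic Huffman encoding are advertised.
 */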
static const struct
rte_compressdev_capabilities uadk_compress_pmd_capabilities[] = {
	{ /* Deflate */
		.algo = RTE_COMP_ALGO_DEFLATE,
		.comp_feature_flags = RTE_COMP_FF_SHAREABLE_PRIV_XFORM |
				      RTE_COMP_FF_HUFFMAN_FIXED |
				      RTE_COMP_FF_HUFFMAN_DYNAMIC,
	},

	RTE_COMP_END_OF_CAPABILITIES_LIST()
};

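/*
 * dev_configure: initialise the UADK wd_comp environment on first use;
 * the supplied configuration is not otherwise used.
 */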
static int
uadk_compress_pmd_config(struct rte_compressdev *dev,
			 struct rte_compressdev_config *config __rte_unused)
{
	struct uadk_compress_priv *priv = dev->data->dev_private;
	int ret;

	if (!priv->env_init) {
		ret = wd_comp_env_init(NULL);
		if (ret < 0)
			return -EINVAL;
		priv->env_init = true;
	}

	return 0;
}

static int
uadk_compress_pmd_start(struct rte_compressdev *dev __rte_unused)
{
	return 0;
}

static void
uadk_compress_pmd_stop(struct rte_compressdev *dev __rte_unused)
{
}

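/* dev_close: tear down the UADK wd_comp environment if it was initialised. */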
static int
uadk_compress_pmd_close(struct rte_compressdev *dev)
{
	struct uadk_compress_priv *priv = dev->data->dev_private;

	if (priv->env_init) {
		wd_comp_env_uninit();
		priv->env_init = false;
	}

	return 0;
}

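/* Sum the per-queue-pair counters into the device-level statistics. */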
static void
uadk_compress_pmd_stats_get(struct rte_compressdev *dev,
			    struct rte_compressdev_stats *stats)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct uadk_compress_qp *qp = dev->data->queue_pairs[qp_id];

		stats->enqueued_count += qp->qp_stats.enqueued_count;
		stats->dequeued_count += qp->qp_stats.dequeued_count;
		stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
		stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
	}
}

static void
uadk_compress_pmd_stats_reset(struct rte_compressdev *dev)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct uadk_compress_qp *qp = dev->data->queue_pairs[qp_id];

		memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
	}
}

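/* Report the driver name, device feature flags and the capability table. */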
static void
uadk_compress_pmd_info_get(struct rte_compressdev *dev,
			   struct rte_compressdev_info *dev_info)
{
	if (dev_info != NULL) {
		dev_info->driver_name = dev->device->driver->name;
		dev_info->feature_flags = dev->feature_flags;
		dev_info->capabilities = uadk_compress_pmd_capabilities;
	}
}

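/* Release a queue pair: free its completion ring and the qp structure. */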
static int
uadk_compress_pmd_qp_release(struct rte_compressdev *dev, uint16_t qp_id)
{
	struct uadk_compress_qp *qp = dev->data->queue_pairs[qp_id];

	if (qp != NULL) {
		rte_ring_free(qp->processed_pkts);
		rte_free(qp);
		dev->data->queue_pairs[qp_id] = NULL;
	}

	return 0;
}

static int
uadk_pmd_qp_set_unique_name(struct rte_compressdev *dev,
			    struct uadk_compress_qp *qp)
{
	unsigned int n = snprintf(qp->name, sizeof(qp->name),
				  "uadk_pmd_%u_qp_%u",
				  dev->data->dev_id, qp->id);

	if (n >= sizeof(qp->name))
		return -EINVAL;

	return 0;
}

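/*
 * Create (or reuse, when an existing ring is large enough) the ring that
 * holds ops completed by the enqueue path until the application dequeues
 * them.
 */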
static struct rte_ring *
uadk_pmd_qp_create_processed_pkts_ring(struct uadk_compress_qp *qp,
				       unsigned int ring_size, int socket_id)
{
	struct rte_ring *r = qp->processed_pkts;

	if (r) {
		if (rte_ring_get_size(r) >= ring_size) {
			UADK_LOG(INFO, "Reusing existing ring %s for processed packets",
				 qp->name);
			return r;
		}

		UADK_LOG(ERR, "Unable to reuse existing ring %s for processed packets",
			 qp->name);
		return NULL;
	}

	return rte_ring_create(qp->name, ring_size, socket_id,
			       RING_F_EXACT_SZ);
}

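/* Allocate a queue pair, give it a unique name and create its completion ring. */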
static int
uadk_compress_pmd_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
			   uint32_t max_inflight_ops, int socket_id)
{
	struct uadk_compress_qp *qp = NULL;

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->queue_pairs[qp_id] != NULL)
		uadk_compress_pmd_qp_release(dev, qp_id);

	/* Allocate the queue pair data structure. */
	qp = rte_zmalloc_socket("uadk PMD Queue Pair", sizeof(*qp),
				RTE_CACHE_LINE_SIZE, socket_id);
	if (qp == NULL)
		return -ENOMEM;

	qp->id = qp_id;
	dev->data->queue_pairs[qp_id] = qp;

	if (uadk_pmd_qp_set_unique_name(dev, qp))
		goto qp_setup_cleanup;

	qp->processed_pkts = uadk_pmd_qp_create_processed_pkts_ring(qp,
						max_inflight_ops, socket_id);
	if (qp->processed_pkts == NULL)
		goto qp_setup_cleanup;

	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));

	return 0;

qp_setup_cleanup:
	if (qp) {
		rte_free(qp);
		qp = NULL;
	}
	/* Don't leave a dangling pointer to the freed qp behind. */
	dev->data->queue_pairs[qp_id] = NULL;
	return -EINVAL;
}

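/*
 * Translate a DPDK private xform into a UADK compression session: only
 * DEFLATE is mapped, at fixed level L8 (8K window for compression), and
 * the session handle is kept in the private xform for use at burst time.
 */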
static int
uadk_compress_pmd_xform_create(struct rte_compressdev *dev __rte_unused,
			       const struct rte_comp_xform *xform,
			       void **private_xform)
{
	struct wd_comp_sess_setup setup = {0};
	struct sched_params param = {0};
	struct uadk_compress_xform *xfrm;
	handle_t handle;

	if (xform == NULL) {
		UADK_LOG(ERR, "invalid xform struct");
		return -EINVAL;
	}

	xfrm = rte_malloc(NULL, sizeof(struct uadk_compress_xform), 0);
	if (xfrm == NULL)
		return -ENOMEM;

	switch (xform->type) {
	case RTE_COMP_COMPRESS:
		switch (xform->compress.algo) {
		case RTE_COMP_ALGO_NULL:
			break;
		case RTE_COMP_ALGO_DEFLATE:
			setup.alg_type = WD_DEFLATE;
			setup.win_sz = WD_COMP_WS_8K;
			setup.comp_lv = WD_COMP_L8;
			setup.op_type = WD_DIR_COMPRESS;
			param.type = setup.op_type;
			param.numa_id = -1; /* choose nearby numa node */
			setup.sched_param = &param;
			break;
		default:
			goto err;
		}
		break;
	case RTE_COMP_DECOMPRESS:
		switch (xform->decompress.algo) {
		case RTE_COMP_ALGO_NULL:
			break;
		case RTE_COMP_ALGO_DEFLATE:
			setup.alg_type = WD_DEFLATE;
			setup.comp_lv = WD_COMP_L8;
			setup.op_type = WD_DIR_DECOMPRESS;
			param.type = setup.op_type;
			param.numa_id = -1; /* choose nearby numa node */
			setup.sched_param = &param;
			break;
		default:
			goto err;
		}
		break;
	default:
		UADK_LOG(ERR, "Operation type %u is not supported.", xform->type);
		goto err;
	}

	handle = wd_comp_alloc_sess(&setup);
	if (!handle)
		goto err;

	xfrm->handle = handle;
	xfrm->type = xform->type;
	*private_xform = xfrm;
	return 0;

err:
	rte_free(xfrm);
	return -EINVAL;
}

static int
uadk_compress_pmd_xform_free(struct rte_compressdev *dev __rte_unused, void *xform)
{
	if (!xform)
		return -EINVAL;

	wd_comp_free_sess(((struct uadk_compress_xform *)xform)->handle);
	rte_free(xform);

	return 0;
}

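/*
 * Device operations registered with the compressdev framework; the
 * enqueue/dequeue burst handlers are set separately at probe time.
 */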
static struct rte_compressdev_ops uadk_compress_pmd_ops = {
	.dev_configure = uadk_compress_pmd_config,
	.dev_start = uadk_compress_pmd_start,
	.dev_stop = uadk_compress_pmd_stop,
	.dev_close = uadk_compress_pmd_close,
	.stats_get = uadk_compress_pmd_stats_get,
	.stats_reset = uadk_compress_pmd_stats_reset,
	.dev_infos_get = uadk_compress_pmd_info_get,
	.queue_pair_setup = uadk_compress_pmd_qp_setup,
	.queue_pair_release = uadk_compress_pmd_qp_release,
	.private_xform_create = uadk_compress_pmd_xform_create,
	.private_xform_free = uadk_compress_pmd_xform_free,
};

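/*
 * Enqueue burst: each stateless op is processed synchronously with
 * wd_do_comp_sync() and then placed on the queue pair's completion ring;
 * stateful ops are rejected with INVALID_ARGS.
 */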
static uint16_t
uadk_compress_pmd_enqueue_burst_sync(void *queue_pair,
				     struct rte_comp_op **ops, uint16_t nb_ops)
{
	struct uadk_compress_qp *qp = queue_pair;
	struct uadk_compress_xform *xform;
	struct rte_comp_op *op;
	uint16_t enqd = 0;
	int i, ret = 0;

	for (i = 0; i < nb_ops; i++) {
		op = ops[i];

		if (op->op_type == RTE_COMP_OP_STATEFUL) {
			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
		} else {
			/* process stateless ops */
			xform = op->private_xform;
			if (xform) {
				struct wd_comp_req req = {0};
				uint16_t dst_len = rte_pktmbuf_data_len(op->m_dst);

				req.src = rte_pktmbuf_mtod(op->m_src, uint8_t *);
				req.src_len = op->src.length;
				req.dst = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
				req.dst_len = dst_len;
				req.op_type = (enum wd_comp_op_type)xform->type;
				req.cb = NULL;
				req.data_fmt = WD_FLAT_BUF;
				do {
					ret = wd_do_comp_sync(xform->handle, &req);
				} while (ret == -WD_EBUSY);

				op->consumed += req.src_len;

				if (req.dst_len <= dst_len) {
					op->produced += req.dst_len;
					op->status = RTE_COMP_OP_STATUS_SUCCESS;
				} else {
					op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
				}

				if (ret) {
					op->status = RTE_COMP_OP_STATUS_ERROR;
					break;
				}
			} else {
				op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
			}
		}

		/*
		 * Whatever the outcome, hand the op to the completion ring
		 * with its status set.
		 */
		if (!ret)
			ret = rte_ring_enqueue(qp->processed_pkts, (void *)op);

		if (unlikely(ret)) {
			/* increment count if failed to enqueue op */
			qp->qp_stats.enqueue_err_count++;
		} else {
			qp->qp_stats.enqueued_count++;
			enqd++;
		}
	}

	return enqd;
}

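/* Dequeue burst: pop ops that the enqueue path already completed. */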
static uint16_t
uadk_compress_pmd_dequeue_burst_sync(void *queue_pair,
				     struct rte_comp_op **ops,
				     uint16_t nb_ops)
{
	struct uadk_compress_qp *qp = queue_pair;
	unsigned int nb_dequeued = 0;

	nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
					     (void **)ops, nb_ops, NULL);
	qp->qp_stats.dequeued_count += nb_dequeued;

	return nb_dequeued;
}

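/*
 * vdev probe: bail out if no UADK "deflate" accelerator is present,
 * otherwise create the compressdev and wire up the ops and burst handlers.
 */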
static int
uadk_compress_probe(struct rte_vdev_device *vdev)
{
	struct rte_compressdev_pmd_init_params init_params = {
		"",
		rte_socket_id(),
	};
	struct rte_compressdev *compressdev;
	struct uacce_dev *udev;
	const char *name;

	udev = wd_get_accel_dev("deflate");
	if (!udev)
		return -ENODEV;

	/* udev is only needed to check that a deflate engine exists */
	free(udev);

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	compressdev = rte_compressdev_pmd_create(name, &vdev->device,
		sizeof(struct uadk_compress_priv), &init_params);
	if (compressdev == NULL) {
		UADK_LOG(ERR, "driver %s: create failed", init_params.name);
		return -ENODEV;
	}

	compressdev->dev_ops = &uadk_compress_pmd_ops;
	compressdev->dequeue_burst = uadk_compress_pmd_dequeue_burst_sync;
	compressdev->enqueue_burst = uadk_compress_pmd_enqueue_burst_sync;
	compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;

	return 0;
}

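/* vdev remove: look up the named compressdev and destroy it. */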
static int
uadk_compress_remove(struct rte_vdev_device *vdev)
{
	struct rte_compressdev *compressdev;
	const char *name;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	compressdev = rte_compressdev_pmd_get_named_dev(name);
	if (compressdev == NULL)
		return -ENODEV;

	return rte_compressdev_pmd_destroy(compressdev);
}

static struct rte_vdev_driver uadk_compress_pmd = {
	.probe = uadk_compress_probe,
	.remove = uadk_compress_remove,
};

#define UADK_COMPRESS_DRIVER_NAME compress_uadk
RTE_PMD_REGISTER_VDEV(UADK_COMPRESS_DRIVER_NAME, uadk_compress_pmd);
RTE_LOG_REGISTER_DEFAULT(uadk_compress_logtype, INFO);