/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <string.h>
#include <stdbool.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_log.h>
#include <rte_eal.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_memzone.h>
#include <rte_lcore.h>
#include <rte_spinlock.h>
#include <rte_interrupts.h>

#include "rte_bbdev_op.h"
#include "rte_bbdev.h"
#include "rte_bbdev_pmd.h"

#define DEV_NAME "BBDEV"

/* Number of supported operation types in *rte_bbdev_op_type*. */
#define BBDEV_OP_TYPE_COUNT 7

/* BBDev library logging ID */
RTE_LOG_REGISTER_DEFAULT(bbdev_logtype, NOTICE);
#define RTE_LOGTYPE_BBDEV bbdev_logtype

/* Helper macro for logging */
#define rte_bbdev_log(level, ...) \
	RTE_LOG_LINE(level, BBDEV, "" __VA_ARGS__)

#define rte_bbdev_log_debug(fmt, ...) \
	rte_bbdev_log(DEBUG, RTE_STR(__LINE__) ":%s() " fmt, __func__, \
		##__VA_ARGS__)

/* Helper macro to check dev_id is valid */
#define VALID_DEV_OR_RET_ERR(dev, dev_id) do { \
	if (dev == NULL) { \
		rte_bbdev_log(ERR, "device %u is invalid", dev_id); \
		return -ENODEV; \
	} \
} while (0)

/* Helper macro to check dev_ops is valid */
#define VALID_DEV_OPS_OR_RET_ERR(dev, dev_id) do { \
	if (dev->dev_ops == NULL) { \
		rte_bbdev_log(ERR, "NULL dev_ops structure in device %u", \
				dev_id); \
		return -ENODEV; \
	} \
} while (0)

/* Helper macro to check that driver implements required function pointer */
#define VALID_FUNC_OR_RET_ERR(func, dev_id) do { \
	if (func == NULL) { \
		rte_bbdev_log(ERR, "device %u does not support %s", \
				dev_id, #func); \
		return -ENOTSUP; \
	} \
} while (0)

/* Helper macro to check that queue is valid */
#define VALID_QUEUE_OR_RET_ERR(queue_id, dev) do { \
	if (queue_id >= dev->data->num_queues) { \
		rte_bbdev_log(ERR, "Invalid queue_id %u for device %u", \
				queue_id, dev->data->dev_id); \
		return -ERANGE; \
	} \
} while (0)

/* List of callback functions registered by an application */
struct rte_bbdev_callback {
	TAILQ_ENTRY(rte_bbdev_callback) next;  /* Callbacks list */
	rte_bbdev_cb_fn cb_fn;  /* Callback address */
	void *cb_arg;  /* Parameter for callback */
	void *ret_param;  /* Return parameter */
	enum rte_bbdev_event_type event; /* Interrupt event type */
	uint32_t active; /* Callback is executing */
};

/* spinlock for bbdev device callbacks */
static rte_spinlock_t rte_bbdev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/*
 * Global array of all devices. This is not static because it's used by the
 * inline enqueue and dequeue functions
 */
struct rte_bbdev rte_bbdev_devices[RTE_BBDEV_MAX_DEVS];

/* Global array with rte_bbdev_data structures */
static struct rte_bbdev_data *rte_bbdev_data;

/* Memzone name for global bbdev data pool */
static const char *MZ_RTE_BBDEV_DATA = "rte_bbdev_data";

/* Number of currently valid devices */
static uint16_t num_devs;

/* Return pointer to device structure, with validity check */
static struct rte_bbdev *
get_dev(uint16_t dev_id)
{
	if (rte_bbdev_is_valid(dev_id))
		return &rte_bbdev_devices[dev_id];
	return NULL;
}

/* Allocate global data array */
static int
rte_bbdev_data_alloc(void)
{
	const unsigned int flags = 0;
	const struct rte_memzone *mz;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(MZ_RTE_BBDEV_DATA,
				RTE_BBDEV_MAX_DEVS * sizeof(*rte_bbdev_data),
				rte_socket_id(), flags);
	} else
		mz = rte_memzone_lookup(MZ_RTE_BBDEV_DATA);
	if (mz == NULL) {
		rte_bbdev_log(CRIT,
				"Cannot allocate memzone for bbdev port data");
		return -ENOMEM;
	}

	rte_bbdev_data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(rte_bbdev_data, 0,
				RTE_BBDEV_MAX_DEVS * sizeof(*rte_bbdev_data));
	return 0;
}

/*
 * Find the data allocated for the device or, if not found, return the first
 * unused bbdev data. If all structures are in use and none is used by the
 * device, return NULL.
 */
static struct rte_bbdev_data *
find_bbdev_data(const char *name)
{
	uint16_t data_id;

	for (data_id = 0; data_id < RTE_BBDEV_MAX_DEVS; ++data_id) {
		if (strlen(rte_bbdev_data[data_id].name) == 0) {
			memset(&rte_bbdev_data[data_id], 0,
					sizeof(struct rte_bbdev_data));
			return &rte_bbdev_data[data_id];
		} else if (strncmp(rte_bbdev_data[data_id].name, name,
				RTE_BBDEV_NAME_MAX_LEN) == 0)
			return &rte_bbdev_data[data_id];
	}

	return NULL;
}

/* Find lowest device id with no attached device */
static uint16_t
find_free_dev_id(void)
{
	uint16_t i;
	for (i = 0; i < RTE_BBDEV_MAX_DEVS; i++) {
		if (rte_bbdev_devices[i].state == RTE_BBDEV_UNUSED)
			return i;
	}
	return RTE_BBDEV_MAX_DEVS;
}

struct rte_bbdev *
rte_bbdev_allocate(const char *name)
{
	int ret;
	struct rte_bbdev *bbdev;
	uint16_t dev_id;

	if (name == NULL) {
		rte_bbdev_log(ERR, "Invalid null device name");
		return NULL;
	}

	if (rte_bbdev_get_named_dev(name) != NULL) {
		rte_bbdev_log(ERR, "Device \"%s\" is already allocated", name);
		return NULL;
	}

	dev_id = find_free_dev_id();
	if (dev_id == RTE_BBDEV_MAX_DEVS) {
		rte_bbdev_log(ERR, "Reached maximum number of devices");
		return NULL;
	}

	bbdev = &rte_bbdev_devices[dev_id];

	if (rte_bbdev_data == NULL) {
		ret = rte_bbdev_data_alloc();
		if (ret != 0)
			return NULL;
	}

	bbdev->data = find_bbdev_data(name);
	if (bbdev->data == NULL) {
		rte_bbdev_log(ERR,
				"Max BBDevs already allocated in multi-process environment!");
		return NULL;
	}

	rte_atomic_fetch_add_explicit(&bbdev->data->process_cnt, 1, rte_memory_order_relaxed);
	bbdev->data->dev_id = dev_id;
	bbdev->state = RTE_BBDEV_INITIALIZED;

	ret = snprintf(bbdev->data->name, RTE_BBDEV_NAME_MAX_LEN, "%s", name);
	if ((ret < 0) || (ret >= RTE_BBDEV_NAME_MAX_LEN)) {
		rte_bbdev_log(ERR, "Copying device name \"%s\" failed", name);
		return NULL;
	}

	/* init user callbacks */
	TAILQ_INIT(&(bbdev->list_cbs));

	num_devs++;

	rte_bbdev_log_debug("Initialised device %s (id = %u). Num devices = %u",
			name, dev_id, num_devs);

	return bbdev;
}

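/*
 * Illustrative sketch of how a driver typically consumes rte_bbdev_allocate()
 * from its probe path (not part of the library; "my_bbdev_probe" and "my_ops"
 * are hypothetical, only the bbdev structures and calls are real API):
 *
 *	static int
 *	my_bbdev_probe(struct rte_device *rte_dev)
 *	{
 *		struct rte_bbdev *bbdev = rte_bbdev_allocate(rte_dev->name);
 *		if (bbdev == NULL)
 *			return -ENODEV;
 *		bbdev->dev_ops = &my_ops;
 *		bbdev->device = rte_dev;
 *		return 0;
 *	}
 */
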
int
rte_bbdev_release(struct rte_bbdev *bbdev)
{
	uint16_t dev_id;
	struct rte_bbdev_callback *cb, *next;

	if (bbdev == NULL) {
		rte_bbdev_log(ERR, "NULL bbdev");
		return -ENODEV;
	}
	dev_id = bbdev->data->dev_id;

	/* free all callbacks from the device's list */
	for (cb = TAILQ_FIRST(&bbdev->list_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);
		TAILQ_REMOVE(&(bbdev->list_cbs), cb, next);
		rte_free(cb);
	}

	/* clear shared BBDev Data if no process is using the device anymore */
	if (rte_atomic_fetch_sub_explicit(&bbdev->data->process_cnt, 1,
			      rte_memory_order_relaxed) - 1 == 0)
		memset(bbdev->data, 0, sizeof(*bbdev->data));

	memset(bbdev, 0, sizeof(*bbdev));
	num_devs--;
	bbdev->state = RTE_BBDEV_UNUSED;

	rte_bbdev_log_debug(
			"Un-initialised device id = %u. Num devices = %u",
			dev_id, num_devs);
	return 0;
}

struct rte_bbdev *
rte_bbdev_get_named_dev(const char *name)
{
	unsigned int i;

	if (name == NULL) {
		rte_bbdev_log(ERR, "NULL driver name");
		return NULL;
	}

	for (i = 0; i < RTE_BBDEV_MAX_DEVS; i++) {
		struct rte_bbdev *dev = get_dev(i);
		if (dev && (strncmp(dev->data->name,
				name, RTE_BBDEV_NAME_MAX_LEN) == 0))
			return dev;
	}

	return NULL;
}

uint16_t
rte_bbdev_count(void)
{
	return num_devs;
}

bool
rte_bbdev_is_valid(uint16_t dev_id)
{
	if ((dev_id < RTE_BBDEV_MAX_DEVS) &&
		rte_bbdev_devices[dev_id].state == RTE_BBDEV_INITIALIZED)
		return true;
	return false;
}

uint16_t
rte_bbdev_find_next(uint16_t dev_id)
{
	dev_id++;
	for (; dev_id < RTE_BBDEV_MAX_DEVS; dev_id++)
		if (rte_bbdev_is_valid(dev_id))
			break;
	return dev_id;
}

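/*
 * Illustrative sketch: an application can walk all valid devices with
 * RTE_BBDEV_FOREACH from rte_bbdev.h, which wraps rte_bbdev_find_next():
 *
 *	uint16_t dev_id;
 *
 *	RTE_BBDEV_FOREACH(dev_id)
 *		printf("bbdev %u of %u is valid\n", dev_id, rte_bbdev_count());
 */
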
int
rte_bbdev_setup_queues(uint16_t dev_id, uint16_t num_queues, int socket_id)
{
	unsigned int i;
	int ret;
	struct rte_bbdev_driver_info dev_info;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->data->started) {
		rte_bbdev_log(ERR,
				"Device %u cannot be configured when started",
				dev_id);
		return -EBUSY;
	}

	/* Get device driver information to get max number of queues */
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->info_get, dev_id);
	memset(&dev_info, 0, sizeof(dev_info));
	dev->dev_ops->info_get(dev, &dev_info);

	if ((num_queues == 0) || (num_queues > dev_info.max_num_queues)) {
		rte_bbdev_log(ERR,
				"Device %u supports 0 < N <= %u queues, not %u",
				dev_id, dev_info.max_num_queues, num_queues);
		return -EINVAL;
	}

	/* If re-configuration, get driver to free existing internal memory */
	if (dev->data->queues != NULL) {
		VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_release, dev_id);
		for (i = 0; i < dev->data->num_queues; i++) {
			int ret = dev->dev_ops->queue_release(dev, i);
			if (ret < 0) {
				rte_bbdev_log(ERR,
						"Device %u queue %u release failed",
						dev_id, i);
				return ret;
			}
		}
		/* Call optional device close */
		if (dev->dev_ops->close) {
			ret = dev->dev_ops->close(dev);
			if (ret < 0) {
				rte_bbdev_log(ERR,
						"Device %u couldn't be closed",
						dev_id);
				return ret;
			}
		}
		rte_free(dev->data->queues);
	}

	/* Allocate queue pointers */
	dev->data->queues = rte_calloc_socket(DEV_NAME, num_queues,
			sizeof(dev->data->queues[0]), RTE_CACHE_LINE_SIZE,
				dev->data->socket_id);
	if (dev->data->queues == NULL) {
		rte_bbdev_log(ERR,
				"calloc of %u queues for device %u on socket %i failed",
				num_queues, dev_id, dev->data->socket_id);
		return -ENOMEM;
	}

	dev->data->num_queues = num_queues;

	/* Call optional device configuration */
	if (dev->dev_ops->setup_queues) {
		ret = dev->dev_ops->setup_queues(dev, num_queues, socket_id);
		if (ret < 0) {
			rte_bbdev_log(ERR,
					"Device %u memory configuration failed",
					dev_id);
			goto error;
		}
	}

	rte_bbdev_log_debug("Device %u set up with %u queues", dev_id,
			num_queues);
	return 0;

error:
	dev->data->num_queues = 0;
	rte_free(dev->data->queues);
	dev->data->queues = NULL;
	return ret;
}

int
rte_bbdev_intr_enable(uint16_t dev_id)
{
	int ret;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->data->started) {
		rte_bbdev_log(ERR,
				"Device %u cannot be configured when started",
				dev_id);
		return -EBUSY;
	}

	if (dev->dev_ops->intr_enable) {
		ret = dev->dev_ops->intr_enable(dev);
		if (ret < 0) {
			rte_bbdev_log(ERR,
					"Device %u interrupts configuration failed",
					dev_id);
			return ret;
		}
		rte_bbdev_log_debug("Enabled interrupts for dev %u", dev_id);
		return 0;
	}

	rte_bbdev_log(ERR, "Device %u doesn't support interrupts", dev_id);
	return -ENOTSUP;
}

int
rte_bbdev_queue_configure(uint16_t dev_id, uint16_t queue_id,
		const struct rte_bbdev_queue_conf *conf)
{
	int ret = 0;
	struct rte_bbdev_driver_info dev_info;
	struct rte_bbdev *dev = get_dev(dev_id);
	const struct rte_bbdev_op_cap *p;
	struct rte_bbdev_queue_conf *stored_conf;
	const char *op_type_str;
	unsigned int max_priority;
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	if (dev->data->queues[queue_id].started || dev->data->started) {
		rte_bbdev_log(ERR,
				"Queue %u of device %u cannot be configured when started",
				queue_id, dev_id);
		return -EBUSY;
	}

	VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_release, dev_id);
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_setup, dev_id);

	/* Get device driver information to verify config is valid */
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->info_get, dev_id);
	memset(&dev_info, 0, sizeof(dev_info));
	dev->dev_ops->info_get(dev, &dev_info);

	/* Check configuration is valid */
	if (conf != NULL) {
		if ((conf->op_type == RTE_BBDEV_OP_NONE) &&
				(dev_info.capabilities[0].type ==
				RTE_BBDEV_OP_NONE)) {
			ret = 1;
		} else {
			for (p = dev_info.capabilities;
					p->type != RTE_BBDEV_OP_NONE; p++) {
				if (conf->op_type == p->type) {
					ret = 1;
					break;
				}
			}
		}
		if (ret == 0) {
			rte_bbdev_log(ERR, "Invalid operation type");
			return -EINVAL;
		}
		if (conf->queue_size > dev_info.queue_size_lim) {
			rte_bbdev_log(ERR,
					"Size (%u) of queue %u of device %u must be: <= %u",
					conf->queue_size, queue_id, dev_id,
					dev_info.queue_size_lim);
			return -EINVAL;
		}
		if (!rte_is_power_of_2(conf->queue_size)) {
			rte_bbdev_log(ERR,
					"Size (%u) of queue %u of device %u must be a power of 2",
					conf->queue_size, queue_id, dev_id);
			return -EINVAL;
		}
		if ((uint8_t)conf->op_type >= RTE_BBDEV_OP_TYPE_SIZE_MAX) {
			rte_bbdev_log(ERR,
					"Invalid operation type (%u) ", conf->op_type);
			return -EINVAL;
		}
		max_priority = dev_info.queue_priority[conf->op_type];
		if (conf->priority > max_priority) {
			rte_bbdev_log(ERR,
					"Priority (%u) of queue %u of bbdev %u must be <= %u",
					conf->priority, queue_id, dev_id, max_priority);
			return -EINVAL;
		}
	}

	/* Release existing queue (in case of queue reconfiguration) */
	if (dev->data->queues[queue_id].queue_private != NULL) {
		ret = dev->dev_ops->queue_release(dev, queue_id);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u queue %u release failed",
					dev_id, queue_id);
			return ret;
		}
	}

	/* Get driver to setup the queue */
	ret = dev->dev_ops->queue_setup(dev, queue_id, (conf != NULL) ?
			conf : &dev_info.default_queue_conf);
	if (ret < 0) {
		/* This may happen when trying different priority levels */
		rte_bbdev_log(INFO,
				"Device %u queue %u setup failed",
				dev_id, queue_id);
		return ret;
	}

	/* Store configuration */
	stored_conf = &dev->data->queues[queue_id].conf;
	memcpy(stored_conf,
			(conf != NULL) ? conf : &dev_info.default_queue_conf,
			sizeof(*stored_conf));

	op_type_str = rte_bbdev_op_type_str(stored_conf->op_type);
	if (op_type_str == NULL)
		return -EINVAL;

	rte_bbdev_log_debug("Configured dev%uq%u (size=%u, type=%s, prio=%u)",
			dev_id, queue_id, stored_conf->queue_size, op_type_str,
			stored_conf->priority);

	return 0;
}

int
rte_bbdev_start(uint16_t dev_id)
{
	int i;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->data->started) {
		rte_bbdev_log_debug("Device %u is already started", dev_id);
		return 0;
	}

	if (dev->dev_ops->start) {
		int ret = dev->dev_ops->start(dev);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u start failed", dev_id);
			return ret;
		}
	}

	/* Store new state */
	for (i = 0; i < dev->data->num_queues; i++)
		if (!dev->data->queues[i].conf.deferred_start)
			dev->data->queues[i].started = true;
	dev->data->started = true;

	rte_bbdev_log_debug("Started device %u", dev_id);
	return 0;
}

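/*
 * Illustrative bring-up sketch for an application (error handling omitted;
 * the queue size and operation type are arbitrary choices, not defaults):
 *
 *	struct rte_bbdev_info info;
 *	struct rte_bbdev_queue_conf qconf = {
 *		.socket = rte_socket_id(),
 *		.queue_size = 512,	<- must be a power of 2, <= queue_size_lim
 *		.priority = 0,
 *		.op_type = RTE_BBDEV_OP_LDPC_DEC,
 *	};
 *	uint16_t q;
 *
 *	rte_bbdev_info_get(dev_id, &info);
 *	rte_bbdev_setup_queues(dev_id, info.drv.max_num_queues, rte_socket_id());
 *	for (q = 0; q < info.drv.max_num_queues; q++)
 *		rte_bbdev_queue_configure(dev_id, q, &qconf);
 *	rte_bbdev_start(dev_id);
 */
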
int
rte_bbdev_stop(uint16_t dev_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (!dev->data->started) {
		rte_bbdev_log_debug("Device %u is already stopped", dev_id);
		return 0;
	}

	if (dev->dev_ops->stop)
		dev->dev_ops->stop(dev);
	dev->data->started = false;

	rte_bbdev_log_debug("Stopped device %u", dev_id);
	return 0;
}

int
rte_bbdev_close(uint16_t dev_id)
{
	int ret;
	uint16_t i;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->data->started) {
		ret = rte_bbdev_stop(dev_id);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u stop failed", dev_id);
			return ret;
		}
	}

	/* Free memory used by queues */
	for (i = 0; i < dev->data->num_queues; i++) {
		ret = dev->dev_ops->queue_release(dev, i);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u queue %u release failed",
					dev_id, i);
			return ret;
		}
	}
	rte_free(dev->data->queues);

	if (dev->dev_ops->close) {
		ret = dev->dev_ops->close(dev);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u close failed", dev_id);
			return ret;
		}
	}

	/* Clear configuration */
	dev->data->queues = NULL;
	dev->data->num_queues = 0;

	rte_bbdev_log_debug("Closed device %u", dev_id);
	return 0;
}

int
rte_bbdev_queue_start(uint16_t dev_id, uint16_t queue_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	if (dev->data->queues[queue_id].started) {
		rte_bbdev_log_debug("Queue %u of device %u already started",
				queue_id, dev_id);
		return 0;
	}

	if (dev->dev_ops->queue_start) {
		int ret = dev->dev_ops->queue_start(dev, queue_id);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u queue %u start failed",
					dev_id, queue_id);
			return ret;
		}
	}
	dev->data->queues[queue_id].started = true;

	rte_bbdev_log_debug("Started queue %u of device %u", queue_id, dev_id);
	return 0;
}

int
rte_bbdev_queue_stop(uint16_t dev_id, uint16_t queue_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	if (!dev->data->queues[queue_id].started) {
		rte_bbdev_log_debug("Queue %u of device %u already stopped",
				queue_id, dev_id);
		return 0;
	}

	if (dev->dev_ops->queue_stop) {
		int ret = dev->dev_ops->queue_stop(dev, queue_id);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u queue %u stop failed",
					dev_id, queue_id);
			return ret;
		}
	}
	dev->data->queues[queue_id].started = false;

	rte_bbdev_log_debug("Stopped queue %u of device %u", queue_id, dev_id);
	return 0;
}

/* Get device statistics */
static void
get_stats_from_queues(struct rte_bbdev *dev, struct rte_bbdev_stats *stats)
{
	unsigned int q_id;
	for (q_id = 0; q_id < dev->data->num_queues; q_id++) {
		struct rte_bbdev_stats *q_stats =
				&dev->data->queues[q_id].queue_stats;

		stats->enqueued_count += q_stats->enqueued_count;
		stats->dequeued_count += q_stats->dequeued_count;
		stats->enqueue_err_count += q_stats->enqueue_err_count;
		stats->dequeue_err_count += q_stats->dequeue_err_count;
		stats->enqueue_warn_count += q_stats->enqueue_warn_count;
		stats->dequeue_warn_count += q_stats->dequeue_warn_count;
	}
	rte_bbdev_log_debug("Got stats on %u", dev->data->dev_id);
}

static void
reset_stats_in_queues(struct rte_bbdev *dev)
{
	unsigned int q_id;
	for (q_id = 0; q_id < dev->data->num_queues; q_id++) {
		struct rte_bbdev_stats *q_stats =
				&dev->data->queues[q_id].queue_stats;

		memset(q_stats, 0, sizeof(*q_stats));
	}
	rte_bbdev_log_debug("Reset stats on %u", dev->data->dev_id);
}

int
rte_bbdev_stats_get(uint16_t dev_id, struct rte_bbdev_stats *stats)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (stats == NULL) {
		rte_bbdev_log(ERR, "NULL stats structure");
		return -EINVAL;
	}

	memset(stats, 0, sizeof(*stats));
	if (dev->dev_ops->stats_get != NULL)
		dev->dev_ops->stats_get(dev, stats);
	else
		get_stats_from_queues(dev, stats);

	rte_bbdev_log_debug("Retrieved stats of device %u", dev_id);
	return 0;
}

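/*
 * Illustrative sketch: polling the aggregate counters from a monitoring
 * thread (dev_id is assumed to identify a configured device):
 *
 *	struct rte_bbdev_stats stats;
 *
 *	if (rte_bbdev_stats_get(dev_id, &stats) == 0)
 *		printf("enq %" PRIu64 " deq %" PRIu64 " enq errs %" PRIu64 "\n",
 *				stats.enqueued_count, stats.dequeued_count,
 *				stats.enqueue_err_count);
 */
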
int
rte_bbdev_stats_reset(uint16_t dev_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->dev_ops->stats_reset != NULL)
		dev->dev_ops->stats_reset(dev);
	else
		reset_stats_in_queues(dev);

	rte_bbdev_log_debug("Reset stats of device %u", dev_id);
	return 0;
}

int
rte_bbdev_info_get(uint16_t dev_id, struct rte_bbdev_info *dev_info)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_FUNC_OR_RET_ERR(dev->dev_ops->info_get, dev_id);

	if (dev_info == NULL) {
		rte_bbdev_log(ERR, "NULL dev info structure");
		return -EINVAL;
	}

	/* Copy data maintained by device interface layer */
	memset(dev_info, 0, sizeof(*dev_info));
	dev_info->dev_name = dev->data->name;
	dev_info->num_queues = dev->data->num_queues;
	dev_info->device = dev->device;
	dev_info->socket_id = dev->data->socket_id;
	dev_info->started = dev->data->started;

	/* Copy data maintained by device driver layer */
	dev->dev_ops->info_get(dev, &dev_info->drv);

	rte_bbdev_log_debug("Retrieved info of device %u", dev_id);
	return 0;
}

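/*
 * Illustrative sketch: checking driver capabilities via the info structure,
 * mirroring the capability walk performed in rte_bbdev_queue_configure():
 *
 *	struct rte_bbdev_info info;
 *	const struct rte_bbdev_op_cap *cap;
 *
 *	rte_bbdev_info_get(dev_id, &info);
 *	for (cap = info.drv.capabilities; cap->type != RTE_BBDEV_OP_NONE; cap++)
 *		if (cap->type == RTE_BBDEV_OP_LDPC_DEC)
 *			printf("%s supports LDPC decode\n", info.dev_name);
 */
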
int
rte_bbdev_queue_info_get(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_queue_info *queue_info)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	if (queue_info == NULL) {
		rte_bbdev_log(ERR, "NULL queue info structure");
		return -EINVAL;
	}

	/* Copy data to output */
	memset(queue_info, 0, sizeof(*queue_info));
	queue_info->conf = dev->data->queues[queue_id].conf;
	queue_info->started = dev->data->queues[queue_id].started;

	rte_bbdev_log_debug("Retrieved info of queue %u of device %u",
			queue_id, dev_id);
	return 0;
}

/* Calculate size needed to store bbdev_op, depending on type */
static unsigned int
get_bbdev_op_size(enum rte_bbdev_op_type type)
{
	unsigned int result = 0;
	switch (type) {
	case RTE_BBDEV_OP_NONE:
		result = RTE_MAX(sizeof(struct rte_bbdev_dec_op),
				sizeof(struct rte_bbdev_enc_op));
		break;
	case RTE_BBDEV_OP_TURBO_DEC:
		result = sizeof(struct rte_bbdev_dec_op);
		break;
	case RTE_BBDEV_OP_TURBO_ENC:
		result = sizeof(struct rte_bbdev_enc_op);
		break;
	case RTE_BBDEV_OP_LDPC_DEC:
		result = sizeof(struct rte_bbdev_dec_op);
		break;
	case RTE_BBDEV_OP_LDPC_ENC:
		result = sizeof(struct rte_bbdev_enc_op);
		break;
	case RTE_BBDEV_OP_FFT:
		result = sizeof(struct rte_bbdev_fft_op);
		break;
	case RTE_BBDEV_OP_MLDTS:
		result = sizeof(struct rte_bbdev_mldts_op);
		break;
	default:
		break;
	}

	return result;
}

/* Initialise a bbdev_op structure */
static void
bbdev_op_init(struct rte_mempool *mempool, void *arg, void *element,
		__rte_unused unsigned int n)
{
	enum rte_bbdev_op_type type = *(enum rte_bbdev_op_type *)arg;

	if (type == RTE_BBDEV_OP_TURBO_DEC || type == RTE_BBDEV_OP_LDPC_DEC) {
		struct rte_bbdev_dec_op *op = element;
		memset(op, 0, mempool->elt_size);
		op->mempool = mempool;
	} else if (type == RTE_BBDEV_OP_TURBO_ENC ||
			type == RTE_BBDEV_OP_LDPC_ENC) {
		struct rte_bbdev_enc_op *op = element;
		memset(op, 0, mempool->elt_size);
		op->mempool = mempool;
	} else if (type == RTE_BBDEV_OP_FFT) {
		struct rte_bbdev_fft_op *op = element;
		memset(op, 0, mempool->elt_size);
		op->mempool = mempool;
	} else if (type == RTE_BBDEV_OP_MLDTS) {
		struct rte_bbdev_mldts_op *op = element;
		memset(op, 0, mempool->elt_size);
		op->mempool = mempool;
	}
}

struct rte_mempool *
rte_bbdev_op_pool_create(const char *name, enum rte_bbdev_op_type type,
		unsigned int num_elements, unsigned int cache_size,
		int socket_id)
{
	struct rte_bbdev_op_pool_private *priv;
	struct rte_mempool *mp;
	const char *op_type_str;

	if (name == NULL) {
		rte_bbdev_log(ERR, "NULL name for op pool");
		return NULL;
	}

	if (type >= BBDEV_OP_TYPE_COUNT) {
		rte_bbdev_log(ERR,
				"Invalid op type (%u), should be less than %u",
				type, BBDEV_OP_TYPE_COUNT);
		return NULL;
	}

	mp = rte_mempool_create(name, num_elements, get_bbdev_op_size(type),
			cache_size, sizeof(struct rte_bbdev_op_pool_private),
			NULL, NULL, bbdev_op_init, &type, socket_id, 0);
	if (mp == NULL) {
		rte_bbdev_log(ERR,
				"Failed to create op pool %s (num ops=%u, op size=%u) with error: %s",
				name, num_elements, get_bbdev_op_size(type),
				rte_strerror(rte_errno));
		return NULL;
	}

	op_type_str = rte_bbdev_op_type_str(type);
	if (op_type_str == NULL)
		return NULL;

	rte_bbdev_log_debug(
			"Op pool %s created for %u ops (type=%s, cache=%u, socket=%u, size=%u)",
			name, num_elements, op_type_str, cache_size, socket_id,
			get_bbdev_op_size(type));

	priv = (struct rte_bbdev_op_pool_private *)rte_mempool_get_priv(mp);
	priv->type = type;

	return mp;
}

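/*
 * Illustrative sketch: creating a pool of LDPC decode operations and moving
 * a burst through a queue (pool/burst sizes are arbitrary; op field setup is
 * elided):
 *
 *	struct rte_mempool *pool = rte_bbdev_op_pool_create("ldpc_dec_pool",
 *			RTE_BBDEV_OP_LDPC_DEC, 4096, 128, rte_socket_id());
 *	struct rte_bbdev_dec_op *ops[32];
 *	uint16_t enq, deq;
 *
 *	if (pool == NULL || rte_bbdev_dec_op_alloc_bulk(pool, ops, 32) != 0)
 *		return;
 *	... fill in ops[i]->ldpc_dec parameters and mbufs ...
 *	enq = rte_bbdev_enqueue_ldpc_dec_ops(dev_id, queue_id, ops, 32);
 *	deq = rte_bbdev_dequeue_ldpc_dec_ops(dev_id, queue_id, ops, enq);
 */
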
int
rte_bbdev_callback_register(uint16_t dev_id, enum rte_bbdev_event_type event,
		rte_bbdev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_bbdev_callback *user_cb;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	if (event >= RTE_BBDEV_EVENT_MAX) {
		rte_bbdev_log(ERR,
				"Invalid event type (%u), should be less than %u",
				event, RTE_BBDEV_EVENT_MAX);
		return -EINVAL;
	}

	if (cb_fn == NULL) {
		rte_bbdev_log(ERR, "NULL callback function");
		return -EINVAL;
	}

	rte_spinlock_lock(&rte_bbdev_cb_lock);

	TAILQ_FOREACH(user_cb, &(dev->list_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
				user_cb->cb_arg == cb_arg &&
				user_cb->event == event)
			break;
	}

	/* create a new callback. */
	if (user_cb == NULL) {
		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_bbdev_callback), 0);
		if (user_cb != NULL) {
			user_cb->cb_fn = cb_fn;
			user_cb->cb_arg = cb_arg;
			user_cb->event = event;
			TAILQ_INSERT_TAIL(&(dev->list_cbs), user_cb, next);
		}
	}

	rte_spinlock_unlock(&rte_bbdev_cb_lock);
	return (user_cb == NULL) ? -ENOMEM : 0;
}

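/*
 * Illustrative sketch: registering an application handler for error events
 * ("on_bbdev_error" is hypothetical; the prototype matches rte_bbdev_cb_fn):
 *
 *	static void
 *	on_bbdev_error(uint16_t dev_id, enum rte_bbdev_event_type event,
 *			void *cb_arg, void *ret_param)
 *	{
 *		printf("bbdev %u raised event %d\n", dev_id, event);
 *	}
 *	...
 *	rte_bbdev_callback_register(dev_id, RTE_BBDEV_EVENT_ERROR,
 *			on_bbdev_error, NULL);
 */
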
int
rte_bbdev_callback_unregister(uint16_t dev_id, enum rte_bbdev_event_type event,
		rte_bbdev_cb_fn cb_fn, void *cb_arg)
{
	int ret = 0;
	struct rte_bbdev_callback *cb, *next;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	if (event >= RTE_BBDEV_EVENT_MAX) {
		rte_bbdev_log(ERR,
				"Invalid event type (%u), should be less than %u",
				event, RTE_BBDEV_EVENT_MAX);
		return -EINVAL;
	}

	if (cb_fn == NULL) {
		rte_bbdev_log(ERR,
				"NULL callback function cannot be unregistered");
		return -EINVAL;
	}

	rte_spinlock_lock(&rte_bbdev_cb_lock);

	for (cb = TAILQ_FIRST(&dev->list_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
			continue;

		/* If this callback is not executing right now, remove it. */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->list_cbs), cb, next);
			rte_free(cb);
		} else
			ret = -EAGAIN;
	}

	rte_spinlock_unlock(&rte_bbdev_cb_lock);
	return ret;
}

void
rte_bbdev_pmd_callback_process(struct rte_bbdev *dev,
	enum rte_bbdev_event_type event, void *ret_param)
{
	struct rte_bbdev_callback *cb_lst;
	struct rte_bbdev_callback dev_cb;

	if (dev == NULL) {
		rte_bbdev_log(ERR, "NULL device");
		return;
	}

	if (dev->data == NULL) {
		rte_bbdev_log(ERR, "NULL data structure");
		return;
	}

	if (event >= RTE_BBDEV_EVENT_MAX) {
		rte_bbdev_log(ERR,
				"Invalid event type (%u), should be less than %u",
				event, RTE_BBDEV_EVENT_MAX);
		return;
	}

	rte_spinlock_lock(&rte_bbdev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->list_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		if (ret_param != NULL)
			dev_cb.ret_param = ret_param;

		rte_spinlock_unlock(&rte_bbdev_cb_lock);
		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
				dev_cb.cb_arg, dev_cb.ret_param);
		rte_spinlock_lock(&rte_bbdev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_bbdev_cb_lock);
}

int
rte_bbdev_queue_intr_enable(uint16_t dev_id, uint16_t queue_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);
	VALID_QUEUE_OR_RET_ERR(queue_id, dev);
	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_intr_enable, dev_id);
	return dev->dev_ops->queue_intr_enable(dev, queue_id);
}

int
rte_bbdev_queue_intr_disable(uint16_t dev_id, uint16_t queue_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);
	VALID_QUEUE_OR_RET_ERR(queue_id, dev);
	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_intr_disable, dev_id);
	return dev->dev_ops->queue_intr_disable(dev, queue_id);
}

int
rte_bbdev_queue_intr_ctl(uint16_t dev_id, uint16_t queue_id, int epfd, int op,
		void *data)
{
	uint32_t vec;
	struct rte_bbdev *dev = get_dev(dev_id);
	struct rte_intr_handle *intr_handle;
	int ret;

	VALID_DEV_OR_RET_ERR(dev, dev_id);
	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	intr_handle = dev->intr_handle;
	if (intr_handle == NULL) {
		rte_bbdev_log(ERR, "Device %u intr handle unset", dev_id);
		return -ENOTSUP;
	}

	if (queue_id >= RTE_MAX_RXTX_INTR_VEC_ID) {
		rte_bbdev_log(ERR, "Device %u queue_id %u is too big",
				dev_id, queue_id);
		return -ENOTSUP;
	}

	vec = rte_intr_vec_list_index_get(intr_handle, queue_id);
	ret = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
	if (ret && (ret != -EEXIST)) {
		rte_bbdev_log(ERR,
				"dev %u q %u int ctl error op %d epfd %d vec %u",
				dev_id, queue_id, op, epfd, vec);
		return ret;
	}

	return 0;
}

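/*
 * Illustrative sketch: arming a queue interrupt and attaching it to the
 * calling thread's default epoll fd, as an alternative to busy polling
 * (RTE_EPOLL_PER_THREAD and rte_epoll_wait() come from the EAL interrupt
 * API):
 *
 *	struct rte_epoll_event event;
 *
 *	rte_bbdev_queue_intr_enable(dev_id, queue_id);
 *	rte_bbdev_queue_intr_ctl(dev_id, queue_id, RTE_EPOLL_PER_THREAD,
 *			RTE_INTR_EVENT_ADD, NULL);
 *	...
 *	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &event, 1, -1);
 */
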
const char *
rte_bbdev_op_type_str(enum rte_bbdev_op_type op_type)
{
	static const char * const op_types[] = {
		"RTE_BBDEV_OP_NONE",
		"RTE_BBDEV_OP_TURBO_DEC",
		"RTE_BBDEV_OP_TURBO_ENC",
		"RTE_BBDEV_OP_LDPC_DEC",
		"RTE_BBDEV_OP_LDPC_ENC",
		"RTE_BBDEV_OP_FFT",
		"RTE_BBDEV_OP_MLDTS",
	};

	if (op_type < BBDEV_OP_TYPE_COUNT)
		return op_types[op_type];

	rte_bbdev_log(ERR, "Invalid operation type");
	return NULL;
}

const char *
rte_bbdev_device_status_str(enum rte_bbdev_device_status status)
{
	static const char * const dev_sta_string[] = {
		"RTE_BBDEV_DEV_NOSTATUS",
		"RTE_BBDEV_DEV_NOT_SUPPORTED",
		"RTE_BBDEV_DEV_RESET",
		"RTE_BBDEV_DEV_CONFIGURED",
		"RTE_BBDEV_DEV_ACTIVE",
		"RTE_BBDEV_DEV_FATAL_ERR",
		"RTE_BBDEV_DEV_RESTART_REQ",
		"RTE_BBDEV_DEV_RECONFIG_REQ",
		"RTE_BBDEV_DEV_CORRECT_ERR",
	};

	/* Cast from enum required for clang. */
	if ((uint8_t)status < sizeof(dev_sta_string) / sizeof(char *))
		return dev_sta_string[status];

	rte_bbdev_log(ERR, "Invalid device status");
	return NULL;
}

const char *
rte_bbdev_enqueue_status_str(enum rte_bbdev_enqueue_status status)
{
	static const char * const enq_sta_string[] = {
		"RTE_BBDEV_ENQ_STATUS_NONE",
		"RTE_BBDEV_ENQ_STATUS_QUEUE_FULL",
		"RTE_BBDEV_ENQ_STATUS_RING_FULL",
		"RTE_BBDEV_ENQ_STATUS_INVALID_OP",
	};

	/* Cast from enum required for clang. */
	if ((uint8_t)status < sizeof(enq_sta_string) / sizeof(char *))
		return enq_sta_string[status];

	rte_bbdev_log(ERR, "Invalid enqueue status");
	return NULL;
}

int
rte_bbdev_queue_ops_dump(uint16_t dev_id, uint16_t queue_id, FILE *f)
{
	struct rte_bbdev_queue_data *q_data;
	struct rte_bbdev_stats *stats;
	uint16_t i;
	struct rte_bbdev *dev = get_dev(dev_id);

	VALID_DEV_OR_RET_ERR(dev, dev_id);
	VALID_QUEUE_OR_RET_ERR(queue_id, dev);
	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_ops_dump, dev_id);

	q_data = &dev->data->queues[queue_id];

	if (f == NULL)
		return -EINVAL;

	fprintf(f, "Dump of operations on %s queue %d\n",
			dev->data->name, queue_id);
	fprintf(f, "  Last Enqueue Status %s\n",
			rte_bbdev_enqueue_status_str(q_data->enqueue_status));
	for (i = 0; i < RTE_BBDEV_ENQ_STATUS_SIZE_MAX; i++)
		if (q_data->queue_stats.enqueue_status_count[i] > 0)
			fprintf(f, "  Enqueue Status Counters %s %" PRIu64 "\n",
					rte_bbdev_enqueue_status_str(i),
					q_data->queue_stats.enqueue_status_count[i]);
	stats = &dev->data->queues[queue_id].queue_stats;

	fprintf(f, "  Enqueue Count %" PRIu64 " Warning %" PRIu64 " Error %" PRIu64 "\n",
			stats->enqueued_count, stats->enqueue_warn_count,
			stats->enqueue_err_count);
	fprintf(f, "  Dequeue Count %" PRIu64 " Warning %" PRIu64 " Error %" PRIu64 "\n",
			stats->dequeued_count, stats->dequeue_warn_count,
			stats->dequeue_err_count);

	return dev->dev_ops->queue_ops_dump(dev, queue_id, f);
}

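/*
 * Illustrative sketch: dumping queue state for debugging, e.g. from a fatal
 * error callback:
 *
 *	if (rte_bbdev_queue_ops_dump(dev_id, queue_id, stderr) < 0)
 *		fprintf(stderr, "dump not supported for this device\n");
 */
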
char *
rte_bbdev_ops_param_string(void *op, enum rte_bbdev_op_type op_type, char *str, uint32_t len)
{
	static char partial[1024];
	struct rte_bbdev_dec_op *op_dec;
	struct rte_bbdev_enc_op *op_enc;
	struct rte_bbdev_fft_op *op_fft;
	struct rte_bbdev_mldts_op *op_mldts;

	rte_iova_t add0 = 0, add1 = 0, add2 = 0, add3 = 0, add4 = 0;

	if (op == NULL) {
		snprintf(str, len, "Invalid Operation pointer\n");
		return str;
	}

	if (op_type == RTE_BBDEV_OP_LDPC_DEC) {
		op_dec = op;
		if (op_dec->ldpc_dec.code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK)
			snprintf(partial, sizeof(partial), "C %d Cab %d Ea %d Eb %d r %d",
					op_dec->ldpc_dec.tb_params.c,
					op_dec->ldpc_dec.tb_params.cab,
					op_dec->ldpc_dec.tb_params.ea,
					op_dec->ldpc_dec.tb_params.eb,
					op_dec->ldpc_dec.tb_params.r);
		else
			snprintf(partial, sizeof(partial), "E %d", op_dec->ldpc_dec.cb_params.e);
		if (op_dec->ldpc_dec.input.data != NULL)
			add0 = rte_pktmbuf_iova_offset(op_dec->ldpc_dec.input.data, 0);
		if (op_dec->ldpc_dec.hard_output.data != NULL)
			add1 = rte_pktmbuf_iova_offset(op_dec->ldpc_dec.hard_output.data, 0);
		if (op_dec->ldpc_dec.soft_output.data != NULL)
			add2 = rte_pktmbuf_iova_offset(op_dec->ldpc_dec.soft_output.data, 0);
		if (op_dec->ldpc_dec.harq_combined_input.data != NULL)
			add3 = rte_pktmbuf_iova_offset(op_dec->ldpc_dec.harq_combined_input.data,
					0);
		if (op_dec->ldpc_dec.harq_combined_output.data != NULL)
			add4 = rte_pktmbuf_iova_offset(op_dec->ldpc_dec.harq_combined_output.data,
					0);
		snprintf(str, len, "op %x st %x BG %d Zc %d Ncb %d qm %d F %d Rv %d Itmax %d It %d "
			"HARQin %d in %" PRIx64 " ho %" PRIx64 " so %" PRIx64 " hi %" PRIx64 " "
			"ho %" PRIx64 " %s\n",
			op_dec->ldpc_dec.op_flags, op_dec->status,
			op_dec->ldpc_dec.basegraph, op_dec->ldpc_dec.z_c,
			op_dec->ldpc_dec.n_cb, op_dec->ldpc_dec.q_m,
			op_dec->ldpc_dec.n_filler, op_dec->ldpc_dec.rv_index,
			op_dec->ldpc_dec.iter_max, op_dec->ldpc_dec.iter_count,
			op_dec->ldpc_dec.harq_combined_input.length,
			add0, add1, add2, add3, add4, partial);
	} else if (op_type == RTE_BBDEV_OP_TURBO_DEC) {
		op_dec = op;
		if (op_dec->turbo_dec.code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK)
			snprintf(partial, sizeof(partial), "C %d Cab %d Ea %d Eb %d r %d K %d",
					op_dec->turbo_dec.tb_params.c,
					op_dec->turbo_dec.tb_params.cab,
					op_dec->turbo_dec.tb_params.ea,
					op_dec->turbo_dec.tb_params.eb,
					op_dec->turbo_dec.tb_params.r,
					op_dec->turbo_dec.tb_params.k_neg);
		else
			snprintf(partial, sizeof(partial), "E %d K %d",
					op_dec->turbo_dec.cb_params.e,
					op_dec->turbo_dec.cb_params.k);
		if (op_dec->turbo_dec.input.data != NULL)
			add0 = rte_pktmbuf_iova_offset(op_dec->turbo_dec.input.data, 0);
		if (op_dec->turbo_dec.hard_output.data != NULL)
			add1 = rte_pktmbuf_iova_offset(op_dec->turbo_dec.hard_output.data, 0);
		if (op_dec->turbo_dec.soft_output.data != NULL)
			add2 = rte_pktmbuf_iova_offset(op_dec->turbo_dec.soft_output.data, 0);
		snprintf(str, len, "op %x st %x CBM %d Iter %d map %d Rv %d ext %d "
				"in %" PRIx64 " ho %" PRIx64 " so %" PRIx64 " %s\n",
				op_dec->turbo_dec.op_flags, op_dec->status,
				op_dec->turbo_dec.code_block_mode,
				op_dec->turbo_dec.iter_max, op_dec->turbo_dec.num_maps,
				op_dec->turbo_dec.rv_index, op_dec->turbo_dec.ext_scale,
				add0, add1, add2, partial);
	} else if (op_type == RTE_BBDEV_OP_LDPC_ENC) {
		op_enc = op;
		if (op_enc->ldpc_enc.code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK)
			snprintf(partial, sizeof(partial), "C %d Cab %d Ea %d Eb %d r %d",
					op_enc->ldpc_enc.tb_params.c,
					op_enc->ldpc_enc.tb_params.cab,
					op_enc->ldpc_enc.tb_params.ea,
					op_enc->ldpc_enc.tb_params.eb,
					op_enc->ldpc_enc.tb_params.r);
		else
			snprintf(partial, sizeof(partial), "E %d",
					op_enc->ldpc_enc.cb_params.e);
		if (op_enc->ldpc_enc.input.data != NULL)
			add0 = rte_pktmbuf_iova_offset(op_enc->ldpc_enc.input.data, 0);
		if (op_enc->ldpc_enc.output.data != NULL)
			add1 = rte_pktmbuf_iova_offset(op_enc->ldpc_enc.output.data, 0);
		snprintf(str, len, "op %x st %x BG %d Zc %d Ncb %d q_m %d F %d Rv %d "
				"in %" PRIx64 " out %" PRIx64 " %s\n",
				op_enc->ldpc_enc.op_flags, op_enc->status,
				op_enc->ldpc_enc.basegraph, op_enc->ldpc_enc.z_c,
				op_enc->ldpc_enc.n_cb, op_enc->ldpc_enc.q_m,
				op_enc->ldpc_enc.n_filler, op_enc->ldpc_enc.rv_index,
				add0, add1, partial);
	} else if (op_type == RTE_BBDEV_OP_TURBO_ENC) {
		op_enc = op;
		if (op_enc->turbo_enc.code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK)
			snprintf(partial, sizeof(partial),
					"C %d Cab %d Ea %d Eb %d r %d K %d Ncb %d",
					op_enc->turbo_enc.tb_params.c,
					op_enc->turbo_enc.tb_params.cab,
					op_enc->turbo_enc.tb_params.ea,
					op_enc->turbo_enc.tb_params.eb,
					op_enc->turbo_enc.tb_params.r,
					op_enc->turbo_enc.tb_params.k_neg,
					op_enc->turbo_enc.tb_params.ncb_neg);
		else
			snprintf(partial, sizeof(partial), "E %d K %d",
					op_enc->turbo_enc.cb_params.e,
					op_enc->turbo_enc.cb_params.k);
		if (op_enc->turbo_enc.input.data != NULL)
			add0 = rte_pktmbuf_iova_offset(op_enc->turbo_enc.input.data, 0);
		if (op_enc->turbo_enc.output.data != NULL)
			add1 = rte_pktmbuf_iova_offset(op_enc->turbo_enc.output.data, 0);
		snprintf(str, len, "op %x st %x CBM %d Rv %d In %" PRIx64 " Out %" PRIx64 " %s\n",
				op_enc->turbo_enc.op_flags, op_enc->status,
				op_enc->turbo_enc.code_block_mode, op_enc->turbo_enc.rv_index,
				add0, add1, partial);
	} else if (op_type == RTE_BBDEV_OP_FFT) {
		op_fft = op;
		if (op_fft->fft.base_input.data != NULL)
			add0 = rte_pktmbuf_iova_offset(op_fft->fft.base_input.data, 0);
		if (op_fft->fft.base_output.data != NULL)
			add1 = rte_pktmbuf_iova_offset(op_fft->fft.base_output.data, 0);
		if (op_fft->fft.dewindowing_input.data != NULL)
			add2 = rte_pktmbuf_iova_offset(op_fft->fft.dewindowing_input.data, 0);
		if (op_fft->fft.power_meas_output.data != NULL)
			add3 = rte_pktmbuf_iova_offset(op_fft->fft.power_meas_output.data, 0);
		snprintf(str, len, "op %x st %x in %d inl %d out %d outl %d cs %x ants %d "
				"idft %d dft %d cst %d ish %d dsh %d ncs %d pwsh %d fp16 %d fr %d "
				"outde %d in %" PRIx64 " out %" PRIx64 " dw %" PRIx64 " "
				"pm %" PRIx64 "\n",
				op_fft->fft.op_flags, op_fft->status,
				op_fft->fft.input_sequence_size, op_fft->fft.input_leading_padding,
				op_fft->fft.output_sequence_size,
				op_fft->fft.output_leading_depadding,
				op_fft->fft.cs_bitmap, op_fft->fft.num_antennas_log2,
				op_fft->fft.idft_log2, op_fft->fft.dft_log2,
				op_fft->fft.cs_time_adjustment,
				op_fft->fft.idft_shift, op_fft->fft.dft_shift,
				op_fft->fft.ncs_reciprocal, op_fft->fft.power_shift,
				op_fft->fft.fp16_exp_adjust, op_fft->fft.freq_resample_mode,
				op_fft->fft.output_depadded_size, add0, add1, add2, add3);
	} else if (op_type == RTE_BBDEV_OP_MLDTS) {
		op_mldts = op;
		if (op_mldts->mldts.qhy_input.data != NULL)
			add0 = rte_pktmbuf_iova_offset(op_mldts->mldts.qhy_input.data, 0);
		if (op_mldts->mldts.r_input.data != NULL)
			add1 = rte_pktmbuf_iova_offset(op_mldts->mldts.r_input.data, 0);
		if (op_mldts->mldts.output.data != NULL)
			add2 = rte_pktmbuf_iova_offset(op_mldts->mldts.output.data, 0);
		snprintf(str, len,
				"op %x st %x rbs %d lay %d rrep %d crep %d qm %d %d %d %d "
				"qhy %" PRIx64 " r %" PRIx64 " out %" PRIx64 "\n",
				op_mldts->mldts.op_flags, op_mldts->status,
				op_mldts->mldts.num_rbs, op_mldts->mldts.num_layers,
				op_mldts->mldts.r_rep, op_mldts->mldts.c_rep,
				op_mldts->mldts.q_m[0], op_mldts->mldts.q_m[1],
				op_mldts->mldts.q_m[2], op_mldts->mldts.q_m[3],
				add0, add1, add2);

	} else {
		snprintf(str, len, "Invalid Operation type %d\n", op_type);
	}

	return str;
}
1406