/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdint.h>
#include <string.h>
#include <stdbool.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_log.h>
#include <rte_eal.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_memzone.h>
#include <rte_lcore.h>
#include <rte_spinlock.h>
#include <rte_interrupts.h>

#include "rte_bbdev_op.h"
#include "rte_bbdev.h"
#include "rte_bbdev_pmd.h"

#define DEV_NAME "BBDEV"


/* BBDev library logging ID */
RTE_LOG_REGISTER_DEFAULT(bbdev_logtype, NOTICE);

/* Helper macro for logging */
#define rte_bbdev_log(level, fmt, ...) \
	rte_log(RTE_LOG_ ## level, bbdev_logtype, fmt "\n", ##__VA_ARGS__)

#define rte_bbdev_log_debug(fmt, ...) \
	rte_bbdev_log(DEBUG, RTE_STR(__LINE__) ":%s() " fmt, __func__, \
		##__VA_ARGS__)

/* Helper macro to check dev_id is valid */
#define VALID_DEV_OR_RET_ERR(dev, dev_id) do { \
	if (dev == NULL) { \
		rte_bbdev_log(ERR, "device %u is invalid", dev_id); \
		return -ENODEV; \
	} \
} while (0)

/* Helper macro to check dev_ops is valid */
#define VALID_DEV_OPS_OR_RET_ERR(dev, dev_id) do { \
	if (dev->dev_ops == NULL) { \
		rte_bbdev_log(ERR, "NULL dev_ops structure in device %u", \
				dev_id); \
		return -ENODEV; \
	} \
} while (0)

/* Helper macro to check that driver implements required function pointer */
#define VALID_FUNC_OR_RET_ERR(func, dev_id) do { \
	if (func == NULL) { \
		rte_bbdev_log(ERR, "device %u does not support %s", \
				dev_id, #func); \
		return -ENOTSUP; \
	} \
} while (0)

/* Helper macro to check that queue is valid */
#define VALID_QUEUE_OR_RET_ERR(queue_id, dev) do { \
	if (queue_id >= dev->data->num_queues) { \
		rte_bbdev_log(ERR, "Invalid queue_id %u for device %u", \
				queue_id, dev->data->dev_id); \
		return -ERANGE; \
	} \
} while (0)

/* List of callback functions registered by an application */
struct rte_bbdev_callback {
	TAILQ_ENTRY(rte_bbdev_callback) next;  /* Callbacks list */
	rte_bbdev_cb_fn cb_fn;  /* Callback address */
	void *cb_arg;  /* Parameter for callback */
	void *ret_param;  /* Return parameter */
	enum rte_bbdev_event_type event; /* Interrupt event type */
	uint32_t active; /* Callback is executing */
};

/* spinlock for bbdev device callbacks */
static rte_spinlock_t rte_bbdev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/*
 * Global array of all devices. This is not static because it's used by the
 * inline enqueue and dequeue functions
 */
struct rte_bbdev rte_bbdev_devices[RTE_BBDEV_MAX_DEVS];

/* Global array with rte_bbdev_data structures */
static struct rte_bbdev_data *rte_bbdev_data;

/* Memzone name for global bbdev data pool */
static const char *MZ_RTE_BBDEV_DATA = "rte_bbdev_data";

/* Number of currently valid devices */
static uint16_t num_devs;

/* Return pointer to device structure, with validity check */
static struct rte_bbdev *
get_dev(uint16_t dev_id)
{
	if (rte_bbdev_is_valid(dev_id))
		return &rte_bbdev_devices[dev_id];
	return NULL;
}

/* Allocate global data array */
static int
rte_bbdev_data_alloc(void)
{
	const unsigned int flags = 0;
	const struct rte_memzone *mz;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(MZ_RTE_BBDEV_DATA,
				RTE_BBDEV_MAX_DEVS * sizeof(*rte_bbdev_data),
				rte_socket_id(), flags);
	} else
		mz = rte_memzone_lookup(MZ_RTE_BBDEV_DATA);
	if (mz == NULL) {
		rte_bbdev_log(CRIT,
				"Cannot allocate memzone for bbdev port data");
		return -ENOMEM;
	}

	rte_bbdev_data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(rte_bbdev_data, 0,
				RTE_BBDEV_MAX_DEVS * sizeof(*rte_bbdev_data));
	return 0;
}
/*
 * Find the data structure already allocated for the device or, if none is
 * found, return the first unused entry. If all structures are in use and
 * none belongs to the device, return NULL.
 */
static struct rte_bbdev_data *
find_bbdev_data(const char *name)
{
	uint16_t data_id;

	for (data_id = 0; data_id < RTE_BBDEV_MAX_DEVS; ++data_id) {
		if (strlen(rte_bbdev_data[data_id].name) == 0) {
			memset(&rte_bbdev_data[data_id], 0,
					sizeof(struct rte_bbdev_data));
			return &rte_bbdev_data[data_id];
		} else if (strncmp(rte_bbdev_data[data_id].name, name,
				RTE_BBDEV_NAME_MAX_LEN) == 0)
			return &rte_bbdev_data[data_id];
	}

	return NULL;
}

/* Find lowest device id with no attached device */
static uint16_t
find_free_dev_id(void)
{
	uint16_t i;
	for (i = 0; i < RTE_BBDEV_MAX_DEVS; i++) {
		if (rte_bbdev_devices[i].state == RTE_BBDEV_UNUSED)
			return i;
	}
	return RTE_BBDEV_MAX_DEVS;
}

struct rte_bbdev *
rte_bbdev_allocate(const char *name)
{
	int ret;
	struct rte_bbdev *bbdev;
	uint16_t dev_id;

	if (name == NULL) {
		rte_bbdev_log(ERR, "Invalid null device name");
		return NULL;
	}

	if (rte_bbdev_get_named_dev(name) != NULL) {
		rte_bbdev_log(ERR, "Device \"%s\" is already allocated", name);
		return NULL;
	}

	dev_id = find_free_dev_id();
	if (dev_id == RTE_BBDEV_MAX_DEVS) {
		rte_bbdev_log(ERR, "Reached maximum number of devices");
		return NULL;
	}

	bbdev = &rte_bbdev_devices[dev_id];

	if (rte_bbdev_data == NULL) {
		ret = rte_bbdev_data_alloc();
		if (ret != 0)
			return NULL;
	}

	bbdev->data = find_bbdev_data(name);
	if (bbdev->data == NULL) {
		rte_bbdev_log(ERR,
				"Max BBDevs already allocated in multi-process environment!");
		return NULL;
	}

	__atomic_add_fetch(&bbdev->data->process_cnt, 1, __ATOMIC_RELAXED);
	bbdev->data->dev_id = dev_id;
	bbdev->state = RTE_BBDEV_INITIALIZED;

	ret = snprintf(bbdev->data->name, RTE_BBDEV_NAME_MAX_LEN, "%s", name);
	if ((ret < 0) || (ret >= RTE_BBDEV_NAME_MAX_LEN)) {
		rte_bbdev_log(ERR, "Copying device name \"%s\" failed", name);
		return NULL;
	}

	/* init user callbacks */
	TAILQ_INIT(&(bbdev->list_cbs));

	num_devs++;

	rte_bbdev_log_debug("Initialised device %s (id = %u). Num devices = %u",
			name, dev_id, num_devs);

	return bbdev;
}
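
/*
 * Usage sketch (illustrative; the driver name and ops symbol below are
 * hypothetical): a PMD probe routine typically pairs rte_bbdev_allocate()
 * with filling in the ops table and device data.
 *
 *	struct rte_bbdev *bbdev = rte_bbdev_allocate("baseband_example");
 *	if (bbdev == NULL)
 *		return -ENODEV;
 *	bbdev->dev_ops = &example_pmd_ops;	(hypothetical ops table)
 *	bbdev->data->socket_id = rte_socket_id();
 */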

int
rte_bbdev_release(struct rte_bbdev *bbdev)
{
	uint16_t dev_id;
	struct rte_bbdev_callback *cb, *next;

	if (bbdev == NULL) {
		rte_bbdev_log(ERR, "NULL bbdev");
		return -ENODEV;
	}
	dev_id = bbdev->data->dev_id;

	/* free all callbacks from the device's list */
	for (cb = TAILQ_FIRST(&bbdev->list_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);
		TAILQ_REMOVE(&(bbdev->list_cbs), cb, next);
		rte_free(cb);
	}

	/* clear shared BBDev Data if no process is using the device anymore */
	if (__atomic_sub_fetch(&bbdev->data->process_cnt, 1,
			      __ATOMIC_RELAXED) == 0)
		memset(bbdev->data, 0, sizeof(*bbdev->data));

	memset(bbdev, 0, sizeof(*bbdev));
	num_devs--;
	bbdev->state = RTE_BBDEV_UNUSED;

	rte_bbdev_log_debug(
			"Un-initialised device id = %u. Num devices = %u",
			dev_id, num_devs);
	return 0;
}

struct rte_bbdev *
rte_bbdev_get_named_dev(const char *name)
{
	unsigned int i;

	if (name == NULL) {
		rte_bbdev_log(ERR, "NULL driver name");
		return NULL;
	}

	for (i = 0; i < RTE_BBDEV_MAX_DEVS; i++) {
		struct rte_bbdev *dev = get_dev(i);
		if (dev && (strncmp(dev->data->name,
				name, RTE_BBDEV_NAME_MAX_LEN) == 0))
			return dev;
	}

	return NULL;
}

uint16_t
rte_bbdev_count(void)
{
	return num_devs;
}

bool
rte_bbdev_is_valid(uint16_t dev_id)
{
	if ((dev_id < RTE_BBDEV_MAX_DEVS) &&
		rte_bbdev_devices[dev_id].state == RTE_BBDEV_INITIALIZED)
		return true;
	return false;
}

uint16_t
rte_bbdev_find_next(uint16_t dev_id)
{
	dev_id++;
	for (; dev_id < RTE_BBDEV_MAX_DEVS; dev_id++)
		if (rte_bbdev_is_valid(dev_id))
			break;
	return dev_id;
}
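
/*
 * Usage sketch (illustrative): iterate over all initialised devices. Seeding
 * with UINT16_MAX makes the first increment wrap to id 0; the
 * RTE_BBDEV_FOREACH() macro in rte_bbdev.h wraps this same pattern.
 *
 *	uint16_t dev_id;
 *	for (dev_id = rte_bbdev_find_next(UINT16_MAX);
 *			dev_id < RTE_BBDEV_MAX_DEVS;
 *			dev_id = rte_bbdev_find_next(dev_id))
 *		printf("bbdev %u of %u\n", dev_id, rte_bbdev_count());
 */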

int
rte_bbdev_setup_queues(uint16_t dev_id, uint16_t num_queues, int socket_id)
{
	unsigned int i;
	int ret;
	struct rte_bbdev_driver_info dev_info;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->data->started) {
		rte_bbdev_log(ERR,
				"Device %u cannot be configured when started",
				dev_id);
		return -EBUSY;
	}

	/* Get device driver information to get max number of queues */
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->info_get, dev_id);
	memset(&dev_info, 0, sizeof(dev_info));
	dev->dev_ops->info_get(dev, &dev_info);

	if ((num_queues == 0) || (num_queues > dev_info.max_num_queues)) {
		rte_bbdev_log(ERR,
				"Device %u supports 0 < N <= %u queues, not %u",
				dev_id, dev_info.max_num_queues, num_queues);
		return -EINVAL;
	}

	/* If re-configuration, get driver to free existing internal memory */
	if (dev->data->queues != NULL) {
		VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_release, dev_id);
		for (i = 0; i < dev->data->num_queues; i++) {
			ret = dev->dev_ops->queue_release(dev, i);
			if (ret < 0) {
				rte_bbdev_log(ERR,
						"Device %u queue %u release failed",
						dev_id, i);
				return ret;
			}
		}
		/* Call optional device close */
		if (dev->dev_ops->close) {
			ret = dev->dev_ops->close(dev);
			if (ret < 0) {
				rte_bbdev_log(ERR,
						"Device %u couldn't be closed",
						dev_id);
				return ret;
			}
		}
		rte_free(dev->data->queues);
	}

	/* Allocate queue pointers */
	dev->data->queues = rte_calloc_socket(DEV_NAME, num_queues,
			sizeof(dev->data->queues[0]), RTE_CACHE_LINE_SIZE,
				dev->data->socket_id);
	if (dev->data->queues == NULL) {
		rte_bbdev_log(ERR,
				"calloc of %u queues for device %u on socket %i failed",
				num_queues, dev_id, dev->data->socket_id);
		return -ENOMEM;
	}

	dev->data->num_queues = num_queues;

	/* Call optional device configuration */
	if (dev->dev_ops->setup_queues) {
		ret = dev->dev_ops->setup_queues(dev, num_queues, socket_id);
		if (ret < 0) {
			rte_bbdev_log(ERR,
					"Device %u memory configuration failed",
					dev_id);
			goto error;
		}
	}

	rte_bbdev_log_debug("Device %u set up with %u queues", dev_id,
			num_queues);
	return 0;

error:
	dev->data->num_queues = 0;
	rte_free(dev->data->queues);
	dev->data->queues = NULL;
	return ret;
}
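
/*
 * Usage sketch (illustrative; the queue count is an assumption): query the
 * driver limits first, then size the queue array within them.
 *
 *	struct rte_bbdev_info info;
 *	rte_bbdev_info_get(dev_id, &info);
 *	uint16_t num_queues = RTE_MIN(4, info.drv.max_num_queues);
 *	int ret = rte_bbdev_setup_queues(dev_id, num_queues, rte_socket_id());
 *	if (ret != 0)
 *		printf("bbdev %u queue setup failed: %d\n", dev_id, ret);
 */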

int
rte_bbdev_intr_enable(uint16_t dev_id)
{
	int ret;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->data->started) {
		rte_bbdev_log(ERR,
				"Device %u cannot be configured when started",
				dev_id);
		return -EBUSY;
	}

	if (dev->dev_ops->intr_enable) {
		ret = dev->dev_ops->intr_enable(dev);
		if (ret < 0) {
			rte_bbdev_log(ERR,
					"Device %u interrupts configuration failed",
					dev_id);
			return ret;
		}
		rte_bbdev_log_debug("Enabled interrupts for dev %u", dev_id);
		return 0;
	}

	rte_bbdev_log(ERR, "Device %u doesn't support interrupts", dev_id);
	return -ENOTSUP;
}

int
rte_bbdev_queue_configure(uint16_t dev_id, uint16_t queue_id,
		const struct rte_bbdev_queue_conf *conf)
{
	int ret = 0;
	struct rte_bbdev_driver_info dev_info;
	struct rte_bbdev *dev = get_dev(dev_id);
	const struct rte_bbdev_op_cap *p;
	struct rte_bbdev_queue_conf *stored_conf;
	const char *op_type_str;
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	if (dev->data->queues[queue_id].started || dev->data->started) {
		rte_bbdev_log(ERR,
				"Queue %u of device %u cannot be configured when started",
				queue_id, dev_id);
		return -EBUSY;
	}

	VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_release, dev_id);
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_setup, dev_id);

	/* Get device driver information to verify config is valid */
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->info_get, dev_id);
	memset(&dev_info, 0, sizeof(dev_info));
	dev->dev_ops->info_get(dev, &dev_info);

	/* Check configuration is valid */
	if (conf != NULL) {
		if ((conf->op_type == RTE_BBDEV_OP_NONE) &&
				(dev_info.capabilities[0].type ==
				RTE_BBDEV_OP_NONE)) {
			ret = 1;
		} else {
			for (p = dev_info.capabilities;
					p->type != RTE_BBDEV_OP_NONE; p++) {
				if (conf->op_type == p->type) {
					ret = 1;
					break;
				}
			}
		}
		if (ret == 0) {
			rte_bbdev_log(ERR, "Invalid operation type");
			return -EINVAL;
		}
		if (conf->queue_size > dev_info.queue_size_lim) {
			rte_bbdev_log(ERR,
					"Size (%u) of queue %u of device %u must be <= %u",
					conf->queue_size, queue_id, dev_id,
					dev_info.queue_size_lim);
			return -EINVAL;
		}
		if (!rte_is_power_of_2(conf->queue_size)) {
			rte_bbdev_log(ERR,
					"Size (%u) of queue %u of device %u must be a power of 2",
					conf->queue_size, queue_id, dev_id);
			return -EINVAL;
		}
		if (conf->op_type == RTE_BBDEV_OP_TURBO_DEC &&
			conf->priority > dev_info.max_ul_queue_priority) {
			rte_bbdev_log(ERR,
					"Priority (%u) of queue %u of bbdev %u must be <= %u",
					conf->priority, queue_id, dev_id,
					dev_info.max_ul_queue_priority);
			return -EINVAL;
		}
		if (conf->op_type == RTE_BBDEV_OP_TURBO_ENC &&
			conf->priority > dev_info.max_dl_queue_priority) {
			rte_bbdev_log(ERR,
					"Priority (%u) of queue %u of bbdev %u must be <= %u",
					conf->priority, queue_id, dev_id,
					dev_info.max_dl_queue_priority);
			return -EINVAL;
		}
	}

	/* Release existing queue (in case of queue reconfiguration) */
	if (dev->data->queues[queue_id].queue_private != NULL) {
		ret = dev->dev_ops->queue_release(dev, queue_id);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u queue %u release failed",
					dev_id, queue_id);
			return ret;
		}
	}

	/* Get driver to set up the queue */
	ret = dev->dev_ops->queue_setup(dev, queue_id, (conf != NULL) ?
			conf : &dev_info.default_queue_conf);
	if (ret < 0) {
		/* This may happen when trying different priority levels */
		rte_bbdev_log(INFO,
				"Device %u queue %u setup failed",
				dev_id, queue_id);
		return ret;
	}

	/* Store configuration */
	stored_conf = &dev->data->queues[queue_id].conf;
	memcpy(stored_conf,
			(conf != NULL) ? conf : &dev_info.default_queue_conf,
			sizeof(*stored_conf));

	op_type_str = rte_bbdev_op_type_str(stored_conf->op_type);
	if (op_type_str == NULL)
		return -EINVAL;

	rte_bbdev_log_debug("Configured dev%uq%u (size=%u, type=%s, prio=%u)",
			dev_id, queue_id, stored_conf->queue_size, op_type_str,
			stored_conf->priority);

	return 0;
}
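
/*
 * Usage sketch (illustrative; field values are assumptions): configure one
 * turbo-decode queue. Passing NULL instead of &conf applies the driver's
 * default_queue_conf reported by info_get.
 *
 *	struct rte_bbdev_queue_conf conf = {
 *		.socket = rte_socket_id(),
 *		.queue_size = 256,	(power of 2, <= queue_size_lim)
 *		.priority = 0,
 *		.deferred_start = false,
 *		.op_type = RTE_BBDEV_OP_TURBO_DEC,
 *	};
 *	if (rte_bbdev_queue_configure(dev_id, 0, &conf) != 0)
 *		printf("bbdev %u queue 0 configuration failed\n", dev_id);
 */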

int
rte_bbdev_start(uint16_t dev_id)
{
	int i;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->data->started) {
		rte_bbdev_log_debug("Device %u is already started", dev_id);
		return 0;
	}

	if (dev->dev_ops->start) {
		int ret = dev->dev_ops->start(dev);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u start failed", dev_id);
			return ret;
		}
	}

	/* Store new state */
	for (i = 0; i < dev->data->num_queues; i++)
		if (!dev->data->queues[i].conf.deferred_start)
			dev->data->queues[i].started = true;
	dev->data->started = true;

	rte_bbdev_log_debug("Started device %u", dev_id);
	return 0;
}

int
rte_bbdev_stop(uint16_t dev_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (!dev->data->started) {
		rte_bbdev_log_debug("Device %u is already stopped", dev_id);
		return 0;
	}

	if (dev->dev_ops->stop)
		dev->dev_ops->stop(dev);
	dev->data->started = false;

	rte_bbdev_log_debug("Stopped device %u", dev_id);
	return 0;
}

int
rte_bbdev_close(uint16_t dev_id)
{
	int ret;
	uint16_t i;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->data->started) {
		ret = rte_bbdev_stop(dev_id);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u stop failed", dev_id);
			return ret;
		}
	}

	/* Free memory used by queues */
	for (i = 0; i < dev->data->num_queues; i++) {
		ret = dev->dev_ops->queue_release(dev, i);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u queue %u release failed",
					dev_id, i);
			return ret;
		}
	}
	rte_free(dev->data->queues);

	if (dev->dev_ops->close) {
		ret = dev->dev_ops->close(dev);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u close failed", dev_id);
			return ret;
		}
	}

	/* Clear configuration */
	dev->data->queues = NULL;
	dev->data->num_queues = 0;

	rte_bbdev_log_debug("Closed device %u", dev_id);
	return 0;
}
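
/*
 * Lifecycle sketch (illustrative) of the state machine enforced above:
 * queues may only be (re)configured while the device is stopped.
 *
 *	rte_bbdev_setup_queues(dev_id, num_queues, socket_id);
 *	rte_bbdev_queue_configure(dev_id, 0, NULL);	(driver defaults)
 *	rte_bbdev_start(dev_id);
 *	... enqueue/dequeue bursts ...
 *	rte_bbdev_stop(dev_id);
 *	rte_bbdev_close(dev_id);
 */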

int
rte_bbdev_queue_start(uint16_t dev_id, uint16_t queue_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	if (dev->data->queues[queue_id].started) {
		rte_bbdev_log_debug("Queue %u of device %u already started",
				queue_id, dev_id);
		return 0;
	}

	if (dev->dev_ops->queue_start) {
		int ret = dev->dev_ops->queue_start(dev, queue_id);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u queue %u start failed",
					dev_id, queue_id);
			return ret;
		}
	}
	dev->data->queues[queue_id].started = true;

	rte_bbdev_log_debug("Started queue %u of device %u", queue_id, dev_id);
	return 0;
}

int
rte_bbdev_queue_stop(uint16_t dev_id, uint16_t queue_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	if (!dev->data->queues[queue_id].started) {
		rte_bbdev_log_debug("Queue %u of device %u already stopped",
				queue_id, dev_id);
		return 0;
	}

	if (dev->dev_ops->queue_stop) {
		int ret = dev->dev_ops->queue_stop(dev, queue_id);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u queue %u stop failed",
					dev_id, queue_id);
			return ret;
		}
	}
	dev->data->queues[queue_id].started = false;

	rte_bbdev_log_debug("Stopped queue %u of device %u", queue_id, dev_id);
	return 0;
}

/* Get device statistics */
static void
get_stats_from_queues(struct rte_bbdev *dev, struct rte_bbdev_stats *stats)
{
	unsigned int q_id;
	for (q_id = 0; q_id < dev->data->num_queues; q_id++) {
		struct rte_bbdev_stats *q_stats =
				&dev->data->queues[q_id].queue_stats;

		stats->enqueued_count += q_stats->enqueued_count;
		stats->dequeued_count += q_stats->dequeued_count;
		stats->enqueue_err_count += q_stats->enqueue_err_count;
		stats->dequeue_err_count += q_stats->dequeue_err_count;
	}
	rte_bbdev_log_debug("Got stats on %u", dev->data->dev_id);
}

static void
reset_stats_in_queues(struct rte_bbdev *dev)
{
	unsigned int q_id;
	for (q_id = 0; q_id < dev->data->num_queues; q_id++) {
		struct rte_bbdev_stats *q_stats =
				&dev->data->queues[q_id].queue_stats;

		memset(q_stats, 0, sizeof(*q_stats));
	}
	rte_bbdev_log_debug("Reset stats on %u", dev->data->dev_id);
}

int
rte_bbdev_stats_get(uint16_t dev_id, struct rte_bbdev_stats *stats)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (stats == NULL) {
		rte_bbdev_log(ERR, "NULL stats structure");
		return -EINVAL;
	}

	memset(stats, 0, sizeof(*stats));
	if (dev->dev_ops->stats_get != NULL)
		dev->dev_ops->stats_get(dev, stats);
	else
		get_stats_from_queues(dev, stats);

	rte_bbdev_log_debug("Retrieved stats of device %u", dev_id);
	return 0;
}

int
rte_bbdev_stats_reset(uint16_t dev_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->dev_ops->stats_reset != NULL)
		dev->dev_ops->stats_reset(dev);
	else
		reset_stats_in_queues(dev);

	rte_bbdev_log_debug("Reset stats of device %u", dev_id);
	return 0;
}
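
/*
 * Usage sketch (illustrative): read and clear the counters. If the driver
 * implements no stats ops, the helpers above fall back to summing and
 * zeroing the per-queue counters.
 *
 *	struct rte_bbdev_stats stats;
 *	if (rte_bbdev_stats_get(dev_id, &stats) == 0)
 *		printf("enq=%"PRIu64" deq=%"PRIu64"\n",
 *				stats.enqueued_count, stats.dequeued_count);
 *	rte_bbdev_stats_reset(dev_id);
 */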

int
rte_bbdev_info_get(uint16_t dev_id, struct rte_bbdev_info *dev_info)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_FUNC_OR_RET_ERR(dev->dev_ops->info_get, dev_id);

	if (dev_info == NULL) {
		rte_bbdev_log(ERR, "NULL dev info structure");
		return -EINVAL;
	}

	/* Copy data maintained by device interface layer */
	memset(dev_info, 0, sizeof(*dev_info));
	dev_info->dev_name = dev->data->name;
	dev_info->num_queues = dev->data->num_queues;
	dev_info->device = dev->device;
	dev_info->socket_id = dev->data->socket_id;
	dev_info->started = dev->data->started;

	/* Copy data maintained by device driver layer */
	dev->dev_ops->info_get(dev, &dev_info->drv);

	rte_bbdev_log_debug("Retrieved info of device %u", dev_id);
	return 0;
}

int
rte_bbdev_queue_info_get(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_queue_info *queue_info)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	if (queue_info == NULL) {
		rte_bbdev_log(ERR, "NULL queue info structure");
		return -EINVAL;
	}

	/* Copy data to output */
	memset(queue_info, 0, sizeof(*queue_info));
	queue_info->conf = dev->data->queues[queue_id].conf;
	queue_info->started = dev->data->queues[queue_id].started;

	rte_bbdev_log_debug("Retrieved info of queue %u of device %u",
			queue_id, dev_id);
	return 0;
}

/* Calculate size needed to store bbdev_op, depending on type */
static unsigned int
get_bbdev_op_size(enum rte_bbdev_op_type type)
{
	unsigned int result = 0;
	switch (type) {
	case RTE_BBDEV_OP_NONE:
		result = RTE_MAX(sizeof(struct rte_bbdev_dec_op),
				sizeof(struct rte_bbdev_enc_op));
		break;
	case RTE_BBDEV_OP_TURBO_DEC:
		result = sizeof(struct rte_bbdev_dec_op);
		break;
	case RTE_BBDEV_OP_TURBO_ENC:
		result = sizeof(struct rte_bbdev_enc_op);
		break;
	case RTE_BBDEV_OP_LDPC_DEC:
		result = sizeof(struct rte_bbdev_dec_op);
		break;
	case RTE_BBDEV_OP_LDPC_ENC:
		result = sizeof(struct rte_bbdev_enc_op);
		break;
	default:
		break;
	}

	return result;
}

/* Initialise a bbdev_op structure */
static void
bbdev_op_init(struct rte_mempool *mempool, void *arg, void *element,
		__rte_unused unsigned int n)
{
	enum rte_bbdev_op_type type = *(enum rte_bbdev_op_type *)arg;

	if (type == RTE_BBDEV_OP_TURBO_DEC || type == RTE_BBDEV_OP_LDPC_DEC) {
		struct rte_bbdev_dec_op *op = element;
		memset(op, 0, mempool->elt_size);
		op->mempool = mempool;
	} else if (type == RTE_BBDEV_OP_TURBO_ENC ||
			type == RTE_BBDEV_OP_LDPC_ENC) {
		struct rte_bbdev_enc_op *op = element;
		memset(op, 0, mempool->elt_size);
		op->mempool = mempool;
	}
}

struct rte_mempool *
rte_bbdev_op_pool_create(const char *name, enum rte_bbdev_op_type type,
		unsigned int num_elements, unsigned int cache_size,
		int socket_id)
{
	struct rte_bbdev_op_pool_private *priv;
	struct rte_mempool *mp;
	const char *op_type_str;

	if (name == NULL) {
		rte_bbdev_log(ERR, "NULL name for op pool");
		return NULL;
	}

	if (type >= RTE_BBDEV_OP_TYPE_COUNT) {
		rte_bbdev_log(ERR,
				"Invalid op type (%u), should be less than %u",
				type, RTE_BBDEV_OP_TYPE_COUNT);
		return NULL;
	}

	mp = rte_mempool_create(name, num_elements, get_bbdev_op_size(type),
			cache_size, sizeof(struct rte_bbdev_op_pool_private),
			NULL, NULL, bbdev_op_init, &type, socket_id, 0);
	if (mp == NULL) {
		rte_bbdev_log(ERR,
				"Failed to create op pool %s (num ops=%u, op size=%u) with error: %s",
				name, num_elements, get_bbdev_op_size(type),
				rte_strerror(rte_errno));
		return NULL;
	}

	op_type_str = rte_bbdev_op_type_str(type);
	if (op_type_str == NULL)
		return NULL;

	rte_bbdev_log_debug(
			"Op pool %s created for %u ops (type=%s, cache=%u, socket=%d, size=%u)",
			name, num_elements, op_type_str, cache_size, socket_id,
			get_bbdev_op_size(type));

	priv = (struct rte_bbdev_op_pool_private *)rte_mempool_get_priv(mp);
	priv->type = type;

	return mp;
}
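
/*
 * Usage sketch (illustrative; pool sizing is an assumption): create a pool
 * of decode operations and draw a burst from it.
 *
 *	struct rte_bbdev_dec_op *ops[32];
 *	struct rte_mempool *pool = rte_bbdev_op_pool_create("dec_pool",
 *			RTE_BBDEV_OP_TURBO_DEC, 4096, 128, rte_socket_id());
 *	if (pool != NULL &&
 *			rte_bbdev_dec_op_alloc_bulk(pool, ops, 32) == 0) {
 *		... fill ops, enqueue and dequeue them ...
 *		rte_bbdev_dec_op_free_bulk(ops, 32);
 *	}
 */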

int
rte_bbdev_callback_register(uint16_t dev_id, enum rte_bbdev_event_type event,
		rte_bbdev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_bbdev_callback *user_cb;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	if (event >= RTE_BBDEV_EVENT_MAX) {
		rte_bbdev_log(ERR,
				"Invalid event type (%u), should be less than %u",
				event, RTE_BBDEV_EVENT_MAX);
		return -EINVAL;
	}

	if (cb_fn == NULL) {
		rte_bbdev_log(ERR, "NULL callback function");
		return -EINVAL;
	}

	rte_spinlock_lock(&rte_bbdev_cb_lock);

	TAILQ_FOREACH(user_cb, &(dev->list_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
				user_cb->cb_arg == cb_arg &&
				user_cb->event == event)
			break;
	}

	/* create a new callback. */
	if (user_cb == NULL) {
		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_bbdev_callback), 0);
		if (user_cb != NULL) {
			user_cb->cb_fn = cb_fn;
			user_cb->cb_arg = cb_arg;
			user_cb->event = event;
			TAILQ_INSERT_TAIL(&(dev->list_cbs), user_cb, next);
		}
	}

	rte_spinlock_unlock(&rte_bbdev_cb_lock);
	return (user_cb == NULL) ? -ENOMEM : 0;
}
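
/*
 * Usage sketch (illustrative; the handler body is hypothetical): register
 * for error events raised by the driver.
 *
 *	static void
 *	error_cb(uint16_t dev_id, enum rte_bbdev_event_type event,
 *			void *cb_arg, void *ret_param)
 *	{
 *		printf("bbdev %u raised event %d\n", dev_id, (int)event);
 *	}
 *	...
 *	rte_bbdev_callback_register(dev_id, RTE_BBDEV_EVENT_ERROR,
 *			error_cb, NULL);
 */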

int
rte_bbdev_callback_unregister(uint16_t dev_id, enum rte_bbdev_event_type event,
		rte_bbdev_cb_fn cb_fn, void *cb_arg)
{
	int ret = 0;
	struct rte_bbdev_callback *cb, *next;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	if (event >= RTE_BBDEV_EVENT_MAX) {
		rte_bbdev_log(ERR,
				"Invalid event type (%u), should be less than %u",
				event, RTE_BBDEV_EVENT_MAX);
		return -EINVAL;
	}

	if (cb_fn == NULL) {
		rte_bbdev_log(ERR,
				"NULL callback function cannot be unregistered");
		return -EINVAL;
	}

	rte_spinlock_lock(&rte_bbdev_cb_lock);

	for (cb = TAILQ_FIRST(&dev->list_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
			continue;

		/* If this callback is not executing right now, remove it. */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->list_cbs), cb, next);
			rte_free(cb);
		} else
			ret = -EAGAIN;
	}

	rte_spinlock_unlock(&rte_bbdev_cb_lock);
	return ret;
}

void
rte_bbdev_pmd_callback_process(struct rte_bbdev *dev,
	enum rte_bbdev_event_type event, void *ret_param)
{
	struct rte_bbdev_callback *cb_lst;
	struct rte_bbdev_callback dev_cb;

	if (dev == NULL) {
		rte_bbdev_log(ERR, "NULL device");
		return;
	}

	if (dev->data == NULL) {
		rte_bbdev_log(ERR, "NULL data structure");
		return;
	}

	if (event >= RTE_BBDEV_EVENT_MAX) {
		rte_bbdev_log(ERR,
				"Invalid event type (%u), should be less than %u",
				event, RTE_BBDEV_EVENT_MAX);
		return;
	}

	rte_spinlock_lock(&rte_bbdev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->list_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		if (ret_param != NULL)
			dev_cb.ret_param = ret_param;

		rte_spinlock_unlock(&rte_bbdev_cb_lock);
		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
				dev_cb.cb_arg, dev_cb.ret_param);
		rte_spinlock_lock(&rte_bbdev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_bbdev_cb_lock);
}
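
/*
 * Driver-side sketch (illustrative): a PMD's interrupt handler would notify
 * registered applications with a single call such as
 *
 *	rte_bbdev_pmd_callback_process(dev, RTE_BBDEV_EVENT_ERROR, NULL);
 */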

int
rte_bbdev_queue_intr_enable(uint16_t dev_id, uint16_t queue_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);
	VALID_QUEUE_OR_RET_ERR(queue_id, dev);
	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_intr_enable, dev_id);
	return dev->dev_ops->queue_intr_enable(dev, queue_id);
}

int
rte_bbdev_queue_intr_disable(uint16_t dev_id, uint16_t queue_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);
	VALID_QUEUE_OR_RET_ERR(queue_id, dev);
	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_intr_disable, dev_id);
	return dev->dev_ops->queue_intr_disable(dev, queue_id);
}

int
rte_bbdev_queue_intr_ctl(uint16_t dev_id, uint16_t queue_id, int epfd, int op,
		void *data)
{
	uint32_t vec;
	struct rte_bbdev *dev = get_dev(dev_id);
	struct rte_intr_handle *intr_handle;
	int ret;

	VALID_DEV_OR_RET_ERR(dev, dev_id);
	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	intr_handle = dev->intr_handle;
	if (intr_handle == NULL) {
		rte_bbdev_log(ERR, "Device %u intr handle unset", dev_id);
		return -ENOTSUP;
	}

	if (queue_id >= RTE_MAX_RXTX_INTR_VEC_ID) {
		rte_bbdev_log(ERR, "Device %u queue_id %u is too big",
				dev_id, queue_id);
		return -ENOTSUP;
	}

	vec = rte_intr_vec_list_index_get(intr_handle, queue_id);
	ret = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
	if (ret && (ret != -EEXIST)) {
		rte_bbdev_log(ERR,
				"dev %u q %u int ctl error op %d epfd %d vec %u",
				dev_id, queue_id, op, epfd, vec);
		return ret;
	}

	return 0;
}
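
/*
 * Usage sketch (illustrative; assumes the PMD supports interrupts and they
 * were enabled beforehand via rte_bbdev_intr_enable()): arm a queue
 * interrupt on the calling thread's epoll instance and block until it fires.
 *
 *	struct rte_epoll_event ev;
 *	rte_bbdev_queue_intr_ctl(dev_id, queue_id, RTE_EPOLL_PER_THREAD,
 *			RTE_INTR_EVENT_ADD, NULL);
 *	rte_bbdev_queue_intr_enable(dev_id, queue_id);
 *	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
 */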

const char *
rte_bbdev_op_type_str(enum rte_bbdev_op_type op_type)
{
	static const char * const op_types[] = {
		"RTE_BBDEV_OP_NONE",
		"RTE_BBDEV_OP_TURBO_DEC",
		"RTE_BBDEV_OP_TURBO_ENC",
		"RTE_BBDEV_OP_LDPC_DEC",
		"RTE_BBDEV_OP_LDPC_ENC",
	};

	if (op_type < RTE_BBDEV_OP_TYPE_COUNT)
		return op_types[op_type];

	rte_bbdev_log(ERR, "Invalid operation type");
	return NULL;
}