/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdint.h>
#include <string.h>
#include <stdbool.h>

#include <rte_compat.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_eal.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_memzone.h>
#include <rte_lcore.h>
#include <rte_dev.h>
#include <rte_spinlock.h>
#include <rte_tailq.h>
#include <rte_interrupts.h>

#include "rte_bbdev_op.h"
#include "rte_bbdev.h"
#include "rte_bbdev_pmd.h"

#define DEV_NAME "BBDEV"


/* BBDev library logging ID */
RTE_LOG_REGISTER_DEFAULT(bbdev_logtype, NOTICE);

/* Helper macro for logging */
#define rte_bbdev_log(level, fmt, ...) \
	rte_log(RTE_LOG_ ## level, bbdev_logtype, fmt "\n", ##__VA_ARGS__)

#define rte_bbdev_log_debug(fmt, ...) \
	rte_bbdev_log(DEBUG, RTE_STR(__LINE__) ":%s() " fmt, __func__, \
		##__VA_ARGS__)

/* Helper macro to check dev_id is valid */
#define VALID_DEV_OR_RET_ERR(dev, dev_id) do { \
	if (dev == NULL) { \
		rte_bbdev_log(ERR, "device %u is invalid", dev_id); \
		return -ENODEV; \
	} \
} while (0)

/* Helper macro to check dev_ops is valid */
#define VALID_DEV_OPS_OR_RET_ERR(dev, dev_id) do { \
	if (dev->dev_ops == NULL) { \
		rte_bbdev_log(ERR, "NULL dev_ops structure in device %u", \
				dev_id); \
		return -ENODEV; \
	} \
} while (0)

/* Helper macro to check that driver implements required function pointer */
#define VALID_FUNC_OR_RET_ERR(func, dev_id) do { \
	if (func == NULL) { \
		rte_bbdev_log(ERR, "device %u does not support %s", \
				dev_id, #func); \
		return -ENOTSUP; \
	} \
} while (0)

/* Helper macro to check that queue is valid */
#define VALID_QUEUE_OR_RET_ERR(queue_id, dev) do { \
	if (queue_id >= dev->data->num_queues) { \
		rte_bbdev_log(ERR, "Invalid queue_id %u for device %u", \
				queue_id, dev->data->dev_id); \
		return -ERANGE; \
	} \
} while (0)

/* List of callback functions registered by an application */
struct rte_bbdev_callback {
	TAILQ_ENTRY(rte_bbdev_callback) next;  /* Callbacks list */
	rte_bbdev_cb_fn cb_fn;  /* Callback address */
	void *cb_arg;  /* Parameter for callback */
	void *ret_param;  /* Return parameter */
	enum rte_bbdev_event_type event; /* Interrupt event type */
	uint32_t active; /* Callback is executing */
};

/* spinlock for bbdev device callbacks */
static rte_spinlock_t rte_bbdev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/*
 * Global array of all devices. This is not static because it's used by the
 * inline enqueue and dequeue functions
 */
struct rte_bbdev rte_bbdev_devices[RTE_BBDEV_MAX_DEVS];

/* Global array with rte_bbdev_data structures */
static struct rte_bbdev_data *rte_bbdev_data;

/* Memzone name for global bbdev data pool */
static const char *MZ_RTE_BBDEV_DATA = "rte_bbdev_data";

/* Number of currently valid devices */
static uint16_t num_devs;

/* Return pointer to device structure, with validity check */
static struct rte_bbdev *
get_dev(uint16_t dev_id)
{
	if (rte_bbdev_is_valid(dev_id))
		return &rte_bbdev_devices[dev_id];
	return NULL;
}

/* Allocate global data array */
static int
rte_bbdev_data_alloc(void)
{
	const unsigned int flags = 0;
	const struct rte_memzone *mz;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(MZ_RTE_BBDEV_DATA,
				RTE_BBDEV_MAX_DEVS * sizeof(*rte_bbdev_data),
				rte_socket_id(), flags);
	} else
		mz = rte_memzone_lookup(MZ_RTE_BBDEV_DATA);
	if (mz == NULL) {
		rte_bbdev_log(CRIT,
				"Cannot allocate memzone for bbdev port data");
		return -ENOMEM;
	}

	rte_bbdev_data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(rte_bbdev_data, 0,
				RTE_BBDEV_MAX_DEVS * sizeof(*rte_bbdev_data));
	return 0;
}

/*
 * Find the data allocated for the device; if none is found, return the first
 * unused bbdev data slot. If all structures are in use and none belongs to
 * the device, return NULL.
 */
static struct rte_bbdev_data *
find_bbdev_data(const char *name)
{
	uint16_t data_id;

	for (data_id = 0; data_id < RTE_BBDEV_MAX_DEVS; ++data_id) {
		if (strlen(rte_bbdev_data[data_id].name) == 0) {
			memset(&rte_bbdev_data[data_id], 0,
					sizeof(struct rte_bbdev_data));
			return &rte_bbdev_data[data_id];
		} else if (strncmp(rte_bbdev_data[data_id].name, name,
				RTE_BBDEV_NAME_MAX_LEN) == 0)
			return &rte_bbdev_data[data_id];
	}

	return NULL;
}

/* Find lowest device id with no attached device */
static uint16_t
find_free_dev_id(void)
{
	uint16_t i;
	for (i = 0; i < RTE_BBDEV_MAX_DEVS; i++) {
		if (rte_bbdev_devices[i].state == RTE_BBDEV_UNUSED)
			return i;
	}
	return RTE_BBDEV_MAX_DEVS;
}

struct rte_bbdev *
rte_bbdev_allocate(const char *name)
{
	int ret;
	struct rte_bbdev *bbdev;
	uint16_t dev_id;

	if (name == NULL) {
		rte_bbdev_log(ERR, "Invalid null device name");
		return NULL;
	}

	if (rte_bbdev_get_named_dev(name) != NULL) {
		rte_bbdev_log(ERR, "Device \"%s\" is already allocated", name);
		return NULL;
	}

	dev_id = find_free_dev_id();
	if (dev_id == RTE_BBDEV_MAX_DEVS) {
		rte_bbdev_log(ERR, "Reached maximum number of devices");
		return NULL;
	}

	bbdev = &rte_bbdev_devices[dev_id];

	if (rte_bbdev_data == NULL) {
		ret = rte_bbdev_data_alloc();
		if (ret != 0)
			return NULL;
	}

	bbdev->data = find_bbdev_data(name);
	if (bbdev->data == NULL) {
		rte_bbdev_log(ERR,
				"Max BBDevs already allocated in multi-process environment!");
		return NULL;
	}

	__atomic_add_fetch(&bbdev->data->process_cnt, 1, __ATOMIC_RELAXED);
	bbdev->data->dev_id = dev_id;
	bbdev->state = RTE_BBDEV_INITIALIZED;

	ret = snprintf(bbdev->data->name, RTE_BBDEV_NAME_MAX_LEN, "%s", name);
	if ((ret < 0) || (ret >= RTE_BBDEV_NAME_MAX_LEN)) {
		rte_bbdev_log(ERR, "Copying device name \"%s\" failed", name);
		return NULL;
	}

	/* init user callbacks */
	TAILQ_INIT(&(bbdev->list_cbs));

	num_devs++;

	rte_bbdev_log_debug("Initialised device %s (id = %u). Num devices = %u",
			name, dev_id, num_devs);

	return bbdev;
}
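
/*
 * Usage sketch (editorial illustration, not part of the upstream file): how a
 * PMD probe path typically pairs rte_bbdev_allocate() with the release call
 * below. The driver name, ops table and probe hook are hypothetical.
 *
 *	static int
 *	my_bbdev_probe(struct rte_vdev_device *vdev)	// hypothetical hook
 *	{
 *		struct rte_bbdev *bbdev = rte_bbdev_allocate("my_bbdev0");
 *
 *		if (bbdev == NULL)
 *			return -ENODEV;
 *		bbdev->dev_ops = &my_bbdev_ops;	// hypothetical ops table
 *		bbdev->data->socket_id = rte_socket_id();
 *		return 0;
 *	}
 */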

int
rte_bbdev_release(struct rte_bbdev *bbdev)
{
	uint16_t dev_id;
	struct rte_bbdev_callback *cb, *next;

	if (bbdev == NULL) {
		rte_bbdev_log(ERR, "NULL bbdev");
		return -ENODEV;
	}
	dev_id = bbdev->data->dev_id;

	/* free all callbacks from the device's list */
	for (cb = TAILQ_FIRST(&bbdev->list_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);
		TAILQ_REMOVE(&(bbdev->list_cbs), cb, next);
		rte_free(cb);
	}

	/* clear shared BBDev Data if no process is using the device anymore */
	if (__atomic_sub_fetch(&bbdev->data->process_cnt, 1,
			      __ATOMIC_RELAXED) == 0)
		memset(bbdev->data, 0, sizeof(*bbdev->data));

	memset(bbdev, 0, sizeof(*bbdev));
	num_devs--;
	bbdev->state = RTE_BBDEV_UNUSED;

	rte_bbdev_log_debug(
			"Un-initialised device id = %u. Num devices = %u",
			dev_id, num_devs);
	return 0;
}

struct rte_bbdev *
rte_bbdev_get_named_dev(const char *name)
{
	unsigned int i;

	if (name == NULL) {
		rte_bbdev_log(ERR, "NULL driver name");
		return NULL;
	}

	for (i = 0; i < RTE_BBDEV_MAX_DEVS; i++) {
		struct rte_bbdev *dev = get_dev(i);
		if (dev && (strncmp(dev->data->name,
				name, RTE_BBDEV_NAME_MAX_LEN) == 0))
			return dev;
	}

	return NULL;
}

uint16_t
rte_bbdev_count(void)
{
	return num_devs;
}

bool
rte_bbdev_is_valid(uint16_t dev_id)
{
	if ((dev_id < RTE_BBDEV_MAX_DEVS) &&
		rte_bbdev_devices[dev_id].state == RTE_BBDEV_INITIALIZED)
		return true;
	return false;
}

uint16_t
rte_bbdev_find_next(uint16_t dev_id)
{
	dev_id++;
	for (; dev_id < RTE_BBDEV_MAX_DEVS; dev_id++)
		if (rte_bbdev_is_valid(dev_id))
			break;
	return dev_id;
}
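
/*
 * Usage sketch (editorial illustration): enumerating devices with
 * rte_bbdev_find_next(). Passing -1 wraps to UINT16_MAX, so the increment
 * above starts the scan at id 0; the RTE_BBDEV_FOREACH() helper in
 * rte_bbdev.h provides the same loop.
 *
 *	uint16_t dev_id;
 *
 *	for (dev_id = rte_bbdev_find_next(-1); dev_id < RTE_BBDEV_MAX_DEVS;
 *			dev_id = rte_bbdev_find_next(dev_id))
 *		rte_bbdev_log(INFO, "found device %u of %u", dev_id,
 *				rte_bbdev_count());
 */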

int
rte_bbdev_setup_queues(uint16_t dev_id, uint16_t num_queues, int socket_id)
{
	unsigned int i;
	int ret;
	struct rte_bbdev_driver_info dev_info;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->data->started) {
		rte_bbdev_log(ERR,
				"Device %u cannot be configured when started",
				dev_id);
		return -EBUSY;
	}

	/* Get device driver information to get max number of queues */
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->info_get, dev_id);
	memset(&dev_info, 0, sizeof(dev_info));
	dev->dev_ops->info_get(dev, &dev_info);

	if ((num_queues == 0) || (num_queues > dev_info.max_num_queues)) {
		rte_bbdev_log(ERR,
				"Device %u supports 0 < N <= %u queues, not %u",
				dev_id, dev_info.max_num_queues, num_queues);
		return -EINVAL;
	}

	/* If re-configuration, get driver to free existing internal memory */
	if (dev->data->queues != NULL) {
		VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_release, dev_id);
		for (i = 0; i < dev->data->num_queues; i++) {
			int ret = dev->dev_ops->queue_release(dev, i);
			if (ret < 0) {
				rte_bbdev_log(ERR,
						"Device %u queue %u release failed",
						dev_id, i);
				return ret;
			}
		}
		/* Call optional device close */
		if (dev->dev_ops->close) {
			ret = dev->dev_ops->close(dev);
			if (ret < 0) {
				rte_bbdev_log(ERR,
						"Device %u couldn't be closed",
						dev_id);
				return ret;
			}
		}
		rte_free(dev->data->queues);
	}

	/* Allocate queue pointers */
	dev->data->queues = rte_calloc_socket(DEV_NAME, num_queues,
			sizeof(dev->data->queues[0]), RTE_CACHE_LINE_SIZE,
				dev->data->socket_id);
	if (dev->data->queues == NULL) {
		rte_bbdev_log(ERR,
				"calloc of %u queues for device %u on socket %i failed",
				num_queues, dev_id, dev->data->socket_id);
		return -ENOMEM;
	}

	dev->data->num_queues = num_queues;

	/* Call optional device configuration */
	if (dev->dev_ops->setup_queues) {
		ret = dev->dev_ops->setup_queues(dev, num_queues, socket_id);
		if (ret < 0) {
			rte_bbdev_log(ERR,
					"Device %u memory configuration failed",
					dev_id);
			goto error;
		}
	}

	rte_bbdev_log_debug("Device %u set up with %u queues", dev_id,
			num_queues);
	return 0;

error:
	dev->data->num_queues = 0;
	rte_free(dev->data->queues);
	dev->data->queues = NULL;
	return ret;
}
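
/*
 * Usage sketch (editorial illustration): the usual application-side bring-up
 * order built on the functions in this file. Error handling is abbreviated
 * and the device id is an assumption.
 *
 *	uint16_t dev_id = 0, q_id;
 *	struct rte_bbdev_info info;
 *
 *	rte_bbdev_info_get(dev_id, &info);
 *	rte_bbdev_setup_queues(dev_id, info.drv.max_num_queues,
 *			rte_socket_id());
 *	for (q_id = 0; q_id < info.drv.max_num_queues; q_id++)
 *		rte_bbdev_queue_configure(dev_id, q_id, NULL);
 *	rte_bbdev_start(dev_id);
 */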

int
rte_bbdev_intr_enable(uint16_t dev_id)
{
	int ret;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->data->started) {
		rte_bbdev_log(ERR,
				"Device %u cannot be configured when started",
				dev_id);
		return -EBUSY;
	}

	if (dev->dev_ops->intr_enable) {
		ret = dev->dev_ops->intr_enable(dev);
		if (ret < 0) {
			rte_bbdev_log(ERR,
					"Device %u interrupts configuration failed",
					dev_id);
			return ret;
		}
		rte_bbdev_log_debug("Enabled interrupts for dev %u", dev_id);
		return 0;
	}

	rte_bbdev_log(ERR, "Device %u doesn't support interrupts", dev_id);
	return -ENOTSUP;
}

int
rte_bbdev_queue_configure(uint16_t dev_id, uint16_t queue_id,
		const struct rte_bbdev_queue_conf *conf)
{
	int ret = 0;
	struct rte_bbdev_driver_info dev_info;
	struct rte_bbdev *dev = get_dev(dev_id);
	const struct rte_bbdev_op_cap *p;
	struct rte_bbdev_queue_conf *stored_conf;
	const char *op_type_str;
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	if (dev->data->queues[queue_id].started || dev->data->started) {
		rte_bbdev_log(ERR,
				"Queue %u of device %u cannot be configured when started",
				queue_id, dev_id);
		return -EBUSY;
	}

	VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_release, dev_id);
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_setup, dev_id);

	/* Get device driver information to verify config is valid */
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->info_get, dev_id);
	memset(&dev_info, 0, sizeof(dev_info));
	dev->dev_ops->info_get(dev, &dev_info);

	/* Check configuration is valid */
	if (conf != NULL) {
		if ((conf->op_type == RTE_BBDEV_OP_NONE) &&
				(dev_info.capabilities[0].type ==
				RTE_BBDEV_OP_NONE)) {
			ret = 1;
		} else {
			for (p = dev_info.capabilities;
					p->type != RTE_BBDEV_OP_NONE; p++) {
				if (conf->op_type == p->type) {
					ret = 1;
					break;
				}
			}
		}
		if (ret == 0) {
			rte_bbdev_log(ERR, "Invalid operation type");
			return -EINVAL;
		}
		if (conf->queue_size > dev_info.queue_size_lim) {
			rte_bbdev_log(ERR,
					"Size (%u) of queue %u of device %u must be: <= %u",
					conf->queue_size, queue_id, dev_id,
					dev_info.queue_size_lim);
			return -EINVAL;
		}
		if (!rte_is_power_of_2(conf->queue_size)) {
			rte_bbdev_log(ERR,
					"Size (%u) of queue %u of device %u must be a power of 2",
					conf->queue_size, queue_id, dev_id);
			return -EINVAL;
		}
		if (conf->op_type == RTE_BBDEV_OP_TURBO_DEC &&
			conf->priority > dev_info.max_ul_queue_priority) {
			rte_bbdev_log(ERR,
					"Priority (%u) of queue %u of bbdev %u must be <= %u",
					conf->priority, queue_id, dev_id,
					dev_info.max_ul_queue_priority);
			return -EINVAL;
		}
		if (conf->op_type == RTE_BBDEV_OP_TURBO_ENC &&
			conf->priority > dev_info.max_dl_queue_priority) {
			rte_bbdev_log(ERR,
					"Priority (%u) of queue %u of bbdev %u must be <= %u",
					conf->priority, queue_id, dev_id,
					dev_info.max_dl_queue_priority);
			return -EINVAL;
		}
	}

	/* Release existing queue (in case of queue reconfiguration) */
	if (dev->data->queues[queue_id].queue_private != NULL) {
		ret = dev->dev_ops->queue_release(dev, queue_id);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u queue %u release failed",
					dev_id, queue_id);
			return ret;
		}
	}

	/* Get driver to setup the queue */
	ret = dev->dev_ops->queue_setup(dev, queue_id, (conf != NULL) ?
			conf : &dev_info.default_queue_conf);
	if (ret < 0) {
		/* This may happen when trying different priority levels */
		rte_bbdev_log(INFO,
				"Device %u queue %u setup failed",
				dev_id, queue_id);
		return ret;
	}

	/* Store configuration */
	stored_conf = &dev->data->queues[queue_id].conf;
	memcpy(stored_conf,
			(conf != NULL) ? conf : &dev_info.default_queue_conf,
			sizeof(*stored_conf));

	op_type_str = rte_bbdev_op_type_str(stored_conf->op_type);
	if (op_type_str == NULL)
		return -EINVAL;

	rte_bbdev_log_debug("Configured dev%uq%u (size=%u, type=%s, prio=%u)",
			dev_id, queue_id, stored_conf->queue_size, op_type_str,
			stored_conf->priority);

	return 0;
}
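
/*
 * Usage sketch (editorial illustration): configuring one queue with an
 * explicit rte_bbdev_queue_conf rather than the driver default used when
 * conf is NULL. The size and priority values are assumptions; queue_size
 * must be a power of two no larger than queue_size_lim, as enforced above.
 *
 *	struct rte_bbdev_queue_conf conf = {
 *		.socket = rte_socket_id(),
 *		.queue_size = 512,
 *		.priority = 0,
 *		.deferred_start = false,
 *		.op_type = RTE_BBDEV_OP_TURBO_DEC,
 *	};
 *
 *	if (rte_bbdev_queue_configure(dev_id, 0, &conf) != 0)
 *		rte_bbdev_log(ERR, "queue 0 configuration failed");
 */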

int
rte_bbdev_start(uint16_t dev_id)
{
	int i;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->data->started) {
		rte_bbdev_log_debug("Device %u is already started", dev_id);
		return 0;
	}

	if (dev->dev_ops->start) {
		int ret = dev->dev_ops->start(dev);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u start failed", dev_id);
			return ret;
		}
	}

	/* Store new state */
	for (i = 0; i < dev->data->num_queues; i++)
		if (!dev->data->queues[i].conf.deferred_start)
			dev->data->queues[i].started = true;
	dev->data->started = true;

	rte_bbdev_log_debug("Started device %u", dev_id);
	return 0;
}
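
/*
 * Usage sketch (editorial illustration): a queue configured with
 * deferred_start = true is skipped by rte_bbdev_start() above and has to be
 * started individually.
 *
 *	rte_bbdev_start(dev_id);		// deferred queues stay stopped
 *	rte_bbdev_queue_start(dev_id, q_id);	// start one explicitly
 */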

int
rte_bbdev_stop(uint16_t dev_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (!dev->data->started) {
		rte_bbdev_log_debug("Device %u is already stopped", dev_id);
		return 0;
	}

	if (dev->dev_ops->stop)
		dev->dev_ops->stop(dev);
	dev->data->started = false;

	rte_bbdev_log_debug("Stopped device %u", dev_id);
	return 0;
}

int
rte_bbdev_close(uint16_t dev_id)
{
	int ret;
	uint16_t i;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->data->started) {
		ret = rte_bbdev_stop(dev_id);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u stop failed", dev_id);
			return ret;
		}
	}

	/* Free memory used by queues */
	for (i = 0; i < dev->data->num_queues; i++) {
		ret = dev->dev_ops->queue_release(dev, i);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u queue %u release failed",
					dev_id, i);
			return ret;
		}
	}
	rte_free(dev->data->queues);

	if (dev->dev_ops->close) {
		ret = dev->dev_ops->close(dev);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u close failed", dev_id);
			return ret;
		}
	}

	/* Clear configuration */
	dev->data->queues = NULL;
	dev->data->num_queues = 0;

	rte_bbdev_log_debug("Closed device %u", dev_id);
	return 0;
}

int
rte_bbdev_queue_start(uint16_t dev_id, uint16_t queue_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	if (dev->data->queues[queue_id].started) {
		rte_bbdev_log_debug("Queue %u of device %u already started",
				queue_id, dev_id);
		return 0;
	}

	if (dev->dev_ops->queue_start) {
		int ret = dev->dev_ops->queue_start(dev, queue_id);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u queue %u start failed",
					dev_id, queue_id);
			return ret;
		}
	}
	dev->data->queues[queue_id].started = true;

	rte_bbdev_log_debug("Started queue %u of device %u", queue_id, dev_id);
	return 0;
}

int
rte_bbdev_queue_stop(uint16_t dev_id, uint16_t queue_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	if (!dev->data->queues[queue_id].started) {
		rte_bbdev_log_debug("Queue %u of device %u already stopped",
				queue_id, dev_id);
		return 0;
	}

	if (dev->dev_ops->queue_stop) {
		int ret = dev->dev_ops->queue_stop(dev, queue_id);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u queue %u stop failed",
					dev_id, queue_id);
			return ret;
		}
	}
	dev->data->queues[queue_id].started = false;

	rte_bbdev_log_debug("Stopped queue %u of device %u", queue_id, dev_id);
	return 0;
}

/* Get device statistics */
static void
get_stats_from_queues(struct rte_bbdev *dev, struct rte_bbdev_stats *stats)
{
	unsigned int q_id;
	for (q_id = 0; q_id < dev->data->num_queues; q_id++) {
		struct rte_bbdev_stats *q_stats =
				&dev->data->queues[q_id].queue_stats;

		stats->enqueued_count += q_stats->enqueued_count;
		stats->dequeued_count += q_stats->dequeued_count;
		stats->enqueue_err_count += q_stats->enqueue_err_count;
		stats->dequeue_err_count += q_stats->dequeue_err_count;
	}
	rte_bbdev_log_debug("Got stats on %u", dev->data->dev_id);
}

static void
reset_stats_in_queues(struct rte_bbdev *dev)
{
	unsigned int q_id;
	for (q_id = 0; q_id < dev->data->num_queues; q_id++) {
		struct rte_bbdev_stats *q_stats =
				&dev->data->queues[q_id].queue_stats;

		memset(q_stats, 0, sizeof(*q_stats));
	}
	rte_bbdev_log_debug("Reset stats on %u", dev->data->dev_id);
}

int
rte_bbdev_stats_get(uint16_t dev_id, struct rte_bbdev_stats *stats)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (stats == NULL) {
		rte_bbdev_log(ERR, "NULL stats structure");
		return -EINVAL;
	}

	memset(stats, 0, sizeof(*stats));
	if (dev->dev_ops->stats_get != NULL)
		dev->dev_ops->stats_get(dev, stats);
	else
		get_stats_from_queues(dev, stats);

	rte_bbdev_log_debug("Retrieved stats of device %u", dev_id);
	return 0;
}
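
/*
 * Usage sketch (editorial illustration): periodic statistics polling. The
 * counters accumulate until rte_bbdev_stats_reset() is called; PRIu64 comes
 * from <inttypes.h>.
 *
 *	struct rte_bbdev_stats stats;
 *
 *	if (rte_bbdev_stats_get(dev_id, &stats) == 0)
 *		rte_bbdev_log(INFO, "dev %u: enq=%" PRIu64 " deq=%" PRIu64,
 *				dev_id, stats.enqueued_count,
 *				stats.dequeued_count);
 *	rte_bbdev_stats_reset(dev_id);
 */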

int
rte_bbdev_stats_reset(uint16_t dev_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->dev_ops->stats_reset != NULL)
		dev->dev_ops->stats_reset(dev);
	else
		reset_stats_in_queues(dev);

	rte_bbdev_log_debug("Reset stats of device %u", dev_id);
	return 0;
}

int
rte_bbdev_info_get(uint16_t dev_id, struct rte_bbdev_info *dev_info)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_FUNC_OR_RET_ERR(dev->dev_ops->info_get, dev_id);

	if (dev_info == NULL) {
		rte_bbdev_log(ERR, "NULL dev info structure");
		return -EINVAL;
	}

	/* Copy data maintained by device interface layer */
	memset(dev_info, 0, sizeof(*dev_info));
	dev_info->dev_name = dev->data->name;
	dev_info->num_queues = dev->data->num_queues;
	dev_info->device = dev->device;
	dev_info->socket_id = dev->data->socket_id;
	dev_info->started = dev->data->started;

	/* Copy data maintained by device driver layer */
	dev->dev_ops->info_get(dev, &dev_info->drv);

	rte_bbdev_log_debug("Retrieved info of device %u", dev_id);
	return 0;
}
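
/*
 * Usage sketch (editorial illustration): walking the capability list that
 * the driver fills in via info_get, here to check for LDPC decode support
 * before configuring a queue of that type.
 *
 *	struct rte_bbdev_info info;
 *	const struct rte_bbdev_op_cap *cap;
 *
 *	rte_bbdev_info_get(dev_id, &info);
 *	for (cap = info.drv.capabilities; cap->type != RTE_BBDEV_OP_NONE; cap++)
 *		if (cap->type == RTE_BBDEV_OP_LDPC_DEC)
 *			break;	// device supports LDPC decode
 */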

int
rte_bbdev_queue_info_get(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_queue_info *queue_info)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	if (queue_info == NULL) {
		rte_bbdev_log(ERR, "NULL queue info structure");
		return -EINVAL;
	}

	/* Copy data to output */
	memset(queue_info, 0, sizeof(*queue_info));
	queue_info->conf = dev->data->queues[queue_id].conf;
	queue_info->started = dev->data->queues[queue_id].started;

	rte_bbdev_log_debug("Retrieved info of queue %u of device %u",
			queue_id, dev_id);
	return 0;
}

/* Calculate size needed to store bbdev_op, depending on type */
static unsigned int
get_bbdev_op_size(enum rte_bbdev_op_type type)
{
	unsigned int result = 0;
	switch (type) {
	case RTE_BBDEV_OP_NONE:
		result = RTE_MAX(sizeof(struct rte_bbdev_dec_op),
				sizeof(struct rte_bbdev_enc_op));
		break;
	case RTE_BBDEV_OP_TURBO_DEC:
		result = sizeof(struct rte_bbdev_dec_op);
		break;
	case RTE_BBDEV_OP_TURBO_ENC:
		result = sizeof(struct rte_bbdev_enc_op);
		break;
	case RTE_BBDEV_OP_LDPC_DEC:
		result = sizeof(struct rte_bbdev_dec_op);
		break;
	case RTE_BBDEV_OP_LDPC_ENC:
		result = sizeof(struct rte_bbdev_enc_op);
		break;
	default:
		break;
	}

	return result;
}

/* Initialise a bbdev_op structure */
static void
bbdev_op_init(struct rte_mempool *mempool, void *arg, void *element,
		__rte_unused unsigned int n)
{
	enum rte_bbdev_op_type type = *(enum rte_bbdev_op_type *)arg;

	if (type == RTE_BBDEV_OP_TURBO_DEC || type == RTE_BBDEV_OP_LDPC_DEC) {
		struct rte_bbdev_dec_op *op = element;
		memset(op, 0, mempool->elt_size);
		op->mempool = mempool;
	} else if (type == RTE_BBDEV_OP_TURBO_ENC ||
			type == RTE_BBDEV_OP_LDPC_ENC) {
		struct rte_bbdev_enc_op *op = element;
		memset(op, 0, mempool->elt_size);
		op->mempool = mempool;
	}
}

struct rte_mempool *
rte_bbdev_op_pool_create(const char *name, enum rte_bbdev_op_type type,
		unsigned int num_elements, unsigned int cache_size,
		int socket_id)
{
	struct rte_bbdev_op_pool_private *priv;
	struct rte_mempool *mp;
	const char *op_type_str;

	if (name == NULL) {
		rte_bbdev_log(ERR, "NULL name for op pool");
		return NULL;
	}

	if (type >= RTE_BBDEV_OP_TYPE_COUNT) {
		rte_bbdev_log(ERR,
				"Invalid op type (%u), should be less than %u",
				type, RTE_BBDEV_OP_TYPE_COUNT);
		return NULL;
	}

	mp = rte_mempool_create(name, num_elements, get_bbdev_op_size(type),
			cache_size, sizeof(struct rte_bbdev_op_pool_private),
			NULL, NULL, bbdev_op_init, &type, socket_id, 0);
	if (mp == NULL) {
		rte_bbdev_log(ERR,
				"Failed to create op pool %s (num ops=%u, op size=%u) with error: %s",
				name, num_elements, get_bbdev_op_size(type),
				rte_strerror(rte_errno));
		return NULL;
	}

	op_type_str = rte_bbdev_op_type_str(type);
	if (op_type_str == NULL)
		return NULL;

	rte_bbdev_log_debug(
			"Op pool %s created for %u ops (type=%s, cache=%u, socket=%u, size=%u)",
			name, num_elements, op_type_str, cache_size, socket_id,
			get_bbdev_op_size(type));

	priv = (struct rte_bbdev_op_pool_private *)rte_mempool_get_priv(mp);
	priv->type = type;

	return mp;
}
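
/*
 * Usage sketch (editorial illustration): creating an op pool and drawing a
 * burst of encode ops from it with the rte_bbdev_enc_op_alloc_bulk() helper
 * from rte_bbdev_op.h. The pool name and sizing are assumptions.
 *
 *	struct rte_bbdev_enc_op *ops[32];
 *	struct rte_mempool *pool = rte_bbdev_op_pool_create("enc_op_pool",
 *			RTE_BBDEV_OP_TURBO_ENC, 4096, 128, rte_socket_id());
 *
 *	if (pool != NULL && rte_bbdev_enc_op_alloc_bulk(pool, ops, 32) == 0) {
 *		// ... fill the ops and enqueue them ...
 *		rte_bbdev_enc_op_free_bulk(ops, 32);
 *	}
 */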

int
rte_bbdev_callback_register(uint16_t dev_id, enum rte_bbdev_event_type event,
		rte_bbdev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_bbdev_callback *user_cb;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	if (event >= RTE_BBDEV_EVENT_MAX) {
		rte_bbdev_log(ERR,
				"Invalid event type (%u), should be less than %u",
				event, RTE_BBDEV_EVENT_MAX);
		return -EINVAL;
	}

	if (cb_fn == NULL) {
		rte_bbdev_log(ERR, "NULL callback function");
		return -EINVAL;
	}

	rte_spinlock_lock(&rte_bbdev_cb_lock);

	TAILQ_FOREACH(user_cb, &(dev->list_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
				user_cb->cb_arg == cb_arg &&
				user_cb->event == event)
			break;
	}

	/* create a new callback. */
	if (user_cb == NULL) {
		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_bbdev_callback), 0);
		if (user_cb != NULL) {
			user_cb->cb_fn = cb_fn;
			user_cb->cb_arg = cb_arg;
			user_cb->event = event;
			TAILQ_INSERT_TAIL(&(dev->list_cbs), user_cb, next);
		}
	}

	rte_spinlock_unlock(&rte_bbdev_cb_lock);
	return (user_cb == NULL) ? -ENOMEM : 0;
}
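
/*
 * Usage sketch (editorial illustration): registering an application handler
 * for device error events; it is invoked from
 * rte_bbdev_pmd_callback_process() below. The handler name is hypothetical.
 *
 *	static void
 *	error_cb(uint16_t dev_id, enum rte_bbdev_event_type event,
 *			void *cb_arg, void *ret_param)
 *	{
 *		RTE_SET_USED(cb_arg);
 *		RTE_SET_USED(ret_param);
 *		rte_bbdev_log(ERR, "device %u raised event %d", dev_id, event);
 *	}
 *
 *	rte_bbdev_callback_register(dev_id, RTE_BBDEV_EVENT_ERROR,
 *			error_cb, NULL);
 */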

int
rte_bbdev_callback_unregister(uint16_t dev_id, enum rte_bbdev_event_type event,
		rte_bbdev_cb_fn cb_fn, void *cb_arg)
{
	int ret = 0;
	struct rte_bbdev_callback *cb, *next;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	if (event >= RTE_BBDEV_EVENT_MAX) {
		rte_bbdev_log(ERR,
				"Invalid event type (%u), should be less than %u",
				event, RTE_BBDEV_EVENT_MAX);
		return -EINVAL;
	}

	if (cb_fn == NULL) {
		rte_bbdev_log(ERR,
				"NULL callback function cannot be unregistered");
		return -EINVAL;
	}

	dev = &rte_bbdev_devices[dev_id];
	rte_spinlock_lock(&rte_bbdev_cb_lock);

	for (cb = TAILQ_FIRST(&dev->list_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
			continue;

		/* If this callback is not executing right now, remove it. */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->list_cbs), cb, next);
			rte_free(cb);
		} else
			ret = -EAGAIN;
	}

	rte_spinlock_unlock(&rte_bbdev_cb_lock);
	return ret;
}

void
rte_bbdev_pmd_callback_process(struct rte_bbdev *dev,
	enum rte_bbdev_event_type event, void *ret_param)
{
	struct rte_bbdev_callback *cb_lst;
	struct rte_bbdev_callback dev_cb;

	if (dev == NULL) {
		rte_bbdev_log(ERR, "NULL device");
		return;
	}

	if (dev->data == NULL) {
		rte_bbdev_log(ERR, "NULL data structure");
		return;
	}

	if (event >= RTE_BBDEV_EVENT_MAX) {
		rte_bbdev_log(ERR,
				"Invalid event type (%u), should be less than %u",
				event, RTE_BBDEV_EVENT_MAX);
		return;
	}

	rte_spinlock_lock(&rte_bbdev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->list_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		if (ret_param != NULL)
			dev_cb.ret_param = ret_param;

		rte_spinlock_unlock(&rte_bbdev_cb_lock);
		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
				dev_cb.cb_arg, dev_cb.ret_param);
		rte_spinlock_lock(&rte_bbdev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_bbdev_cb_lock);
}
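
/*
 * Usage sketch (editorial illustration): a driver interrupt handler pushing
 * a dequeue event to registered callbacks; passing NULL keeps each
 * callback's registered ret_param. The handler name is hypothetical.
 *
 *	static void
 *	my_bbdev_isr(void *arg)
 *	{
 *		struct rte_bbdev *dev = arg;
 *
 *		rte_bbdev_pmd_callback_process(dev, RTE_BBDEV_EVENT_DEQUEUE,
 *				NULL);
 *	}
 */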

int
rte_bbdev_queue_intr_enable(uint16_t dev_id, uint16_t queue_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);
	VALID_QUEUE_OR_RET_ERR(queue_id, dev);
	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_intr_enable, dev_id);
	return dev->dev_ops->queue_intr_enable(dev, queue_id);
}

int
rte_bbdev_queue_intr_disable(uint16_t dev_id, uint16_t queue_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);
	VALID_QUEUE_OR_RET_ERR(queue_id, dev);
	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_intr_disable, dev_id);
	return dev->dev_ops->queue_intr_disable(dev, queue_id);
}

int
rte_bbdev_queue_intr_ctl(uint16_t dev_id, uint16_t queue_id, int epfd, int op,
		void *data)
{
	uint32_t vec;
	struct rte_bbdev *dev = get_dev(dev_id);
	struct rte_intr_handle *intr_handle;
	int ret;

	VALID_DEV_OR_RET_ERR(dev, dev_id);
	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	intr_handle = dev->intr_handle;
	if (intr_handle == NULL) {
		rte_bbdev_log(ERR, "Device %u intr handle unset", dev_id);
		return -ENOTSUP;
	}

	if (queue_id >= RTE_MAX_RXTX_INTR_VEC_ID) {
		rte_bbdev_log(ERR, "Device %u queue_id %u is too big",
				dev_id, queue_id);
		return -ENOTSUP;
	}

	vec = rte_intr_vec_list_index_get(intr_handle, queue_id);
	ret = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
	if (ret && (ret != -EEXIST)) {
		rte_bbdev_log(ERR,
				"dev %u q %u int ctl error op %d epfd %d vec %u",
				dev_id, queue_id, op, epfd, vec);
		return ret;
	}

	return 0;
}
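
/*
 * Usage sketch (editorial illustration): mapping a queue interrupt into the
 * calling thread's epoll instance and blocking on it, mirroring the ethdev
 * Rx-interrupt pattern. The one-event sizing and infinite timeout are
 * assumptions.
 *
 *	struct rte_epoll_event ev;
 *
 *	rte_bbdev_queue_intr_enable(dev_id, q_id);
 *	rte_bbdev_queue_intr_ctl(dev_id, q_id, RTE_EPOLL_PER_THREAD,
 *			RTE_INTR_EVENT_ADD, NULL);
 *	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);	// block
 */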


const char *
rte_bbdev_op_type_str(enum rte_bbdev_op_type op_type)
{
	static const char * const op_types[] = {
		"RTE_BBDEV_OP_NONE",
		"RTE_BBDEV_OP_TURBO_DEC",
		"RTE_BBDEV_OP_TURBO_ENC",
		"RTE_BBDEV_OP_LDPC_DEC",
		"RTE_BBDEV_OP_LDPC_ENC",
	};

	if (op_type < RTE_BBDEV_OP_TYPE_COUNT)
		return op_types[op_type];

	rte_bbdev_log(ERR, "Invalid operation type");
	return NULL;
}