/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdint.h>
#include <string.h>
#include <stdbool.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_log.h>
#include <rte_eal.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_memzone.h>
#include <rte_lcore.h>
#include <rte_spinlock.h>
#include <rte_interrupts.h>

#include "rte_bbdev_op.h"
#include "rte_bbdev.h"
#include "rte_bbdev_pmd.h"

#define DEV_NAME "BBDEV"

/* Number of supported operation types in *rte_bbdev_op_type*. */
#define BBDEV_OP_TYPE_COUNT 6

/* BBDev library logging ID */
RTE_LOG_REGISTER_DEFAULT(bbdev_logtype, NOTICE);

/* Helper macro for logging */
#define rte_bbdev_log(level, fmt, ...) \
	rte_log(RTE_LOG_ ## level, bbdev_logtype, fmt "\n", ##__VA_ARGS__)

#define rte_bbdev_log_debug(fmt, ...) \
	rte_bbdev_log(DEBUG, RTE_STR(__LINE__) ":%s() " fmt, __func__, \
		##__VA_ARGS__)

/* Helper macro to check dev_id is valid */
#define VALID_DEV_OR_RET_ERR(dev, dev_id) do { \
	if (dev == NULL) { \
		rte_bbdev_log(ERR, "device %u is invalid", dev_id); \
		return -ENODEV; \
	} \
} while (0)

/* Helper macro to check dev_ops is valid */
#define VALID_DEV_OPS_OR_RET_ERR(dev, dev_id) do { \
	if (dev->dev_ops == NULL) { \
		rte_bbdev_log(ERR, "NULL dev_ops structure in device %u", \
				dev_id); \
		return -ENODEV; \
	} \
} while (0)

/* Helper macro to check that driver implements required function pointer */
#define VALID_FUNC_OR_RET_ERR(func, dev_id) do { \
	if (func == NULL) { \
		rte_bbdev_log(ERR, "device %u does not support %s", \
				dev_id, #func); \
		return -ENOTSUP; \
	} \
} while (0)

/* Helper macro to check that queue is valid */
#define VALID_QUEUE_OR_RET_ERR(queue_id, dev) do { \
	if (queue_id >= dev->data->num_queues) { \
		rte_bbdev_log(ERR, "Invalid queue_id %u for device %u", \
				queue_id, dev->data->dev_id); \
		return -ERANGE; \
	} \
} while (0)

/* List of callback functions registered by an application */
struct rte_bbdev_callback {
	TAILQ_ENTRY(rte_bbdev_callback) next;  /* Callbacks list */
	rte_bbdev_cb_fn cb_fn;  /* Callback address */
	void *cb_arg;  /* Parameter for callback */
	void *ret_param;  /* Return parameter */
	enum rte_bbdev_event_type event; /* Interrupt event type */
	uint32_t active; /* Callback is executing */
};

/* spinlock for bbdev device callbacks */
static rte_spinlock_t rte_bbdev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/*
 * Global array of all devices. This is not static because it's used by the
 * inline enqueue and dequeue functions
 */
struct rte_bbdev rte_bbdev_devices[RTE_BBDEV_MAX_DEVS];

/* Global array with rte_bbdev_data structures */
static struct rte_bbdev_data *rte_bbdev_data;

/* Memzone name for global bbdev data pool */
static const char *MZ_RTE_BBDEV_DATA = "rte_bbdev_data";

/* Number of currently valid devices */
static uint16_t num_devs;

/* Return pointer to device structure, with validity check */
static struct rte_bbdev *
get_dev(uint16_t dev_id)
{
	if (rte_bbdev_is_valid(dev_id))
		return &rte_bbdev_devices[dev_id];
	return NULL;
}

/* Allocate global data array */
static int
rte_bbdev_data_alloc(void)
{
	const unsigned int flags = 0;
	const struct rte_memzone *mz;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(MZ_RTE_BBDEV_DATA,
				RTE_BBDEV_MAX_DEVS * sizeof(*rte_bbdev_data),
				rte_socket_id(), flags);
	} else
		mz = rte_memzone_lookup(MZ_RTE_BBDEV_DATA);
	if (mz == NULL) {
		rte_bbdev_log(CRIT,
				"Cannot allocate memzone for bbdev port data");
		return -ENOMEM;
	}

	rte_bbdev_data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(rte_bbdev_data, 0,
				RTE_BBDEV_MAX_DEVS * sizeof(*rte_bbdev_data));
	return 0;
}

/*
 * Find the data structure previously allocated for the named device, or, if
 * none is found, return the first unused entry. If all entries are in use
 * and none belongs to the device, return NULL.
 */
static struct rte_bbdev_data *
find_bbdev_data(const char *name)
{
	uint16_t data_id;

	for (data_id = 0; data_id < RTE_BBDEV_MAX_DEVS; ++data_id) {
		if (strlen(rte_bbdev_data[data_id].name) == 0) {
			memset(&rte_bbdev_data[data_id], 0,
					sizeof(struct rte_bbdev_data));
			return &rte_bbdev_data[data_id];
		} else if (strncmp(rte_bbdev_data[data_id].name, name,
				RTE_BBDEV_NAME_MAX_LEN) == 0)
			return &rte_bbdev_data[data_id];
	}

	return NULL;
}

/* Find lowest device id with no attached device */
static uint16_t
find_free_dev_id(void)
{
	uint16_t i;
	for (i = 0; i < RTE_BBDEV_MAX_DEVS; i++) {
		if (rte_bbdev_devices[i].state == RTE_BBDEV_UNUSED)
			return i;
	}
	return RTE_BBDEV_MAX_DEVS;
}

struct rte_bbdev *
rte_bbdev_allocate(const char *name)
{
	int ret;
	struct rte_bbdev *bbdev;
	uint16_t dev_id;

	if (name == NULL) {
		rte_bbdev_log(ERR, "Invalid null device name");
		return NULL;
	}

	if (rte_bbdev_get_named_dev(name) != NULL) {
		rte_bbdev_log(ERR, "Device \"%s\" is already allocated", name);
		return NULL;
	}

	dev_id = find_free_dev_id();
	if (dev_id == RTE_BBDEV_MAX_DEVS) {
		rte_bbdev_log(ERR, "Reached maximum number of devices");
		return NULL;
	}

	bbdev = &rte_bbdev_devices[dev_id];

	if (rte_bbdev_data == NULL) {
		ret = rte_bbdev_data_alloc();
		if (ret != 0)
			return NULL;
	}

	bbdev->data = find_bbdev_data(name);
	if (bbdev->data == NULL) {
		rte_bbdev_log(ERR,
				"Max BBDevs already allocated in multi-process environment!");
		return NULL;
	}

	__atomic_add_fetch(&bbdev->data->process_cnt, 1, __ATOMIC_RELAXED);
	bbdev->data->dev_id = dev_id;
	bbdev->state = RTE_BBDEV_INITIALIZED;

	ret = snprintf(bbdev->data->name, RTE_BBDEV_NAME_MAX_LEN, "%s", name);
	if ((ret < 0) || (ret >= RTE_BBDEV_NAME_MAX_LEN)) {
		rte_bbdev_log(ERR, "Copying device name \"%s\" failed", name);
		return NULL;
	}

	/* init user callbacks */
	TAILQ_INIT(&(bbdev->list_cbs));

	num_devs++;

	rte_bbdev_log_debug("Initialised device %s (id = %u). Num devices = %u",
			name, dev_id, num_devs);

	return bbdev;
}
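
/*
 * Usage sketch (illustrative only, not part of this file): a PMD typically
 * calls rte_bbdev_allocate() from its probe routine and fills in dev_ops
 * before the device is used. The driver name "my_bbdev" and the ops table
 * "my_bbdev_ops" below are hypothetical placeholders.
 *
 *	static int
 *	my_bbdev_probe(struct rte_vdev_device *vdev)
 *	{
 *		struct rte_bbdev *bbdev;
 *
 *		bbdev = rte_bbdev_allocate(rte_vdev_device_name(vdev));
 *		if (bbdev == NULL)
 *			return -ENODEV;
 *
 *		bbdev->dev_ops = &my_bbdev_ops;	// hypothetical ops table
 *		bbdev->device = &vdev->device;
 *		bbdev->data->socket_id = rte_socket_id();
 *		return 0;
 *	}
 */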

int
rte_bbdev_release(struct rte_bbdev *bbdev)
{
	uint16_t dev_id;
	struct rte_bbdev_callback *cb, *next;

	if (bbdev == NULL) {
		rte_bbdev_log(ERR, "NULL bbdev");
		return -ENODEV;
	}
	dev_id = bbdev->data->dev_id;

	/* free all callbacks from the device's list */
	for (cb = TAILQ_FIRST(&bbdev->list_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);
		TAILQ_REMOVE(&(bbdev->list_cbs), cb, next);
		rte_free(cb);
	}

	/* clear shared BBDev Data if no process is using the device anymore */
	if (__atomic_sub_fetch(&bbdev->data->process_cnt, 1,
			      __ATOMIC_RELAXED) == 0)
		memset(bbdev->data, 0, sizeof(*bbdev->data));

	memset(bbdev, 0, sizeof(*bbdev));
	num_devs--;
	bbdev->state = RTE_BBDEV_UNUSED;

	rte_bbdev_log_debug(
			"Un-initialised device id = %u. Num devices = %u",
			dev_id, num_devs);
	return 0;
}

struct rte_bbdev *
rte_bbdev_get_named_dev(const char *name)
{
	unsigned int i;

	if (name == NULL) {
		rte_bbdev_log(ERR, "NULL driver name");
		return NULL;
	}

	for (i = 0; i < RTE_BBDEV_MAX_DEVS; i++) {
		struct rte_bbdev *dev = get_dev(i);
		if (dev && (strncmp(dev->data->name,
				name, RTE_BBDEV_NAME_MAX_LEN) == 0))
			return dev;
	}

	return NULL;
}

uint16_t
rte_bbdev_count(void)
{
	return num_devs;
}

bool
rte_bbdev_is_valid(uint16_t dev_id)
{
	if ((dev_id < RTE_BBDEV_MAX_DEVS) &&
		rte_bbdev_devices[dev_id].state == RTE_BBDEV_INITIALIZED)
		return true;
	return false;
}

uint16_t
rte_bbdev_find_next(uint16_t dev_id)
{
	dev_id++;
	for (; dev_id < RTE_BBDEV_MAX_DEVS; dev_id++)
		if (rte_bbdev_is_valid(dev_id))
			break;
	return dev_id;
}
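
/*
 * Usage sketch (illustrative only): iterating over all valid devices with
 * rte_bbdev_find_next(). Starting from (uint16_t)-1 wraps to 0 on the first
 * increment, so every device id is visited; the public header also provides
 * the RTE_BBDEV_FOREACH() convenience macro for the same loop.
 *
 *	uint16_t dev_id;
 *
 *	for (dev_id = rte_bbdev_find_next((uint16_t)-1);
 *			dev_id < RTE_BBDEV_MAX_DEVS;
 *			dev_id = rte_bbdev_find_next(dev_id))
 *		printf("bbdev %u is valid\n", dev_id);
 */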

int
rte_bbdev_setup_queues(uint16_t dev_id, uint16_t num_queues, int socket_id)
{
	unsigned int i;
	int ret;
	struct rte_bbdev_driver_info dev_info;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->data->started) {
		rte_bbdev_log(ERR,
				"Device %u cannot be configured when started",
				dev_id);
		return -EBUSY;
	}

	/* Get device driver information to check the max number of queues */
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->info_get, dev_id);
	memset(&dev_info, 0, sizeof(dev_info));
	dev->dev_ops->info_get(dev, &dev_info);

	if ((num_queues == 0) || (num_queues > dev_info.max_num_queues)) {
		rte_bbdev_log(ERR,
				"Device %u supports 0 < N <= %u queues, not %u",
				dev_id, dev_info.max_num_queues, num_queues);
		return -EINVAL;
	}

	/* If re-configuration, get driver to free existing internal memory */
	if (dev->data->queues != NULL) {
		VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_release, dev_id);
		for (i = 0; i < dev->data->num_queues; i++) {
			ret = dev->dev_ops->queue_release(dev, i);
			if (ret < 0) {
				rte_bbdev_log(ERR,
						"Device %u queue %u release failed",
						dev_id, i);
				return ret;
			}
		}
		/* Call optional device close */
		if (dev->dev_ops->close) {
			ret = dev->dev_ops->close(dev);
			if (ret < 0) {
				rte_bbdev_log(ERR,
						"Device %u couldn't be closed",
						dev_id);
				return ret;
			}
		}
		rte_free(dev->data->queues);
	}

	/* Allocate queue pointers */
	dev->data->queues = rte_calloc_socket(DEV_NAME, num_queues,
			sizeof(dev->data->queues[0]), RTE_CACHE_LINE_SIZE,
				dev->data->socket_id);
	if (dev->data->queues == NULL) {
		rte_bbdev_log(ERR,
				"calloc of %u queues for device %u on socket %i failed",
				num_queues, dev_id, dev->data->socket_id);
		return -ENOMEM;
	}

	dev->data->num_queues = num_queues;

	/* Call optional device configuration */
	if (dev->dev_ops->setup_queues) {
		ret = dev->dev_ops->setup_queues(dev, num_queues, socket_id);
		if (ret < 0) {
			rte_bbdev_log(ERR,
					"Device %u memory configuration failed",
					dev_id);
			goto error;
		}
	}

	rte_bbdev_log_debug("Device %u set up with %u queues", dev_id,
			num_queues);
	return 0;

error:
	dev->data->num_queues = 0;
	rte_free(dev->data->queues);
	dev->data->queues = NULL;
	return ret;
}

int
rte_bbdev_intr_enable(uint16_t dev_id)
{
	int ret;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->data->started) {
		rte_bbdev_log(ERR,
				"Device %u cannot be configured when started",
				dev_id);
		return -EBUSY;
	}

	if (dev->dev_ops->intr_enable) {
		ret = dev->dev_ops->intr_enable(dev);
		if (ret < 0) {
			rte_bbdev_log(ERR,
					"Device %u interrupts configuration failed",
					dev_id);
			return ret;
		}
		rte_bbdev_log_debug("Enabled interrupts for dev %u", dev_id);
		return 0;
	}

	rte_bbdev_log(ERR, "Device %u doesn't support interrupts", dev_id);
	return -ENOTSUP;
}

int
rte_bbdev_queue_configure(uint16_t dev_id, uint16_t queue_id,
		const struct rte_bbdev_queue_conf *conf)
{
	int ret = 0;
	struct rte_bbdev_driver_info dev_info;
	struct rte_bbdev *dev = get_dev(dev_id);
	const struct rte_bbdev_op_cap *p;
	struct rte_bbdev_queue_conf *stored_conf;
	const char *op_type_str;
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	if (dev->data->queues[queue_id].started || dev->data->started) {
		rte_bbdev_log(ERR,
				"Queue %u of device %u cannot be configured when started",
				queue_id, dev_id);
		return -EBUSY;
	}

	VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_release, dev_id);
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_setup, dev_id);

	/* Get device driver information to verify config is valid */
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->info_get, dev_id);
	memset(&dev_info, 0, sizeof(dev_info));
	dev->dev_ops->info_get(dev, &dev_info);

	/* Check configuration is valid */
	if (conf != NULL) {
		if ((conf->op_type == RTE_BBDEV_OP_NONE) &&
				(dev_info.capabilities[0].type ==
				RTE_BBDEV_OP_NONE)) {
			ret = 1;
		} else {
			for (p = dev_info.capabilities;
					p->type != RTE_BBDEV_OP_NONE; p++) {
				if (conf->op_type == p->type) {
					ret = 1;
					break;
				}
			}
		}
		if (ret == 0) {
			rte_bbdev_log(ERR, "Invalid operation type");
			return -EINVAL;
		}
		if (conf->queue_size > dev_info.queue_size_lim) {
			rte_bbdev_log(ERR,
					"Size (%u) of queue %u of device %u must be: <= %u",
					conf->queue_size, queue_id, dev_id,
					dev_info.queue_size_lim);
			return -EINVAL;
		}
		if (!rte_is_power_of_2(conf->queue_size)) {
			rte_bbdev_log(ERR,
					"Size (%u) of queue %u of device %u must be a power of 2",
					conf->queue_size, queue_id, dev_id);
			return -EINVAL;
		}
		if (conf->op_type == RTE_BBDEV_OP_TURBO_DEC &&
			conf->priority > dev_info.max_ul_queue_priority) {
			rte_bbdev_log(ERR,
					"Priority (%u) of queue %u of bbdev %u must be <= %u",
					conf->priority, queue_id, dev_id,
					dev_info.max_ul_queue_priority);
			return -EINVAL;
		}
		if (conf->op_type == RTE_BBDEV_OP_TURBO_ENC &&
			conf->priority > dev_info.max_dl_queue_priority) {
			rte_bbdev_log(ERR,
					"Priority (%u) of queue %u of bbdev %u must be <= %u",
					conf->priority, queue_id, dev_id,
					dev_info.max_dl_queue_priority);
			return -EINVAL;
		}
	}

	/* Release existing queue (in case of queue reconfiguration) */
	if (dev->data->queues[queue_id].queue_private != NULL) {
		ret = dev->dev_ops->queue_release(dev, queue_id);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u queue %u release failed",
					dev_id, queue_id);
			return ret;
		}
	}

	/* Get driver to setup the queue */
	ret = dev->dev_ops->queue_setup(dev, queue_id, (conf != NULL) ?
			conf : &dev_info.default_queue_conf);
	if (ret < 0) {
		/* This may happen when trying different priority levels */
		rte_bbdev_log(INFO,
				"Device %u queue %u setup failed",
				dev_id, queue_id);
		return ret;
	}

	/* Store configuration */
	stored_conf = &dev->data->queues[queue_id].conf;
	memcpy(stored_conf,
			(conf != NULL) ? conf : &dev_info.default_queue_conf,
			sizeof(*stored_conf));

	op_type_str = rte_bbdev_op_type_str(stored_conf->op_type);
	if (op_type_str == NULL)
		return -EINVAL;

	rte_bbdev_log_debug("Configured dev%uq%u (size=%u, type=%s, prio=%u)",
			dev_id, queue_id, stored_conf->queue_size, op_type_str,
			stored_conf->priority);

	return 0;
}
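
/*
 * Usage sketch (illustrative only): the expected application-side
 * configuration flow, assuming a device with LDPC decode capability.
 * Error handling is elided for brevity.
 *
 *	struct rte_bbdev_info info;
 *	struct rte_bbdev_queue_conf qconf;
 *	uint16_t q;
 *
 *	rte_bbdev_info_get(dev_id, &info);
 *	rte_bbdev_setup_queues(dev_id, num_queues, rte_socket_id());
 *
 *	qconf = info.drv.default_queue_conf;
 *	qconf.op_type = RTE_BBDEV_OP_LDPC_DEC;
 *	for (q = 0; q < num_queues; q++)
 *		rte_bbdev_queue_configure(dev_id, q, &qconf);
 *
 *	rte_bbdev_start(dev_id);
 */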

int
rte_bbdev_start(uint16_t dev_id)
{
	int i;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->data->started) {
		rte_bbdev_log_debug("Device %u is already started", dev_id);
		return 0;
	}

	if (dev->dev_ops->start) {
		int ret = dev->dev_ops->start(dev);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u start failed", dev_id);
			return ret;
		}
	}

	/* Store new state */
	for (i = 0; i < dev->data->num_queues; i++)
		if (!dev->data->queues[i].conf.deferred_start)
			dev->data->queues[i].started = true;
	dev->data->started = true;

	rte_bbdev_log_debug("Started device %u", dev_id);
	return 0;
}

int
rte_bbdev_stop(uint16_t dev_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (!dev->data->started) {
		rte_bbdev_log_debug("Device %u is already stopped", dev_id);
		return 0;
	}

	if (dev->dev_ops->stop)
		dev->dev_ops->stop(dev);
	dev->data->started = false;

	rte_bbdev_log_debug("Stopped device %u", dev_id);
	return 0;
}

int
rte_bbdev_close(uint16_t dev_id)
{
	int ret;
	uint16_t i;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->data->started) {
		ret = rte_bbdev_stop(dev_id);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u stop failed", dev_id);
			return ret;
		}
	}

	/* Free memory used by queues */
	for (i = 0; i < dev->data->num_queues; i++) {
		ret = dev->dev_ops->queue_release(dev, i);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u queue %u release failed",
					dev_id, i);
			return ret;
		}
	}
	rte_free(dev->data->queues);

	if (dev->dev_ops->close) {
		ret = dev->dev_ops->close(dev);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u close failed", dev_id);
			return ret;
		}
	}

	/* Clear configuration */
	dev->data->queues = NULL;
	dev->data->num_queues = 0;

	rte_bbdev_log_debug("Closed device %u", dev_id);
	return 0;
}
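
/*
 * Usage sketch (illustrative only): the matching teardown sequence. As the
 * code above shows, rte_bbdev_close() stops the device itself if it is
 * still started, so the explicit rte_bbdev_stop() is optional.
 *
 *	rte_bbdev_stop(dev_id);
 *	rte_bbdev_close(dev_id);
 */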

int
rte_bbdev_queue_start(uint16_t dev_id, uint16_t queue_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	if (dev->data->queues[queue_id].started) {
		rte_bbdev_log_debug("Queue %u of device %u already started",
				queue_id, dev_id);
		return 0;
	}

	if (dev->dev_ops->queue_start) {
		int ret = dev->dev_ops->queue_start(dev, queue_id);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u queue %u start failed",
					dev_id, queue_id);
			return ret;
		}
	}
	dev->data->queues[queue_id].started = true;

	rte_bbdev_log_debug("Started queue %u of device %u", queue_id, dev_id);
	return 0;
}

int
rte_bbdev_queue_stop(uint16_t dev_id, uint16_t queue_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	if (!dev->data->queues[queue_id].started) {
		rte_bbdev_log_debug("Queue %u of device %u already stopped",
				queue_id, dev_id);
		return 0;
	}

	if (dev->dev_ops->queue_stop) {
		int ret = dev->dev_ops->queue_stop(dev, queue_id);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u queue %u stop failed",
					dev_id, queue_id);
			return ret;
		}
	}
	dev->data->queues[queue_id].started = false;

	rte_bbdev_log_debug("Stopped queue %u of device %u", queue_id, dev_id);
	return 0;
}
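
/*
 * Usage sketch (illustrative only): queues marked with deferred_start in
 * their configuration are left stopped by rte_bbdev_start() and can be
 * brought up (and later taken down) individually.
 *
 *	qconf.deferred_start = true;
 *	rte_bbdev_queue_configure(dev_id, queue_id, &qconf);
 *	rte_bbdev_start(dev_id);	// this queue stays stopped
 *	rte_bbdev_queue_start(dev_id, queue_id);	// bring up on demand
 *	// ... use the queue ...
 *	rte_bbdev_queue_stop(dev_id, queue_id);
 */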

/* Get device statistics */
static void
get_stats_from_queues(struct rte_bbdev *dev, struct rte_bbdev_stats *stats)
{
	unsigned int q_id;
	for (q_id = 0; q_id < dev->data->num_queues; q_id++) {
		struct rte_bbdev_stats *q_stats =
				&dev->data->queues[q_id].queue_stats;

		stats->enqueued_count += q_stats->enqueued_count;
		stats->dequeued_count += q_stats->dequeued_count;
		stats->enqueue_err_count += q_stats->enqueue_err_count;
		stats->dequeue_err_count += q_stats->dequeue_err_count;
		stats->enqueue_warn_count += q_stats->enqueue_warn_count;
		stats->dequeue_warn_count += q_stats->dequeue_warn_count;
	}
	rte_bbdev_log_debug("Got stats on device %u", dev->data->dev_id);
}

static void
reset_stats_in_queues(struct rte_bbdev *dev)
{
	unsigned int q_id;
	for (q_id = 0; q_id < dev->data->num_queues; q_id++) {
		struct rte_bbdev_stats *q_stats =
				&dev->data->queues[q_id].queue_stats;

		memset(q_stats, 0, sizeof(*q_stats));
	}
	rte_bbdev_log_debug("Reset stats on device %u", dev->data->dev_id);
}

int
rte_bbdev_stats_get(uint16_t dev_id, struct rte_bbdev_stats *stats)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (stats == NULL) {
		rte_bbdev_log(ERR, "NULL stats structure");
		return -EINVAL;
	}

	memset(stats, 0, sizeof(*stats));
	if (dev->dev_ops->stats_get != NULL)
		dev->dev_ops->stats_get(dev, stats);
	else
		get_stats_from_queues(dev, stats);

	rte_bbdev_log_debug("Retrieved stats of device %u", dev_id);
	return 0;
}

int
rte_bbdev_stats_reset(uint16_t dev_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->dev_ops->stats_reset != NULL)
		dev->dev_ops->stats_reset(dev);
	else
		reset_stats_in_queues(dev);

	rte_bbdev_log_debug("Reset stats of device %u", dev_id);
	return 0;
}
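
/*
 * Usage sketch (illustrative only, assuming <inttypes.h>): periodic polling
 * of the aggregate counters. When the driver does not implement stats_get,
 * the values are summed from the per-queue counters by
 * get_stats_from_queues() above.
 *
 *	struct rte_bbdev_stats stats;
 *
 *	if (rte_bbdev_stats_get(dev_id, &stats) == 0)
 *		printf("enq=%"PRIu64" deq=%"PRIu64" enq_err=%"PRIu64"\n",
 *				stats.enqueued_count, stats.dequeued_count,
 *				stats.enqueue_err_count);
 */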

int
rte_bbdev_info_get(uint16_t dev_id, struct rte_bbdev_info *dev_info)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_FUNC_OR_RET_ERR(dev->dev_ops->info_get, dev_id);

	if (dev_info == NULL) {
		rte_bbdev_log(ERR, "NULL dev info structure");
		return -EINVAL;
	}

	/* Copy data maintained by device interface layer */
	memset(dev_info, 0, sizeof(*dev_info));
	dev_info->dev_name = dev->data->name;
	dev_info->num_queues = dev->data->num_queues;
	dev_info->device = dev->device;
	dev_info->socket_id = dev->data->socket_id;
	dev_info->started = dev->data->started;

	/* Copy data maintained by device driver layer */
	dev->dev_ops->info_get(dev, &dev_info->drv);

	rte_bbdev_log_debug("Retrieved info of device %u", dev_id);
	return 0;
}
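
/*
 * Usage sketch (illustrative only): discovering what a device can do. The
 * driver-provided capabilities array is terminated by an entry of type
 * RTE_BBDEV_OP_NONE, the same convention queue configuration relies on
 * above.
 *
 *	struct rte_bbdev_info info;
 *	const struct rte_bbdev_op_cap *cap;
 *
 *	rte_bbdev_info_get(dev_id, &info);
 *	for (cap = info.drv.capabilities;
 *			cap->type != RTE_BBDEV_OP_NONE; cap++)
 *		printf("supports %s\n", rte_bbdev_op_type_str(cap->type));
 */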

int
rte_bbdev_queue_info_get(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_queue_info *queue_info)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	if (queue_info == NULL) {
		rte_bbdev_log(ERR, "NULL queue info structure");
		return -EINVAL;
	}

	/* Copy data to output */
	memset(queue_info, 0, sizeof(*queue_info));
	queue_info->conf = dev->data->queues[queue_id].conf;
	queue_info->started = dev->data->queues[queue_id].started;

	rte_bbdev_log_debug("Retrieved info of queue %u of device %u",
			queue_id, dev_id);
	return 0;
}

/* Calculate size needed to store bbdev_op, depending on type */
static unsigned int
get_bbdev_op_size(enum rte_bbdev_op_type type)
{
	unsigned int result = 0;
	switch (type) {
	case RTE_BBDEV_OP_NONE:
		result = RTE_MAX(sizeof(struct rte_bbdev_dec_op),
				sizeof(struct rte_bbdev_enc_op));
		break;
	case RTE_BBDEV_OP_TURBO_DEC:
		result = sizeof(struct rte_bbdev_dec_op);
		break;
	case RTE_BBDEV_OP_TURBO_ENC:
		result = sizeof(struct rte_bbdev_enc_op);
		break;
	case RTE_BBDEV_OP_LDPC_DEC:
		result = sizeof(struct rte_bbdev_dec_op);
		break;
	case RTE_BBDEV_OP_LDPC_ENC:
		result = sizeof(struct rte_bbdev_enc_op);
		break;
	case RTE_BBDEV_OP_FFT:
		result = sizeof(struct rte_bbdev_fft_op);
		break;
	default:
		break;
	}

	return result;
}

/* Initialise a bbdev_op structure */
static void
bbdev_op_init(struct rte_mempool *mempool, void *arg, void *element,
		__rte_unused unsigned int n)
{
	enum rte_bbdev_op_type type = *(enum rte_bbdev_op_type *)arg;

	if (type == RTE_BBDEV_OP_TURBO_DEC || type == RTE_BBDEV_OP_LDPC_DEC) {
		struct rte_bbdev_dec_op *op = element;
		memset(op, 0, mempool->elt_size);
		op->mempool = mempool;
	} else if (type == RTE_BBDEV_OP_TURBO_ENC ||
			type == RTE_BBDEV_OP_LDPC_ENC) {
		struct rte_bbdev_enc_op *op = element;
		memset(op, 0, mempool->elt_size);
		op->mempool = mempool;
	} else if (type == RTE_BBDEV_OP_FFT) {
		struct rte_bbdev_fft_op *op = element;
		memset(op, 0, mempool->elt_size);
		op->mempool = mempool;
	}
}

struct rte_mempool *
rte_bbdev_op_pool_create(const char *name, enum rte_bbdev_op_type type,
		unsigned int num_elements, unsigned int cache_size,
		int socket_id)
{
	struct rte_bbdev_op_pool_private *priv;
	struct rte_mempool *mp;
	const char *op_type_str;

	if (name == NULL) {
		rte_bbdev_log(ERR, "NULL name for op pool");
		return NULL;
	}

	if (type >= BBDEV_OP_TYPE_COUNT) {
		rte_bbdev_log(ERR,
				"Invalid op type (%u), should be less than %u",
				type, BBDEV_OP_TYPE_COUNT);
		return NULL;
	}

	mp = rte_mempool_create(name, num_elements, get_bbdev_op_size(type),
			cache_size, sizeof(struct rte_bbdev_op_pool_private),
			NULL, NULL, bbdev_op_init, &type, socket_id, 0);
	if (mp == NULL) {
		rte_bbdev_log(ERR,
				"Failed to create op pool %s (num ops=%u, op size=%u) with error: %s",
				name, num_elements, get_bbdev_op_size(type),
				rte_strerror(rte_errno));
		return NULL;
	}

	op_type_str = rte_bbdev_op_type_str(type);
	if (op_type_str == NULL)
		return NULL;

	rte_bbdev_log_debug(
			"Op pool %s created for %u ops (type=%s, cache=%u, socket=%u, size=%u)",
			name, num_elements, op_type_str, cache_size, socket_id,
			get_bbdev_op_size(type));

	priv = (struct rte_bbdev_op_pool_private *)rte_mempool_get_priv(mp);
	priv->type = type;

	return mp;
}
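
/*
 * Usage sketch (illustrative only): creating an op pool and drawing a burst
 * of decode operations from it. rte_bbdev_dec_op_alloc_bulk() and its
 * _free_bulk() counterpart come from rte_bbdev_op.h; the pool and burst
 * sizes here are arbitrary examples.
 *
 *	struct rte_mempool *pool;
 *	struct rte_bbdev_dec_op *ops[32];
 *
 *	pool = rte_bbdev_op_pool_create("ldpc_dec_pool",
 *			RTE_BBDEV_OP_LDPC_DEC, 2048, 128, rte_socket_id());
 *	if (pool != NULL &&
 *			rte_bbdev_dec_op_alloc_bulk(pool, ops, 32) == 0) {
 *		// ... fill ops, enqueue/dequeue, then release
 *		rte_bbdev_dec_op_free_bulk(ops, 32);
 *	}
 */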

int
rte_bbdev_callback_register(uint16_t dev_id, enum rte_bbdev_event_type event,
		rte_bbdev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_bbdev_callback *user_cb;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	if (event >= RTE_BBDEV_EVENT_MAX) {
		rte_bbdev_log(ERR,
				"Invalid event type (%u), should be less than %u",
				event, RTE_BBDEV_EVENT_MAX);
		return -EINVAL;
	}

	if (cb_fn == NULL) {
		rte_bbdev_log(ERR, "NULL callback function");
		return -EINVAL;
	}

	rte_spinlock_lock(&rte_bbdev_cb_lock);

	TAILQ_FOREACH(user_cb, &(dev->list_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
				user_cb->cb_arg == cb_arg &&
				user_cb->event == event)
			break;
	}

	/* create a new callback. */
	if (user_cb == NULL) {
		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_bbdev_callback), 0);
		if (user_cb != NULL) {
			user_cb->cb_fn = cb_fn;
			user_cb->cb_arg = cb_arg;
			user_cb->event = event;
			TAILQ_INSERT_TAIL(&(dev->list_cbs), user_cb, next);
		}
	}

	rte_spinlock_unlock(&rte_bbdev_cb_lock);
	return (user_cb == NULL) ? -ENOMEM : 0;
}

int
rte_bbdev_callback_unregister(uint16_t dev_id, enum rte_bbdev_event_type event,
		rte_bbdev_cb_fn cb_fn, void *cb_arg)
{
	int ret = 0;
	struct rte_bbdev_callback *cb, *next;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	if (event >= RTE_BBDEV_EVENT_MAX) {
		rte_bbdev_log(ERR,
				"Invalid event type (%u), should be less than %u",
				event, RTE_BBDEV_EVENT_MAX);
		return -EINVAL;
	}

	if (cb_fn == NULL) {
		rte_bbdev_log(ERR,
				"NULL callback function cannot be unregistered");
		return -EINVAL;
	}

	rte_spinlock_lock(&rte_bbdev_cb_lock);

	for (cb = TAILQ_FIRST(&dev->list_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
			continue;

		/* If this callback is not executing right now, remove it. */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->list_cbs), cb, next);
			rte_free(cb);
		} else
			ret = -EAGAIN;
	}

	rte_spinlock_unlock(&rte_bbdev_cb_lock);
	return ret;
}
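
/*
 * Usage sketch (illustrative only): registering an application handler for
 * device error events. Passing cb_arg == (void *)-1 to the unregister call
 * removes the callback regardless of the argument it was registered with,
 * as handled above.
 *
 *	static void
 *	error_cb(uint16_t dev_id, enum rte_bbdev_event_type event,
 *			void *cb_arg, void *ret_param)
 *	{
 *		printf("bbdev %u raised event %d\n", dev_id, event);
 *	}
 *
 *	rte_bbdev_callback_register(dev_id, RTE_BBDEV_EVENT_ERROR,
 *			error_cb, NULL);
 *	// ...
 *	rte_bbdev_callback_unregister(dev_id, RTE_BBDEV_EVENT_ERROR,
 *			error_cb, (void *)-1);
 */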

void
rte_bbdev_pmd_callback_process(struct rte_bbdev *dev,
	enum rte_bbdev_event_type event, void *ret_param)
{
	struct rte_bbdev_callback *cb_lst;
	struct rte_bbdev_callback dev_cb;

	if (dev == NULL) {
		rte_bbdev_log(ERR, "NULL device");
		return;
	}

	if (dev->data == NULL) {
		rte_bbdev_log(ERR, "NULL data structure");
		return;
	}

	if (event >= RTE_BBDEV_EVENT_MAX) {
		rte_bbdev_log(ERR,
				"Invalid event type (%u), should be less than %u",
				event, RTE_BBDEV_EVENT_MAX);
		return;
	}

	rte_spinlock_lock(&rte_bbdev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->list_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		if (ret_param != NULL)
			dev_cb.ret_param = ret_param;

		rte_spinlock_unlock(&rte_bbdev_cb_lock);
		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
				dev_cb.cb_arg, dev_cb.ret_param);
		rte_spinlock_lock(&rte_bbdev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_bbdev_cb_lock);
}

int
rte_bbdev_queue_intr_enable(uint16_t dev_id, uint16_t queue_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);
	VALID_QUEUE_OR_RET_ERR(queue_id, dev);
	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_intr_enable, dev_id);
	return dev->dev_ops->queue_intr_enable(dev, queue_id);
}

int
rte_bbdev_queue_intr_disable(uint16_t dev_id, uint16_t queue_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);
	VALID_QUEUE_OR_RET_ERR(queue_id, dev);
	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_intr_disable, dev_id);
	return dev->dev_ops->queue_intr_disable(dev, queue_id);
}

int
rte_bbdev_queue_intr_ctl(uint16_t dev_id, uint16_t queue_id, int epfd, int op,
		void *data)
{
	uint32_t vec;
	struct rte_bbdev *dev = get_dev(dev_id);
	struct rte_intr_handle *intr_handle;
	int ret;

	VALID_DEV_OR_RET_ERR(dev, dev_id);
	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	intr_handle = dev->intr_handle;
	if (intr_handle == NULL) {
		rte_bbdev_log(ERR, "Device %u intr handle unset", dev_id);
		return -ENOTSUP;
	}

	if (queue_id >= RTE_MAX_RXTX_INTR_VEC_ID) {
		rte_bbdev_log(ERR, "Device %u queue_id %u is too big",
				dev_id, queue_id);
		return -ENOTSUP;
	}

	vec = rte_intr_vec_list_index_get(intr_handle, queue_id);
	ret = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
	if (ret && (ret != -EEXIST)) {
		rte_bbdev_log(ERR,
				"dev %u q %u int ctl error op %d epfd %d vec %u",
				dev_id, queue_id, op, epfd, vec);
		return ret;
	}

	return 0;
}
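
/*
 * Usage sketch (illustrative only): arming a queue interrupt and waiting on
 * it with the EAL epoll wrapper. RTE_EPOLL_PER_THREAD selects the calling
 * thread's private epoll instance; RTE_INTR_EVENT_ADD registers the queue's
 * vector with it.
 *
 *	struct rte_epoll_event ev;
 *
 *	rte_bbdev_queue_intr_enable(dev_id, queue_id);
 *	rte_bbdev_queue_intr_ctl(dev_id, queue_id, RTE_EPOLL_PER_THREAD,
 *			RTE_INTR_EVENT_ADD, NULL);
 *	// block until the device signals the queue (timeout in ms)
 *	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, 1000);
 */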


const char *
rte_bbdev_op_type_str(enum rte_bbdev_op_type op_type)
{
	static const char * const op_types[] = {
		"RTE_BBDEV_OP_NONE",
		"RTE_BBDEV_OP_TURBO_DEC",
		"RTE_BBDEV_OP_TURBO_ENC",
		"RTE_BBDEV_OP_LDPC_DEC",
		"RTE_BBDEV_OP_LDPC_ENC",
		"RTE_BBDEV_OP_FFT",
	};

	if (op_type < BBDEV_OP_TYPE_COUNT)
		return op_types[op_type];

	rte_bbdev_log(ERR, "Invalid operation type");
	return NULL;
}

const char *
rte_bbdev_device_status_str(enum rte_bbdev_device_status status)
{
	static const char * const dev_sta_string[] = {
		"RTE_BBDEV_DEV_NOSTATUS",
		"RTE_BBDEV_DEV_NOT_SUPPORTED",
		"RTE_BBDEV_DEV_RESET",
		"RTE_BBDEV_DEV_CONFIGURED",
		"RTE_BBDEV_DEV_ACTIVE",
		"RTE_BBDEV_DEV_FATAL_ERR",
		"RTE_BBDEV_DEV_RESTART_REQ",
		"RTE_BBDEV_DEV_RECONFIG_REQ",
		"RTE_BBDEV_DEV_CORRECT_ERR",
	};

	/* Cast from enum required for clang. */
	if ((uint8_t)status < sizeof(dev_sta_string) / sizeof(char *))
		return dev_sta_string[status];

	rte_bbdev_log(ERR, "Invalid device status");
	return NULL;
}

const char *
rte_bbdev_enqueue_status_str(enum rte_bbdev_enqueue_status status)
{
	static const char * const enq_sta_string[] = {
		"RTE_BBDEV_ENQ_STATUS_NONE",
		"RTE_BBDEV_ENQ_STATUS_QUEUE_FULL",
		"RTE_BBDEV_ENQ_STATUS_RING_FULL",
		"RTE_BBDEV_ENQ_STATUS_INVALID_OP",
	};

	/* Cast from enum required for clang. */
	if ((uint8_t)status < sizeof(enq_sta_string) / sizeof(char *))
		return enq_sta_string[status];

	rte_bbdev_log(ERR, "Invalid enqueue status");
	return NULL;
}