/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdint.h>
#include <string.h>
#include <stdbool.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_log.h>
#include <rte_eal.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_memzone.h>
#include <rte_lcore.h>
#include <rte_spinlock.h>
#include <rte_interrupts.h>

#include "rte_bbdev_op.h"
#include "rte_bbdev.h"
#include "rte_bbdev_pmd.h"

#define DEV_NAME "BBDEV"

/* Number of supported operation types in *rte_bbdev_op_type*. */
#define BBDEV_OP_TYPE_COUNT 7

/* BBDev library logging ID */
RTE_LOG_REGISTER_DEFAULT(bbdev_logtype, NOTICE);

/* Helper macro for logging */
#define rte_bbdev_log(level, fmt, ...) \
	rte_log(RTE_LOG_ ## level, bbdev_logtype, fmt "\n", ##__VA_ARGS__)

#define rte_bbdev_log_debug(fmt, ...) \
	rte_bbdev_log(DEBUG, RTE_STR(__LINE__) ":%s() " fmt, __func__, \
		##__VA_ARGS__)

/* Helper macro to check dev_id is valid */
#define VALID_DEV_OR_RET_ERR(dev, dev_id) do { \
	if (dev == NULL) { \
		rte_bbdev_log(ERR, "device %u is invalid", dev_id); \
		return -ENODEV; \
	} \
} while (0)

/* Helper macro to check dev_ops is valid */
#define VALID_DEV_OPS_OR_RET_ERR(dev, dev_id) do { \
	if (dev->dev_ops == NULL) { \
		rte_bbdev_log(ERR, "NULL dev_ops structure in device %u", \
				dev_id); \
		return -ENODEV; \
	} \
} while (0)

/* Helper macro to check that driver implements required function pointer */
#define VALID_FUNC_OR_RET_ERR(func, dev_id) do { \
	if (func == NULL) { \
		rte_bbdev_log(ERR, "device %u does not support %s", \
				dev_id, #func); \
		return -ENOTSUP; \
	} \
} while (0)

/* Helper macro to check that queue is valid */
#define VALID_QUEUE_OR_RET_ERR(queue_id, dev) do { \
	if (queue_id >= dev->data->num_queues) { \
		rte_bbdev_log(ERR, "Invalid queue_id %u for device %u", \
				queue_id, dev->data->dev_id); \
		return -ERANGE; \
	} \
} while (0)

/* List of callback functions registered by an application */
struct rte_bbdev_callback {
	TAILQ_ENTRY(rte_bbdev_callback) next;  /* Callbacks list */
	rte_bbdev_cb_fn cb_fn;  /* Callback address */
	void *cb_arg;  /* Parameter for callback */
	void *ret_param;  /* Return parameter */
	enum rte_bbdev_event_type event; /* Interrupt event type */
	uint32_t active; /* Callback is executing */
};

/* spinlock for bbdev device callbacks */
static rte_spinlock_t rte_bbdev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/*
 * Global array of all devices. This is not static because it's used by the
 * inline enqueue and dequeue functions
 */
struct rte_bbdev rte_bbdev_devices[RTE_BBDEV_MAX_DEVS];

/* Global array with rte_bbdev_data structures */
static struct rte_bbdev_data *rte_bbdev_data;

/* Memzone name for global bbdev data pool */
static const char *MZ_RTE_BBDEV_DATA = "rte_bbdev_data";

/* Number of currently valid devices */
static uint16_t num_devs;

/* Return pointer to device structure, with validity check */
static struct rte_bbdev *
get_dev(uint16_t dev_id)
{
	if (rte_bbdev_is_valid(dev_id))
		return &rte_bbdev_devices[dev_id];
	return NULL;
}

/* Allocate global data array */
static int
rte_bbdev_data_alloc(void)
{
	const unsigned int flags = 0;
	const struct rte_memzone *mz;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(MZ_RTE_BBDEV_DATA,
				RTE_BBDEV_MAX_DEVS * sizeof(*rte_bbdev_data),
				rte_socket_id(), flags);
	} else {
		mz = rte_memzone_lookup(MZ_RTE_BBDEV_DATA);
	}
	if (mz == NULL) {
		rte_bbdev_log(CRIT,
				"Cannot allocate memzone for bbdev device data");
		return -ENOMEM;
	}

	rte_bbdev_data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(rte_bbdev_data, 0,
				RTE_BBDEV_MAX_DEVS * sizeof(*rte_bbdev_data));
	return 0;
}

/*
 * Scan the global data array and return the first entry that is either
 * unused or already named after this device. Return NULL if every entry
 * is in use by another device.
 */
static struct rte_bbdev_data *
find_bbdev_data(const char *name)
{
	uint16_t data_id;

	for (data_id = 0; data_id < RTE_BBDEV_MAX_DEVS; ++data_id) {
		if (strlen(rte_bbdev_data[data_id].name) == 0) {
			memset(&rte_bbdev_data[data_id], 0,
					sizeof(struct rte_bbdev_data));
			return &rte_bbdev_data[data_id];
		} else if (strncmp(rte_bbdev_data[data_id].name, name,
				RTE_BBDEV_NAME_MAX_LEN) == 0)
			return &rte_bbdev_data[data_id];
	}

	return NULL;
}

/* Find lowest device id with no attached device */
static uint16_t
find_free_dev_id(void)
{
	uint16_t i;
	for (i = 0; i < RTE_BBDEV_MAX_DEVS; i++) {
		if (rte_bbdev_devices[i].state == RTE_BBDEV_UNUSED)
			return i;
	}
	return RTE_BBDEV_MAX_DEVS;
}

struct rte_bbdev *
rte_bbdev_allocate(const char *name)
{
	int ret;
	struct rte_bbdev *bbdev;
	uint16_t dev_id;

	if (name == NULL) {
		rte_bbdev_log(ERR, "Invalid null device name");
		return NULL;
	}

	if (rte_bbdev_get_named_dev(name) != NULL) {
		rte_bbdev_log(ERR, "Device \"%s\" is already allocated", name);
		return NULL;
	}

	dev_id = find_free_dev_id();
	if (dev_id == RTE_BBDEV_MAX_DEVS) {
		rte_bbdev_log(ERR, "Reached maximum number of devices");
		return NULL;
	}

	bbdev = &rte_bbdev_devices[dev_id];

	if (rte_bbdev_data == NULL) {
		ret = rte_bbdev_data_alloc();
		if (ret != 0)
			return NULL;
	}

	bbdev->data = find_bbdev_data(name);
	if (bbdev->data == NULL) {
		rte_bbdev_log(ERR,
				"Max BBDevs already allocated in multi-process environment!");
		return NULL;
	}

	rte_atomic_fetch_add_explicit(&bbdev->data->process_cnt, 1, rte_memory_order_relaxed);
	bbdev->data->dev_id = dev_id;
	bbdev->state = RTE_BBDEV_INITIALIZED;

	ret = snprintf(bbdev->data->name, RTE_BBDEV_NAME_MAX_LEN, "%s", name);
	if ((ret < 0) || (ret >= RTE_BBDEV_NAME_MAX_LEN)) {
		rte_bbdev_log(ERR, "Copying device name \"%s\" failed", name);
		return NULL;
	}

	/* init user callbacks */
	TAILQ_INIT(&(bbdev->list_cbs));

	num_devs++;

	rte_bbdev_log_debug("Initialised device %s (id = %u). Num devices = %u",
			name, dev_id, num_devs);

	return bbdev;
}

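/*
 * Usage sketch for PMD authors: a minimal probe flow pairing
 * rte_bbdev_allocate() with driver wiring. Illustrative only; the names
 * "my_probe" and "my_dev_ops" are placeholders, not part of this library.
 *
 *	static int
 *	my_probe(struct rte_device *rte_dev, const char *name)
 *	{
 *		struct rte_bbdev *bbdev = rte_bbdev_allocate(name);
 *		if (bbdev == NULL)
 *			return -ENODEV;
 *		bbdev->dev_ops = &my_dev_ops;
 *		bbdev->device = rte_dev;
 *		bbdev->data->socket_id = rte_dev->numa_node;
 *		return 0;
 *	}
 */
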
int
rte_bbdev_release(struct rte_bbdev *bbdev)
{
	uint16_t dev_id;
	struct rte_bbdev_callback *cb, *next;

	if (bbdev == NULL) {
		rte_bbdev_log(ERR, "NULL bbdev");
		return -ENODEV;
	}
	dev_id = bbdev->data->dev_id;

	/* free all callbacks from the device's list */
	for (cb = TAILQ_FIRST(&bbdev->list_cbs); cb != NULL; cb = next) {
		next = TAILQ_NEXT(cb, next);
		TAILQ_REMOVE(&(bbdev->list_cbs), cb, next);
		rte_free(cb);
	}

	/* clear shared BBDev Data if no process is using the device anymore */
	if (rte_atomic_fetch_sub_explicit(&bbdev->data->process_cnt, 1,
			      rte_memory_order_relaxed) - 1 == 0)
		memset(bbdev->data, 0, sizeof(*bbdev->data));

	memset(bbdev, 0, sizeof(*bbdev));
	num_devs--;
	bbdev->state = RTE_BBDEV_UNUSED;

	rte_bbdev_log_debug(
			"Un-initialised device id = %u. Num devices = %u",
			dev_id, num_devs);
	return 0;
}

struct rte_bbdev *
rte_bbdev_get_named_dev(const char *name)
{
	unsigned int i;

	if (name == NULL) {
		rte_bbdev_log(ERR, "NULL driver name");
		return NULL;
	}

	for (i = 0; i < RTE_BBDEV_MAX_DEVS; i++) {
		struct rte_bbdev *dev = get_dev(i);
		if (dev && (strncmp(dev->data->name,
				name, RTE_BBDEV_NAME_MAX_LEN) == 0))
			return dev;
	}

	return NULL;
}

uint16_t
rte_bbdev_count(void)
{
	return num_devs;
}

bool
rte_bbdev_is_valid(uint16_t dev_id)
{
	if ((dev_id < RTE_BBDEV_MAX_DEVS) &&
		rte_bbdev_devices[dev_id].state == RTE_BBDEV_INITIALIZED)
		return true;
	return false;
}

uint16_t
rte_bbdev_find_next(uint16_t dev_id)
{
	dev_id++;
	for (; dev_id < RTE_BBDEV_MAX_DEVS; dev_id++)
		if (rte_bbdev_is_valid(dev_id))
			break;
	return dev_id;
}

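/*
 * Usage sketch: applications can walk all attached devices with the
 * RTE_BBDEV_FOREACH() helper declared in rte_bbdev.h, which is built on
 * rte_bbdev_find_next(). Illustrative only.
 *
 *	uint16_t dev_id;
 *
 *	RTE_BBDEV_FOREACH(dev_id)
 *		printf("bbdev %u is attached\n", dev_id);
 */
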
int
rte_bbdev_setup_queues(uint16_t dev_id, uint16_t num_queues, int socket_id)
{
	unsigned int i;
	int ret;
	struct rte_bbdev_driver_info dev_info;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->data->started) {
		rte_bbdev_log(ERR,
				"Device %u cannot be configured when started",
				dev_id);
		return -EBUSY;
	}

	/* Get device driver information to get max number of queues */
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->info_get, dev_id);
	memset(&dev_info, 0, sizeof(dev_info));
	dev->dev_ops->info_get(dev, &dev_info);

	if ((num_queues == 0) || (num_queues > dev_info.max_num_queues)) {
		rte_bbdev_log(ERR,
				"Device %u supports 0 < N <= %u queues, not %u",
				dev_id, dev_info.max_num_queues, num_queues);
		return -EINVAL;
	}

	/* If re-configuration, get driver to free existing internal memory */
	if (dev->data->queues != NULL) {
		VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_release, dev_id);
		for (i = 0; i < dev->data->num_queues; i++) {
			ret = dev->dev_ops->queue_release(dev, i);
			if (ret < 0) {
				rte_bbdev_log(ERR,
						"Device %u queue %u release failed",
						dev_id, i);
				return ret;
			}
		}
		/* Call optional device close */
		if (dev->dev_ops->close) {
			ret = dev->dev_ops->close(dev);
			if (ret < 0) {
				rte_bbdev_log(ERR,
						"Device %u couldn't be closed",
						dev_id);
				return ret;
			}
		}
		rte_free(dev->data->queues);
	}

	/* Allocate queue pointers */
	dev->data->queues = rte_calloc_socket(DEV_NAME, num_queues,
			sizeof(dev->data->queues[0]), RTE_CACHE_LINE_SIZE,
				dev->data->socket_id);
	if (dev->data->queues == NULL) {
		rte_bbdev_log(ERR,
				"calloc of %u queues for device %u on socket %i failed",
				num_queues, dev_id, dev->data->socket_id);
		return -ENOMEM;
	}

	dev->data->num_queues = num_queues;

	/* Call optional device configuration */
	if (dev->dev_ops->setup_queues) {
		ret = dev->dev_ops->setup_queues(dev, num_queues, socket_id);
		if (ret < 0) {
			rte_bbdev_log(ERR,
					"Device %u memory configuration failed",
					dev_id);
			goto error;
		}
	}

	rte_bbdev_log_debug("Device %u set up with %u queues", dev_id,
			num_queues);
	return 0;

error:
	dev->data->num_queues = 0;
	rte_free(dev->data->queues);
	dev->data->queues = NULL;
	return ret;
}

int
rte_bbdev_intr_enable(uint16_t dev_id)
{
	int ret;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->data->started) {
		rte_bbdev_log(ERR,
				"Device %u cannot be configured when started",
				dev_id);
		return -EBUSY;
	}

	if (dev->dev_ops->intr_enable) {
		ret = dev->dev_ops->intr_enable(dev);
		if (ret < 0) {
			rte_bbdev_log(ERR,
					"Device %u interrupts configuration failed",
					dev_id);
			return ret;
		}
		rte_bbdev_log_debug("Enabled interrupts for dev %u", dev_id);
		return 0;
	}

	rte_bbdev_log(ERR, "Device %u doesn't support interrupts", dev_id);
	return -ENOTSUP;
}

int
rte_bbdev_queue_configure(uint16_t dev_id, uint16_t queue_id,
		const struct rte_bbdev_queue_conf *conf)
{
	int ret = 0;
	struct rte_bbdev_driver_info dev_info;
	struct rte_bbdev *dev = get_dev(dev_id);
	const struct rte_bbdev_op_cap *p;
	struct rte_bbdev_queue_conf *stored_conf;
	const char *op_type_str;
	unsigned int max_priority;
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	if (dev->data->queues[queue_id].started || dev->data->started) {
		rte_bbdev_log(ERR,
				"Queue %u of device %u cannot be configured when started",
				queue_id, dev_id);
		return -EBUSY;
	}

	VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_release, dev_id);
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_setup, dev_id);

	/* Get device driver information to verify config is valid */
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->info_get, dev_id);
	memset(&dev_info, 0, sizeof(dev_info));
	dev->dev_ops->info_get(dev, &dev_info);

	/* Check configuration is valid */
	if (conf != NULL) {
		if ((conf->op_type == RTE_BBDEV_OP_NONE) &&
				(dev_info.capabilities[0].type ==
				RTE_BBDEV_OP_NONE)) {
			ret = 1;
		} else {
			for (p = dev_info.capabilities;
					p->type != RTE_BBDEV_OP_NONE; p++) {
				if (conf->op_type == p->type) {
					ret = 1;
					break;
				}
			}
		}
		if (ret == 0) {
			rte_bbdev_log(ERR, "Invalid operation type");
			return -EINVAL;
		}
		if (conf->queue_size > dev_info.queue_size_lim) {
			rte_bbdev_log(ERR,
					"Size (%u) of queue %u of device %u must be: <= %u",
					conf->queue_size, queue_id, dev_id,
					dev_info.queue_size_lim);
			return -EINVAL;
		}
		if (!rte_is_power_of_2(conf->queue_size)) {
			rte_bbdev_log(ERR,
					"Size (%u) of queue %u of device %u must be a power of 2",
					conf->queue_size, queue_id, dev_id);
			return -EINVAL;
		}
		if ((uint8_t)conf->op_type >= RTE_BBDEV_OP_TYPE_SIZE_MAX) {
			rte_bbdev_log(ERR,
					"Invalid operation type (%u)", conf->op_type);
			return -EINVAL;
		}
		max_priority = dev_info.queue_priority[conf->op_type];
		if (conf->priority > max_priority) {
			rte_bbdev_log(ERR,
					"Priority (%u) of queue %u of device %u must be <= %u",
					conf->priority, queue_id, dev_id, max_priority);
			return -EINVAL;
		}
	}

	/* Release existing queue (in case of queue reconfiguration) */
	if (dev->data->queues[queue_id].queue_private != NULL) {
		ret = dev->dev_ops->queue_release(dev, queue_id);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u queue %u release failed",
					dev_id, queue_id);
			return ret;
		}
	}

	/* Get driver to setup the queue */
	ret = dev->dev_ops->queue_setup(dev, queue_id, (conf != NULL) ?
			conf : &dev_info.default_queue_conf);
	if (ret < 0) {
		/* This may happen when trying different priority levels */
		rte_bbdev_log(INFO,
				"Device %u queue %u setup failed",
				dev_id, queue_id);
		return ret;
	}

	/* Store configuration */
	stored_conf = &dev->data->queues[queue_id].conf;
	memcpy(stored_conf,
			(conf != NULL) ? conf : &dev_info.default_queue_conf,
			sizeof(*stored_conf));

	op_type_str = rte_bbdev_op_type_str(stored_conf->op_type);
	if (op_type_str == NULL)
		return -EINVAL;

	rte_bbdev_log_debug("Configured dev%uq%u (size=%u, type=%s, prio=%u)",
			dev_id, queue_id, stored_conf->queue_size, op_type_str,
			stored_conf->priority);

	return 0;
}

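/*
 * Usage sketch: configure every queue of a stopped device for LDPC
 * decode, starting from the driver defaults. Illustrative only; "dev_id"
 * and "num_queues" are caller-supplied placeholders.
 *
 *	struct rte_bbdev_info info;
 *	struct rte_bbdev_queue_conf qconf;
 *	uint16_t q_id;
 *
 *	rte_bbdev_info_get(dev_id, &info);
 *	rte_bbdev_setup_queues(dev_id, num_queues, rte_socket_id());
 *	qconf = info.drv.default_queue_conf;
 *	qconf.op_type = RTE_BBDEV_OP_LDPC_DEC;
 *	for (q_id = 0; q_id < num_queues; q_id++)
 *		rte_bbdev_queue_configure(dev_id, q_id, &qconf);
 */
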
int
rte_bbdev_start(uint16_t dev_id)
{
	int i;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->data->started) {
		rte_bbdev_log_debug("Device %u is already started", dev_id);
		return 0;
	}

	if (dev->dev_ops->start) {
		int ret = dev->dev_ops->start(dev);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u start failed", dev_id);
			return ret;
		}
	}

	/* Store new state */
	for (i = 0; i < dev->data->num_queues; i++)
		if (!dev->data->queues[i].conf.deferred_start)
			dev->data->queues[i].started = true;
	dev->data->started = true;

	rte_bbdev_log_debug("Started device %u", dev_id);
	return 0;
}

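/*
 * Usage sketch: a queue configured with deferred_start set is left
 * stopped by rte_bbdev_start() and must be started explicitly.
 * Illustrative only; "dev_id" and "q_id" are caller-supplied.
 *
 *	rte_bbdev_start(dev_id);
 *	rte_bbdev_queue_start(dev_id, q_id);
 */
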
int
rte_bbdev_stop(uint16_t dev_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (!dev->data->started) {
		rte_bbdev_log_debug("Device %u is already stopped", dev_id);
		return 0;
	}

	if (dev->dev_ops->stop)
		dev->dev_ops->stop(dev);
	dev->data->started = false;

	rte_bbdev_log_debug("Stopped device %u", dev_id);
	return 0;
}

int
rte_bbdev_close(uint16_t dev_id)
{
	int ret;
	uint16_t i;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->data->started) {
		ret = rte_bbdev_stop(dev_id);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u stop failed", dev_id);
			return ret;
		}
	}

	/* Free memory used by queues */
	for (i = 0; i < dev->data->num_queues; i++) {
		ret = dev->dev_ops->queue_release(dev, i);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u queue %u release failed",
					dev_id, i);
			return ret;
		}
	}
	rte_free(dev->data->queues);

	if (dev->dev_ops->close) {
		ret = dev->dev_ops->close(dev);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u close failed", dev_id);
			return ret;
		}
	}

	/* Clear configuration */
	dev->data->queues = NULL;
	dev->data->num_queues = 0;

	rte_bbdev_log_debug("Closed device %u", dev_id);
	return 0;
}

int
rte_bbdev_queue_start(uint16_t dev_id, uint16_t queue_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	if (dev->data->queues[queue_id].started) {
		rte_bbdev_log_debug("Queue %u of device %u already started",
				queue_id, dev_id);
		return 0;
	}

	if (dev->dev_ops->queue_start) {
		int ret = dev->dev_ops->queue_start(dev, queue_id);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u queue %u start failed",
					dev_id, queue_id);
			return ret;
		}
	}
	dev->data->queues[queue_id].started = true;

	rte_bbdev_log_debug("Started queue %u of device %u", queue_id, dev_id);
	return 0;
}

int
rte_bbdev_queue_stop(uint16_t dev_id, uint16_t queue_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	if (!dev->data->queues[queue_id].started) {
		rte_bbdev_log_debug("Queue %u of device %u already stopped",
				queue_id, dev_id);
		return 0;
	}

	if (dev->dev_ops->queue_stop) {
		int ret = dev->dev_ops->queue_stop(dev, queue_id);
		if (ret < 0) {
			rte_bbdev_log(ERR, "Device %u queue %u stop failed",
					dev_id, queue_id);
			return ret;
		}
	}
	dev->data->queues[queue_id].started = false;

	rte_bbdev_log_debug("Stopped queue %u of device %u", queue_id, dev_id);
	return 0;
}

/* Get device statistics */
static void
get_stats_from_queues(struct rte_bbdev *dev, struct rte_bbdev_stats *stats)
{
	unsigned int q_id;
	for (q_id = 0; q_id < dev->data->num_queues; q_id++) {
		struct rte_bbdev_stats *q_stats =
				&dev->data->queues[q_id].queue_stats;

		stats->enqueued_count += q_stats->enqueued_count;
		stats->dequeued_count += q_stats->dequeued_count;
		stats->enqueue_err_count += q_stats->enqueue_err_count;
		stats->dequeue_err_count += q_stats->dequeue_err_count;
		stats->enqueue_warn_count += q_stats->enqueue_warn_count;
		stats->dequeue_warn_count += q_stats->dequeue_warn_count;
	}
	rte_bbdev_log_debug("Got stats on device %u", dev->data->dev_id);
}

static void
reset_stats_in_queues(struct rte_bbdev *dev)
{
	unsigned int q_id;
	for (q_id = 0; q_id < dev->data->num_queues; q_id++) {
		struct rte_bbdev_stats *q_stats =
				&dev->data->queues[q_id].queue_stats;

		memset(q_stats, 0, sizeof(*q_stats));
	}
	rte_bbdev_log_debug("Reset stats on device %u", dev->data->dev_id);
}

int
rte_bbdev_stats_get(uint16_t dev_id, struct rte_bbdev_stats *stats)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (stats == NULL) {
		rte_bbdev_log(ERR, "NULL stats structure");
		return -EINVAL;
	}

	memset(stats, 0, sizeof(*stats));
	if (dev->dev_ops->stats_get != NULL)
		dev->dev_ops->stats_get(dev, stats);
	else
		get_stats_from_queues(dev, stats);

	rte_bbdev_log_debug("Retrieved stats of device %u", dev_id);
	return 0;
}

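/*
 * Usage sketch: periodic statistics polling from an application thread.
 * Illustrative only; requires <inttypes.h> for the PRIu64 macros.
 *
 *	struct rte_bbdev_stats stats;
 *
 *	if (rte_bbdev_stats_get(dev_id, &stats) == 0)
 *		printf("enq=%" PRIu64 " deq=%" PRIu64 " enq_err=%" PRIu64 "\n",
 *				stats.enqueued_count, stats.dequeued_count,
 *				stats.enqueue_err_count);
 */
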
int
rte_bbdev_stats_reset(uint16_t dev_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);

	if (dev->dev_ops->stats_reset != NULL)
		dev->dev_ops->stats_reset(dev);
	else
		reset_stats_in_queues(dev);

	rte_bbdev_log_debug("Reset stats of device %u", dev_id);
	return 0;
}

int
rte_bbdev_info_get(uint16_t dev_id, struct rte_bbdev_info *dev_info)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_FUNC_OR_RET_ERR(dev->dev_ops->info_get, dev_id);

	if (dev_info == NULL) {
		rte_bbdev_log(ERR, "NULL dev info structure");
		return -EINVAL;
	}

	/* Copy data maintained by device interface layer */
	memset(dev_info, 0, sizeof(*dev_info));
	dev_info->dev_name = dev->data->name;
	dev_info->num_queues = dev->data->num_queues;
	dev_info->device = dev->device;
	dev_info->socket_id = dev->data->socket_id;
	dev_info->started = dev->data->started;

	/* Copy data maintained by device driver layer */
	dev->dev_ops->info_get(dev, &dev_info->drv);

	rte_bbdev_log_debug("Retrieved info of device %u", dev_id);
	return 0;
}

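/*
 * Usage sketch: discover what a device supports. The driver capability
 * array is terminated by an entry of type RTE_BBDEV_OP_NONE, the same
 * convention rte_bbdev_queue_configure() relies on above. Illustrative
 * only.
 *
 *	struct rte_bbdev_info info;
 *	const struct rte_bbdev_op_cap *cap;
 *
 *	rte_bbdev_info_get(dev_id, &info);
 *	for (cap = info.drv.capabilities;
 *			cap->type != RTE_BBDEV_OP_NONE; cap++)
 *		printf("%s supports %s\n", info.dev_name,
 *				rte_bbdev_op_type_str(cap->type));
 */
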
int
rte_bbdev_queue_info_get(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_queue_info *queue_info)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	if (queue_info == NULL) {
		rte_bbdev_log(ERR, "NULL queue info structure");
		return -EINVAL;
	}

	/* Copy data to output */
	memset(queue_info, 0, sizeof(*queue_info));
	queue_info->conf = dev->data->queues[queue_id].conf;
	queue_info->started = dev->data->queues[queue_id].started;

	rte_bbdev_log_debug("Retrieved info of queue %u of device %u",
			queue_id, dev_id);
	return 0;
}

/* Calculate size needed to store bbdev_op, depending on type */
static unsigned int
get_bbdev_op_size(enum rte_bbdev_op_type type)
{
	unsigned int result = 0;
	switch (type) {
	case RTE_BBDEV_OP_NONE:
		result = RTE_MAX(sizeof(struct rte_bbdev_dec_op),
				sizeof(struct rte_bbdev_enc_op));
		break;
	case RTE_BBDEV_OP_TURBO_DEC:
	case RTE_BBDEV_OP_LDPC_DEC:
		result = sizeof(struct rte_bbdev_dec_op);
		break;
	case RTE_BBDEV_OP_TURBO_ENC:
	case RTE_BBDEV_OP_LDPC_ENC:
		result = sizeof(struct rte_bbdev_enc_op);
		break;
	case RTE_BBDEV_OP_FFT:
		result = sizeof(struct rte_bbdev_fft_op);
		break;
	case RTE_BBDEV_OP_MLDTS:
		result = sizeof(struct rte_bbdev_mldts_op);
		break;
	default:
		break;
	}

	return result;
}

/* Initialise a bbdev_op structure */
static void
bbdev_op_init(struct rte_mempool *mempool, void *arg, void *element,
		__rte_unused unsigned int n)
{
	enum rte_bbdev_op_type type = *(enum rte_bbdev_op_type *)arg;

	if (type == RTE_BBDEV_OP_TURBO_DEC || type == RTE_BBDEV_OP_LDPC_DEC) {
		struct rte_bbdev_dec_op *op = element;
		memset(op, 0, mempool->elt_size);
		op->mempool = mempool;
	} else if (type == RTE_BBDEV_OP_TURBO_ENC ||
			type == RTE_BBDEV_OP_LDPC_ENC) {
		struct rte_bbdev_enc_op *op = element;
		memset(op, 0, mempool->elt_size);
		op->mempool = mempool;
	} else if (type == RTE_BBDEV_OP_FFT) {
		struct rte_bbdev_fft_op *op = element;
		memset(op, 0, mempool->elt_size);
		op->mempool = mempool;
	} else if (type == RTE_BBDEV_OP_MLDTS) {
		struct rte_bbdev_mldts_op *op = element;
		memset(op, 0, mempool->elt_size);
		op->mempool = mempool;
	}
}

struct rte_mempool *
rte_bbdev_op_pool_create(const char *name, enum rte_bbdev_op_type type,
		unsigned int num_elements, unsigned int cache_size,
		int socket_id)
{
	struct rte_bbdev_op_pool_private *priv;
	struct rte_mempool *mp;
	const char *op_type_str;

	if (name == NULL) {
		rte_bbdev_log(ERR, "NULL name for op pool");
		return NULL;
	}

	if (type >= BBDEV_OP_TYPE_COUNT) {
		rte_bbdev_log(ERR,
				"Invalid op type (%u), should be less than %u",
				type, BBDEV_OP_TYPE_COUNT);
		return NULL;
	}

	mp = rte_mempool_create(name, num_elements, get_bbdev_op_size(type),
			cache_size, sizeof(struct rte_bbdev_op_pool_private),
			NULL, NULL, bbdev_op_init, &type, socket_id, 0);
	if (mp == NULL) {
		rte_bbdev_log(ERR,
				"Failed to create op pool %s (num ops=%u, op size=%u) with error: %s",
				name, num_elements, get_bbdev_op_size(type),
				rte_strerror(rte_errno));
		return NULL;
	}

	op_type_str = rte_bbdev_op_type_str(type);
	if (op_type_str == NULL)
		return NULL;

	rte_bbdev_log_debug(
			"Op pool %s created for %u ops (type=%s, cache=%u, socket=%d, size=%u)",
			name, num_elements, op_type_str, cache_size, socket_id,
			get_bbdev_op_size(type));

	priv = (struct rte_bbdev_op_pool_private *)rte_mempool_get_priv(mp);
	priv->type = type;

	return mp;
}

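/*
 * Usage sketch: create a pool of LDPC encode operations and draw a burst
 * from it before enqueueing. Illustrative only; the pool name, sizing and
 * "BURST_SIZE" are placeholders.
 *
 *	struct rte_mempool *pool = rte_bbdev_op_pool_create("ldpc_enc_pool",
 *			RTE_BBDEV_OP_LDPC_ENC, 4096, 128, rte_socket_id());
 *	struct rte_bbdev_enc_op *ops[BURST_SIZE];
 *
 *	if (pool != NULL &&
 *			rte_bbdev_enc_op_alloc_bulk(pool, ops, BURST_SIZE) == 0) {
 *		... fill op fields, then ...
 *		rte_bbdev_enqueue_ldpc_enc_ops(dev_id, q_id, ops, BURST_SIZE);
 *	}
 */
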
int
rte_bbdev_callback_register(uint16_t dev_id, enum rte_bbdev_event_type event,
		rte_bbdev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_bbdev_callback *user_cb;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	if (event >= RTE_BBDEV_EVENT_MAX) {
		rte_bbdev_log(ERR,
				"Invalid event type (%u), should be less than %u",
				event, RTE_BBDEV_EVENT_MAX);
		return -EINVAL;
	}

	if (cb_fn == NULL) {
		rte_bbdev_log(ERR, "NULL callback function");
		return -EINVAL;
	}

	rte_spinlock_lock(&rte_bbdev_cb_lock);

	TAILQ_FOREACH(user_cb, &(dev->list_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
				user_cb->cb_arg == cb_arg &&
				user_cb->event == event)
			break;
	}

	/* create a new callback. */
	if (user_cb == NULL) {
		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_bbdev_callback), 0);
		if (user_cb != NULL) {
			user_cb->cb_fn = cb_fn;
			user_cb->cb_arg = cb_arg;
			user_cb->event = event;
			TAILQ_INSERT_TAIL(&(dev->list_cbs), user_cb, next);
		}
	}

	rte_spinlock_unlock(&rte_bbdev_cb_lock);
	return (user_cb == NULL) ? -ENOMEM : 0;
}

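/*
 * Usage sketch: register a handler for fatal device errors. Illustrative
 * only; "on_error" is an application-defined placeholder.
 *
 *	static void
 *	on_error(uint16_t dev_id, enum rte_bbdev_event_type event,
 *			void *cb_arg, void *ret_param)
 *	{
 *		... react to the error ...
 *	}
 *
 *	rte_bbdev_callback_register(dev_id, RTE_BBDEV_EVENT_ERROR,
 *			on_error, NULL);
 */
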
int
rte_bbdev_callback_unregister(uint16_t dev_id, enum rte_bbdev_event_type event,
		rte_bbdev_cb_fn cb_fn, void *cb_arg)
{
	int ret = 0;
	struct rte_bbdev_callback *cb, *next;
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);

	if (event >= RTE_BBDEV_EVENT_MAX) {
		rte_bbdev_log(ERR,
				"Invalid event type (%u), should be less than %u",
				event, RTE_BBDEV_EVENT_MAX);
		return -EINVAL;
	}

	if (cb_fn == NULL) {
		rte_bbdev_log(ERR,
				"NULL callback function cannot be unregistered");
		return -EINVAL;
	}

	rte_spinlock_lock(&rte_bbdev_cb_lock);

	for (cb = TAILQ_FIRST(&dev->list_cbs); cb != NULL; cb = next) {
		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
			continue;

		/* If this callback is not executing right now, remove it. */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->list_cbs), cb, next);
			rte_free(cb);
		} else
			ret = -EAGAIN;
	}

	rte_spinlock_unlock(&rte_bbdev_cb_lock);
	return ret;
}

void
rte_bbdev_pmd_callback_process(struct rte_bbdev *dev,
	enum rte_bbdev_event_type event, void *ret_param)
{
	struct rte_bbdev_callback *cb_lst;
	struct rte_bbdev_callback dev_cb;

	if (dev == NULL) {
		rte_bbdev_log(ERR, "NULL device");
		return;
	}

	if (dev->data == NULL) {
		rte_bbdev_log(ERR, "NULL data structure");
		return;
	}

	if (event >= RTE_BBDEV_EVENT_MAX) {
		rte_bbdev_log(ERR,
				"Invalid event type (%u), should be less than %u",
				event, RTE_BBDEV_EVENT_MAX);
		return;
	}

	rte_spinlock_lock(&rte_bbdev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->list_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		if (ret_param != NULL)
			dev_cb.ret_param = ret_param;

		rte_spinlock_unlock(&rte_bbdev_cb_lock);
		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
				dev_cb.cb_arg, dev_cb.ret_param);
		rte_spinlock_lock(&rte_bbdev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_bbdev_cb_lock);
}

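/*
 * Usage sketch for PMD authors: an interrupt handler notifies registered
 * applications through this helper, optionally passing the queue id as
 * ret_param. Illustrative only; "my_pmd_isr" is a placeholder.
 *
 *	static void
 *	my_pmd_isr(void *arg)
 *	{
 *		struct rte_bbdev *dev = arg;
 *		uint16_t queue_id = ...;
 *
 *		rte_bbdev_pmd_callback_process(dev, RTE_BBDEV_EVENT_DEQUEUE,
 *				&queue_id);
 *	}
 */
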
int
rte_bbdev_queue_intr_enable(uint16_t dev_id, uint16_t queue_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);
	VALID_QUEUE_OR_RET_ERR(queue_id, dev);
	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_intr_enable, dev_id);
	return dev->dev_ops->queue_intr_enable(dev, queue_id);
}

int
rte_bbdev_queue_intr_disable(uint16_t dev_id, uint16_t queue_id)
{
	struct rte_bbdev *dev = get_dev(dev_id);
	VALID_DEV_OR_RET_ERR(dev, dev_id);
	VALID_QUEUE_OR_RET_ERR(queue_id, dev);
	VALID_DEV_OPS_OR_RET_ERR(dev, dev_id);
	VALID_FUNC_OR_RET_ERR(dev->dev_ops->queue_intr_disable, dev_id);
	return dev->dev_ops->queue_intr_disable(dev, queue_id);
}

int
rte_bbdev_queue_intr_ctl(uint16_t dev_id, uint16_t queue_id, int epfd, int op,
		void *data)
{
	uint32_t vec;
	struct rte_bbdev *dev = get_dev(dev_id);
	struct rte_intr_handle *intr_handle;
	int ret;

	VALID_DEV_OR_RET_ERR(dev, dev_id);
	VALID_QUEUE_OR_RET_ERR(queue_id, dev);

	intr_handle = dev->intr_handle;
	if (intr_handle == NULL) {
		rte_bbdev_log(ERR, "Device %u intr handle unset", dev_id);
		return -ENOTSUP;
	}

	if (queue_id >= RTE_MAX_RXTX_INTR_VEC_ID) {
		rte_bbdev_log(ERR, "Device %u queue_id %u is too big",
				dev_id, queue_id);
		return -ENOTSUP;
	}

	vec = rte_intr_vec_list_index_get(intr_handle, queue_id);
	ret = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
	if (ret && (ret != -EEXIST)) {
		rte_bbdev_log(ERR,
				"dev %u q %u int ctl error op %d epfd %d vec %u",
				dev_id, queue_id, op, epfd, vec);
		return ret;
	}

	return 0;
}

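/*
 * Usage sketch: hook a queue interrupt into the calling thread's epoll
 * instance and block until the device signals completions. Illustrative
 * only; error handling is omitted.
 *
 *	struct rte_epoll_event ev;
 *
 *	rte_bbdev_queue_intr_enable(dev_id, queue_id);
 *	rte_bbdev_queue_intr_ctl(dev_id, queue_id, RTE_EPOLL_PER_THREAD,
 *			RTE_INTR_EVENT_ADD, NULL);
 *	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
 */
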
const char *
rte_bbdev_op_type_str(enum rte_bbdev_op_type op_type)
{
	static const char * const op_types[] = {
		"RTE_BBDEV_OP_NONE",
		"RTE_BBDEV_OP_TURBO_DEC",
		"RTE_BBDEV_OP_TURBO_ENC",
		"RTE_BBDEV_OP_LDPC_DEC",
		"RTE_BBDEV_OP_LDPC_ENC",
		"RTE_BBDEV_OP_FFT",
		"RTE_BBDEV_OP_MLDTS",
	};

	if (op_type < BBDEV_OP_TYPE_COUNT)
		return op_types[op_type];

	rte_bbdev_log(ERR, "Invalid operation type");
	return NULL;
}

const char *
rte_bbdev_device_status_str(enum rte_bbdev_device_status status)
{
	static const char * const dev_sta_string[] = {
		"RTE_BBDEV_DEV_NOSTATUS",
		"RTE_BBDEV_DEV_NOT_SUPPORTED",
		"RTE_BBDEV_DEV_RESET",
		"RTE_BBDEV_DEV_CONFIGURED",
		"RTE_BBDEV_DEV_ACTIVE",
		"RTE_BBDEV_DEV_FATAL_ERR",
		"RTE_BBDEV_DEV_RESTART_REQ",
		"RTE_BBDEV_DEV_RECONFIG_REQ",
		"RTE_BBDEV_DEV_CORRECT_ERR",
	};

	/* Cast from enum required for clang. */
	if ((uint8_t)status < sizeof(dev_sta_string) / sizeof(char *))
		return dev_sta_string[status];

	rte_bbdev_log(ERR, "Invalid device status");
	return NULL;
}

const char *
rte_bbdev_enqueue_status_str(enum rte_bbdev_enqueue_status status)
{
	static const char * const enq_sta_string[] = {
		"RTE_BBDEV_ENQ_STATUS_NONE",
		"RTE_BBDEV_ENQ_STATUS_QUEUE_FULL",
		"RTE_BBDEV_ENQ_STATUS_RING_FULL",
		"RTE_BBDEV_ENQ_STATUS_INVALID_OP",
	};

	/* Cast from enum required for clang. */
	if ((uint8_t)status < sizeof(enq_sta_string) / sizeof(char *))
		return enq_sta_string[status];

	rte_bbdev_log(ERR, "Invalid enqueue status");
	return NULL;
}