xref: /dpdk/lib/bbdev/rte_bbdev.h (revision daa02b5cddbb8e11b31d41e2bf7bb1ae64dcae2f)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Intel Corporation
3  */
4 
5 #ifndef _RTE_BBDEV_H_
6 #define _RTE_BBDEV_H_
7 
8 /**
9  * @file rte_bbdev.h
10  *
11  * Wireless base band device abstraction APIs.
12  *
13  * @warning
14  * @b EXPERIMENTAL:
15  * All functions in this file may be changed or removed without prior notice.
16  *
17  * This API allows an application to discover, configure and use a device to
18  * process operations. An asynchronous API (enqueue, followed by later dequeue)
19  * is used for processing operations.
20  *
21  * The functions in this API are not thread-safe when called on the same
22  * target object (a device, or a queue on a device), with the exception that
23  * one thread can enqueue operations to a queue while another thread dequeues
24  * from the same queue.
25  */
26 
27 #ifdef __cplusplus
28 extern "C" {
29 #endif
30 
31 #include <stdint.h>
32 #include <stdbool.h>
33 #include <string.h>
34 
35 #include <rte_compat.h>
36 #include <rte_bus.h>
37 #include <rte_cpuflags.h>
38 #include <rte_memory.h>
39 
40 #include "rte_bbdev_op.h"
41 
42 #ifndef RTE_BBDEV_MAX_DEVS
43 #define RTE_BBDEV_MAX_DEVS 128  /**< Max number of devices */
44 #endif
45 
/** Flags indicate current state of BBDEV device */
enum rte_bbdev_state {
	RTE_BBDEV_UNUSED,      /**< Device slot is free / not claimed by any driver */
	RTE_BBDEV_INITIALIZED  /**< Device has been initialised by a driver */
};
51 
52 /**
53  * Get the total number of devices that have been successfully initialised.
54  *
55  * @return
56  *   The total number of usable devices.
57  */
58 __rte_experimental
59 uint16_t
60 rte_bbdev_count(void);
61 
62 /**
63  * Check if a device is valid.
64  *
65  * @param dev_id
66  *   The identifier of the device.
67  *
68  * @return
69  *   true if device ID is valid and device is attached, false otherwise.
70  */
71 __rte_experimental
72 bool
73 rte_bbdev_is_valid(uint16_t dev_id);
74 
75 /**
76  * Get the next enabled device.
77  *
78  * @param dev_id
79  *   The current device
80  *
81  * @return
82  *   - The next device, or
83  *   - RTE_BBDEV_MAX_DEVS if none found
84  */
85 __rte_experimental
86 uint16_t
87 rte_bbdev_find_next(uint16_t dev_id);
88 
/** Iterate through all enabled devices.
 *  NOTE(review): the -1 start value converts to UINT16_MAX, since
 *  rte_bbdev_find_next() takes uint16_t; presumably the implementation
 *  treats that as "scan from the first device" -- confirm in rte_bbdev.c.
 *  The loop ends when find_next returns RTE_BBDEV_MAX_DEVS.
 */
#define RTE_BBDEV_FOREACH(i) for (i = rte_bbdev_find_next(-1); \
		i < RTE_BBDEV_MAX_DEVS; \
		i = rte_bbdev_find_next(i))
93 
/**
 * Set up device queues.
 * This function must be called on a device before starting it. It can also
 * be called again while the device is in the stopped state. If any device
 * queues have already been configured, their configuration will be cleared
 * by a call to this function.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param num_queues
 *   Number of queues to configure on device.
 * @param socket_id
 *   ID of a socket which will be used to allocate memory.
 *
 * @return
 *   - 0 on success
 *   - -ENODEV if dev_id is invalid or the device is corrupted
 *   - -EINVAL if num_queues is invalid, 0 or greater than maximum
 *   - -EBUSY if the identified device has already started
 *   - -ENOMEM if unable to allocate memory
 */
__rte_experimental
int
rte_bbdev_setup_queues(uint16_t dev_id, uint16_t num_queues, int socket_id);
118 
119 /**
120  * Enable interrupts.
121  * This function may be called before starting the device to enable the
122  * interrupts if they are available.
123  *
124  * @param dev_id
125  *   The identifier of the device.
126  *
127  * @return
128  *   - 0 on success
129  *   - -ENODEV if dev_id is invalid or the device is corrupted
130  *   - -EBUSY if the identified device has already started
131  *   - -ENOTSUP if the interrupts are not supported by the device
132  */
133 __rte_experimental
134 int
135 rte_bbdev_intr_enable(uint16_t dev_id);
136 
/** Device queue configuration structure, used by rte_bbdev_queue_configure(). */
struct rte_bbdev_queue_conf {
	int socket;  /**< NUMA socket used for memory allocation */
	uint32_t queue_size;  /**< Size of queue */
	uint8_t priority;  /**< Queue priority */
	bool deferred_start; /**< Do not start queue when device is started. */
	enum rte_bbdev_op_type op_type; /**< Operation type */
};
145 
146 /**
147  * Configure a queue on a device.
148  * This function can be called after device configuration, and before starting.
149  * It can also be called when the device or the queue is in the stopped state.
150  *
151  * @param dev_id
152  *   The identifier of the device.
153  * @param queue_id
154  *   The index of the queue.
155  * @param conf
156  *   The queue configuration. If NULL, a default configuration will be used.
157  *
158  * @return
159  *   - 0 on success
160  *   - EINVAL if the identified queue size or priority are invalid
161  *   - EBUSY if the identified queue or its device have already started
162  */
163 __rte_experimental
164 int
165 rte_bbdev_queue_configure(uint16_t dev_id, uint16_t queue_id,
166 		const struct rte_bbdev_queue_conf *conf);
167 
168 /**
169  * Start a device.
170  * This is the last step needed before enqueueing operations is possible.
171  *
172  * @param dev_id
173  *   The identifier of the device.
174  *
175  * @return
176  *   - 0 on success
177  *   - negative value on failure - as returned from PMD driver
178  */
179 __rte_experimental
180 int
181 rte_bbdev_start(uint16_t dev_id);
182 
183 /**
184  * Stop a device.
185  * The device can be reconfigured, and restarted after being stopped.
186  *
187  * @param dev_id
188  *   The identifier of the device.
189  *
190  * @return
191  *   - 0 on success
192  */
193 __rte_experimental
194 int
195 rte_bbdev_stop(uint16_t dev_id);
196 
197 /**
198  * Close a device.
199  * The device cannot be restarted without reconfiguration!
200  *
201  * @param dev_id
202  *   The identifier of the device.
203  *
204  * @return
205  *   - 0 on success
206  */
207 __rte_experimental
208 int
209 rte_bbdev_close(uint16_t dev_id);
210 
211 /**
212  * Start a specified queue on a device.
213  * This is only needed if the queue has been stopped, or if the deferred_start
214  * flag has been set when configuring the queue.
215  *
216  * @param dev_id
217  *   The identifier of the device.
218  * @param queue_id
219  *   The index of the queue.
220  *
221  * @return
222  *   - 0 on success
223  *   - negative value on failure - as returned from PMD driver
224  */
225 __rte_experimental
226 int
227 rte_bbdev_queue_start(uint16_t dev_id, uint16_t queue_id);
228 
/**
 * Stop a specified queue on a device, to allow reconfiguration.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 *
 * @return
 *   - 0 on success
 *   - negative value on failure - as returned from PMD driver
 */
__rte_experimental
int
rte_bbdev_queue_stop(uint16_t dev_id, uint16_t queue_id);
244 
/** Device statistics, as retrieved with rte_bbdev_stats_get(). */
struct rte_bbdev_stats {
	uint64_t enqueued_count;  /**< Count of all operations enqueued */
	uint64_t dequeued_count;  /**< Count of all operations dequeued */
	/** Total error count on operations enqueued */
	uint64_t enqueue_err_count;
	/** Total error count on operations dequeued */
	uint64_t dequeue_err_count;
	/** CPU cycles consumed by the (HW/SW) accelerator device to offload
	 *  the enqueue request to its internal queues.
	 *  - For a HW device this is the cycles consumed in MMIO write
	 *  - For a SW (vdev) device, this is the processing time of the
	 *     bbdev operation
	 */
	uint64_t acc_offload_cycles;
};
261 
262 /**
263  * Retrieve the general I/O statistics of a device.
264  *
265  * @param dev_id
266  *   The identifier of the device.
267  * @param stats
268  *   Pointer to structure to where statistics will be copied. On error, this
269  *   location may or may not have been modified.
270  *
271  * @return
272  *   - 0 on success
273  *   - EINVAL if invalid parameter pointer is provided
274  */
275 __rte_experimental
276 int
277 rte_bbdev_stats_get(uint16_t dev_id, struct rte_bbdev_stats *stats);
278 
279 /**
280  * Reset the statistics of a device.
281  *
282  * @param dev_id
283  *   The identifier of the device.
284  * @return
285  *   - 0 on success
286  */
287 __rte_experimental
288 int
289 rte_bbdev_stats_reset(uint16_t dev_id);
290 
/** Device information supplied by the device's driver.
 *  Exposed to applications as the `drv` member of struct rte_bbdev_info.
 */
struct rte_bbdev_driver_info {
	/** Driver name */
	const char *driver_name;

	/** Maximum number of queues supported by the device */
	unsigned int max_num_queues;
	/** Queue size limit (queue size must also be power of 2) */
	uint32_t queue_size_lim;
	/** Set if device off-loads operation to hardware  */
	bool hardware_accelerated;
	/** Max value supported by queue priority for DL */
	uint8_t max_dl_queue_priority;
	/** Max value supported by queue priority for UL */
	uint8_t max_ul_queue_priority;
	/** Set if device supports per-queue interrupts */
	bool queue_intr_supported;
	/** Minimum alignment of buffers, in bytes */
	uint16_t min_alignment;
	/** HARQ memory available in kB */
	uint32_t harq_buffer_size;
	/** Byte endianness (RTE_BIG_ENDIAN/RTE_LITTLE_ENDIAN) supported
	 *  for input/output data
	 */
	uint8_t data_endianness;
	/** Default queue configuration used if none is supplied  */
	struct rte_bbdev_queue_conf default_queue_conf;
	/** Device operation capabilities (terminated by
	 *  RTE_BBDEV_END_OF_CAPABILITIES_LIST())
	 */
	const struct rte_bbdev_op_cap *capabilities;
	/** Device cpu_flag requirements */
	const enum rte_cpu_flag_t *cpu_flag_reqs;
};
323 
/** Macro used at end of bbdev PMD list: sentinel capability entry whose
 *  op type is RTE_BBDEV_OP_NONE (defined in rte_bbdev_op.h).
 */
#define RTE_BBDEV_END_OF_CAPABILITIES_LIST() \
	{ RTE_BBDEV_OP_NONE }
327 
/**
 * Device information structure used by an application to discover a device's
 * capabilities and current configuration. Filled in by rte_bbdev_info_get().
 */
struct rte_bbdev_info {
	int socket_id;  /**< NUMA socket that device is on */
	const char *dev_name;  /**< Unique device name */
	const struct rte_device *device; /**< Device Information */
	uint16_t num_queues;  /**< Number of queues currently configured */
	bool started;  /**< Set if device is currently started */
	struct rte_bbdev_driver_info drv;  /**< Info from device driver */
};
340 
341 /**
342  * Retrieve information about a device.
343  *
344  * @param dev_id
345  *   The identifier of the device.
346  * @param dev_info
347  *   Pointer to structure to where information will be copied. On error, this
348  *   location may or may not have been modified.
349  *
350  * @return
351  *   - 0 on success
352  *   - EINVAL if invalid parameter pointer is provided
353  */
354 __rte_experimental
355 int
356 rte_bbdev_info_get(uint16_t dev_id, struct rte_bbdev_info *dev_info);
357 
/** Queue information, filled in by rte_bbdev_queue_info_get(). */
struct rte_bbdev_queue_info {
	/** Current queue configuration */
	struct rte_bbdev_queue_conf conf;
	/** Set if queue is currently started */
	bool started;
};
365 
366 /**
367  * Retrieve information about a specific queue on a device.
368  *
369  * @param dev_id
370  *   The identifier of the device.
371  * @param queue_id
372  *   The index of the queue.
373  * @param queue_info
374  *   Pointer to structure to where information will be copied. On error, this
375  *   location may or may not have been modified.
376  *
377  * @return
378  *   - 0 on success
379  *   - EINVAL if invalid parameter pointer is provided
380  */
381 __rte_experimental
382 int
383 rte_bbdev_queue_info_get(uint16_t dev_id, uint16_t queue_id,
384 		struct rte_bbdev_queue_info *queue_info);
385 
/** @internal The data structure associated with each queue of a device. */
struct rte_bbdev_queue_data {
	void *queue_private;  /**< Driver-specific per-queue data */
	struct rte_bbdev_queue_conf conf;  /**< Current configuration */
	struct rte_bbdev_stats queue_stats;  /**< Queue statistics */
	bool started;  /**< Set if queue is currently started */
};
393 
/** @internal Enqueue encode operations for processing on queue of a device.
 *  Returns the number of operations actually enqueued.
 */
typedef uint16_t (*rte_bbdev_enqueue_enc_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_enc_op **ops,
		uint16_t num);

/** @internal Enqueue decode operations for processing on queue of a device.
 *  Returns the number of operations actually enqueued.
 */
typedef uint16_t (*rte_bbdev_enqueue_dec_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_dec_op **ops,
		uint16_t num);

/** @internal Dequeue encode operations from a queue of a device.
 *  Returns the number of operations actually dequeued.
 */
typedef uint16_t (*rte_bbdev_dequeue_enc_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_enc_op **ops, uint16_t num);

/** @internal Dequeue decode operations from a queue of a device.
 *  Returns the number of operations actually dequeued.
 */
typedef uint16_t (*rte_bbdev_dequeue_dec_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_dec_op **ops, uint16_t num);
415 
416 #define RTE_BBDEV_NAME_MAX_LEN  64  /**< Max length of device name */
417 
/**
 * @internal The data associated with a device, with no function pointers.
 * This structure is safe to place in shared memory to be common among
 * different processes in a multi-process configuration. Drivers can access
 * these fields, but should never write to them!
 */
struct rte_bbdev_data {
	char name[RTE_BBDEV_NAME_MAX_LEN]; /**< Unique identifier name */
	void *dev_private;  /**< Driver-specific private data */
	uint16_t num_queues;  /**< Number of currently configured queues */
	struct rte_bbdev_queue_data *queues;  /**< Queue structures */
	uint16_t dev_id;  /**< Device ID */
	int socket_id;  /**< NUMA socket that device is on */
	bool started;  /**< Device run-time state */
	uint16_t process_cnt;  /**< Counter of processes using the device */
};
434 
435 /* Forward declarations */
436 struct rte_bbdev_ops;
437 struct rte_bbdev_callback;
438 struct rte_intr_handle;
439 
440 /** Structure to keep track of registered callbacks */
441 RTE_TAILQ_HEAD(rte_bbdev_cb_list, rte_bbdev_callback);
442 
/**
 * @internal The data structure associated with a device. Drivers can access
 * these fields, but should only write to the *_ops fields.
 */
struct __rte_cache_aligned rte_bbdev {
	/** Enqueue encode function */
	rte_bbdev_enqueue_enc_ops_t enqueue_enc_ops;
	/** Enqueue decode function */
	rte_bbdev_enqueue_dec_ops_t enqueue_dec_ops;
	/** Dequeue encode function */
	rte_bbdev_dequeue_enc_ops_t dequeue_enc_ops;
	/** Dequeue decode function */
	rte_bbdev_dequeue_dec_ops_t dequeue_dec_ops;
	/** Enqueue LDPC encode function */
	rte_bbdev_enqueue_enc_ops_t enqueue_ldpc_enc_ops;
	/** Enqueue LDPC decode function */
	rte_bbdev_enqueue_dec_ops_t enqueue_ldpc_dec_ops;
	/** Dequeue LDPC encode function */
	rte_bbdev_dequeue_enc_ops_t dequeue_ldpc_enc_ops;
	/** Dequeue LDPC decode function */
	rte_bbdev_dequeue_dec_ops_t dequeue_ldpc_dec_ops;
	const struct rte_bbdev_ops *dev_ops;  /**< Functions exported by PMD */
	struct rte_bbdev_data *data;  /**< Pointer to device data */
	enum rte_bbdev_state state;  /**< If device is currently used or not */
	struct rte_device *device; /**< Backing device */
	/** User application callback for interrupts if present */
	struct rte_bbdev_cb_list list_cbs;
	struct rte_intr_handle *intr_handle; /**< Device interrupt handle */
};
472 
473 /** @internal array of all devices */
474 extern struct rte_bbdev rte_bbdev_devices[];
475 
476 /**
477  * Enqueue a burst of processed encode operations to a queue of the device.
478  * This functions only enqueues as many operations as currently possible and
479  * does not block until @p num_ops entries in the queue are available.
480  * This function does not provide any error notification to avoid the
481  * corresponding overhead.
482  *
483  * @param dev_id
484  *   The identifier of the device.
485  * @param queue_id
486  *   The index of the queue.
487  * @param ops
488  *   Pointer array containing operations to be enqueued Must have at least
489  *   @p num_ops entries
490  * @param num_ops
491  *   The maximum number of operations to enqueue.
492  *
493  * @return
494  *   The number of operations actually enqueued (this is the number of processed
495  *   entries in the @p ops array).
496  */
497 __rte_experimental
498 static inline uint16_t
499 rte_bbdev_enqueue_enc_ops(uint16_t dev_id, uint16_t queue_id,
500 		struct rte_bbdev_enc_op **ops, uint16_t num_ops)
501 {
502 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
503 	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
504 	return dev->enqueue_enc_ops(q_data, ops, num_ops);
505 }
506 
507 /**
508  * Enqueue a burst of processed decode operations to a queue of the device.
509  * This functions only enqueues as many operations as currently possible and
510  * does not block until @p num_ops entries in the queue are available.
511  * This function does not provide any error notification to avoid the
512  * corresponding overhead.
513  *
514  * @param dev_id
515  *   The identifier of the device.
516  * @param queue_id
517  *   The index of the queue.
518  * @param ops
519  *   Pointer array containing operations to be enqueued Must have at least
520  *   @p num_ops entries
521  * @param num_ops
522  *   The maximum number of operations to enqueue.
523  *
524  * @return
525  *   The number of operations actually enqueued (this is the number of processed
526  *   entries in the @p ops array).
527  */
528 __rte_experimental
529 static inline uint16_t
530 rte_bbdev_enqueue_dec_ops(uint16_t dev_id, uint16_t queue_id,
531 		struct rte_bbdev_dec_op **ops, uint16_t num_ops)
532 {
533 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
534 	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
535 	return dev->enqueue_dec_ops(q_data, ops, num_ops);
536 }
537 
538 /**
539  * Enqueue a burst of processed encode operations to a queue of the device.
540  * This functions only enqueues as many operations as currently possible and
541  * does not block until @p num_ops entries in the queue are available.
542  * This function does not provide any error notification to avoid the
543  * corresponding overhead.
544  *
545  * @param dev_id
546  *   The identifier of the device.
547  * @param queue_id
548  *   The index of the queue.
549  * @param ops
550  *   Pointer array containing operations to be enqueued Must have at least
551  *   @p num_ops entries
552  * @param num_ops
553  *   The maximum number of operations to enqueue.
554  *
555  * @return
556  *   The number of operations actually enqueued (this is the number of processed
557  *   entries in the @p ops array).
558  */
559 __rte_experimental
560 static inline uint16_t
561 rte_bbdev_enqueue_ldpc_enc_ops(uint16_t dev_id, uint16_t queue_id,
562 		struct rte_bbdev_enc_op **ops, uint16_t num_ops)
563 {
564 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
565 	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
566 	return dev->enqueue_ldpc_enc_ops(q_data, ops, num_ops);
567 }
568 
569 /**
570  * Enqueue a burst of processed decode operations to a queue of the device.
571  * This functions only enqueues as many operations as currently possible and
572  * does not block until @p num_ops entries in the queue are available.
573  * This function does not provide any error notification to avoid the
574  * corresponding overhead.
575  *
576  * @param dev_id
577  *   The identifier of the device.
578  * @param queue_id
579  *   The index of the queue.
580  * @param ops
581  *   Pointer array containing operations to be enqueued Must have at least
582  *   @p num_ops entries
583  * @param num_ops
584  *   The maximum number of operations to enqueue.
585  *
586  * @return
587  *   The number of operations actually enqueued (this is the number of processed
588  *   entries in the @p ops array).
589  */
590 __rte_experimental
591 static inline uint16_t
592 rte_bbdev_enqueue_ldpc_dec_ops(uint16_t dev_id, uint16_t queue_id,
593 		struct rte_bbdev_dec_op **ops, uint16_t num_ops)
594 {
595 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
596 	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
597 	return dev->enqueue_ldpc_dec_ops(q_data, ops, num_ops);
598 }
599 
600 
601 /**
602  * Dequeue a burst of processed encode operations from a queue of the device.
603  * This functions returns only the current contents of the queue, and does not
604  * block until @ num_ops is available.
605  * This function does not provide any error notification to avoid the
606  * corresponding overhead.
607  *
608  * @param dev_id
609  *   The identifier of the device.
610  * @param queue_id
611  *   The index of the queue.
612  * @param ops
613  *   Pointer array where operations will be dequeued to. Must have at least
614  *   @p num_ops entries
615  *   ie. A pointer to a table of void * pointers (ops) that will be filled.
616  * @param num_ops
617  *   The maximum number of operations to dequeue.
618  *
619  * @return
620  *   The number of operations actually dequeued (this is the number of entries
621  *   copied into the @p ops array).
622  */
623 __rte_experimental
624 static inline uint16_t
625 rte_bbdev_dequeue_enc_ops(uint16_t dev_id, uint16_t queue_id,
626 		struct rte_bbdev_enc_op **ops, uint16_t num_ops)
627 {
628 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
629 	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
630 	return dev->dequeue_enc_ops(q_data, ops, num_ops);
631 }
632 
633 /**
634  * Dequeue a burst of processed decode operations from a queue of the device.
635  * This functions returns only the current contents of the queue, and does not
636  * block until @ num_ops is available.
637  * This function does not provide any error notification to avoid the
638  * corresponding overhead.
639  *
640  * @param dev_id
641  *   The identifier of the device.
642  * @param queue_id
643  *   The index of the queue.
644  * @param ops
645  *   Pointer array where operations will be dequeued to. Must have at least
646  *   @p num_ops entries
647  *   ie. A pointer to a table of void * pointers (ops) that will be filled.
648  * @param num_ops
649  *   The maximum number of operations to dequeue.
650  *
651  * @return
652  *   The number of operations actually dequeued (this is the number of entries
653  *   copied into the @p ops array).
654  */
655 
656 __rte_experimental
657 static inline uint16_t
658 rte_bbdev_dequeue_dec_ops(uint16_t dev_id, uint16_t queue_id,
659 		struct rte_bbdev_dec_op **ops, uint16_t num_ops)
660 {
661 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
662 	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
663 	return dev->dequeue_dec_ops(q_data, ops, num_ops);
664 }
665 
666 
667 /**
668  * Dequeue a burst of processed encode operations from a queue of the device.
669  * This functions returns only the current contents of the queue, and does not
670  * block until @ num_ops is available.
671  * This function does not provide any error notification to avoid the
672  * corresponding overhead.
673  *
674  * @param dev_id
675  *   The identifier of the device.
676  * @param queue_id
677  *   The index of the queue.
678  * @param ops
679  *   Pointer array where operations will be dequeued to. Must have at least
680  *   @p num_ops entries
681  * @param num_ops
682  *   The maximum number of operations to dequeue.
683  *
684  * @return
685  *   The number of operations actually dequeued (this is the number of entries
686  *   copied into the @p ops array).
687  */
688 __rte_experimental
689 static inline uint16_t
690 rte_bbdev_dequeue_ldpc_enc_ops(uint16_t dev_id, uint16_t queue_id,
691 		struct rte_bbdev_enc_op **ops, uint16_t num_ops)
692 {
693 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
694 	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
695 	return dev->dequeue_ldpc_enc_ops(q_data, ops, num_ops);
696 }
697 
698 /**
699  * Dequeue a burst of processed decode operations from a queue of the device.
700  * This functions returns only the current contents of the queue, and does not
701  * block until @ num_ops is available.
702  * This function does not provide any error notification to avoid the
703  * corresponding overhead.
704  *
705  * @param dev_id
706  *   The identifier of the device.
707  * @param queue_id
708  *   The index of the queue.
709  * @param ops
710  *   Pointer array where operations will be dequeued to. Must have at least
711  *   @p num_ops entries
712  * @param num_ops
713  *   The maximum number of operations to dequeue.
714  *
715  * @return
716  *   The number of operations actually dequeued (this is the number of entries
717  *   copied into the @p ops array).
718  */
719 __rte_experimental
720 static inline uint16_t
721 rte_bbdev_dequeue_ldpc_dec_ops(uint16_t dev_id, uint16_t queue_id,
722 		struct rte_bbdev_dec_op **ops, uint16_t num_ops)
723 {
724 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
725 	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
726 	return dev->dequeue_ldpc_dec_ops(q_data, ops, num_ops);
727 }
728 
/** Definitions of device event types */
enum rte_bbdev_event_type {
	RTE_BBDEV_EVENT_UNKNOWN,  /**< unknown event type */
	RTE_BBDEV_EVENT_ERROR,  /**< error interrupt event */
	RTE_BBDEV_EVENT_DEQUEUE,  /**< dequeue event */
	RTE_BBDEV_EVENT_MAX  /**< max value of this enum (not a real event) */
};
736 
/**
 * Typedef for application callback function registered by application
 * software for notification of device events.
 *
 * @param dev_id
 *   Device identifier.
 * @param event
 *   Device event that triggered the notification.
 * @param cb_arg
 *   User specified parameter to be passed to user's callback function
 *   (supplied when registering via rte_bbdev_callback_register()).
 * @param ret_param
 *   To pass data back to user application.
 */
typedef void (*rte_bbdev_cb_fn)(uint16_t dev_id,
		enum rte_bbdev_event_type event, void *cb_arg,
		void *ret_param);
753 
754 /**
755  * Register a callback function for specific device id. Multiple callbacks can
756  * be added and will be called in the order they are added when an event is
757  * triggered. Callbacks are called in a separate thread created by the DPDK EAL.
758  *
759  * @param dev_id
760  *   Device id.
761  * @param event
762  *   The event that the callback will be registered for.
763  * @param cb_fn
764  *   User supplied callback function to be called.
765  * @param cb_arg
766  *   Pointer to parameter that will be passed to the callback.
767  *
768  * @return
769  *   Zero on success, negative value on failure.
770  */
771 __rte_experimental
772 int
773 rte_bbdev_callback_register(uint16_t dev_id, enum rte_bbdev_event_type event,
774 		rte_bbdev_cb_fn cb_fn, void *cb_arg);
775 
776 /**
777  * Unregister a callback function for specific device id.
778  *
779  * @param dev_id
780  *   The device identifier.
781  * @param event
782  *   The event that the callback will be unregistered for.
783  * @param cb_fn
784  *   User supplied callback function to be unregistered.
785  * @param cb_arg
786  *   Pointer to the parameter supplied when registering the callback.
787  *   (void *)-1 means to remove all registered callbacks with the specified
788  *   function address.
789  *
790  * @return
791  *   - 0 on success
792  *   - EINVAL if invalid parameter pointer is provided
793  *   - EAGAIN if the provided callback pointer does not exist
794  */
795 __rte_experimental
796 int
797 rte_bbdev_callback_unregister(uint16_t dev_id, enum rte_bbdev_event_type event,
798 		rte_bbdev_cb_fn cb_fn, void *cb_arg);
799 
800 /**
801  * Enable a one-shot interrupt on the next operation enqueued to a particular
802  * queue. The interrupt will be triggered when the operation is ready to be
803  * dequeued. To handle the interrupt, an epoll file descriptor must be
804  * registered using rte_bbdev_queue_intr_ctl(), and then an application
805  * thread/lcore can wait for the interrupt using rte_epoll_wait().
806  *
807  * @param dev_id
808  *   The device identifier.
809  * @param queue_id
810  *   The index of the queue.
811  *
812  * @return
813  *   - 0 on success
814  *   - negative value on failure - as returned from PMD driver
815  */
816 __rte_experimental
817 int
818 rte_bbdev_queue_intr_enable(uint16_t dev_id, uint16_t queue_id);
819 
820 /**
821  * Disable a one-shot interrupt on the next operation enqueued to a particular
822  * queue (if it has been enabled).
823  *
824  * @param dev_id
825  *   The device identifier.
826  * @param queue_id
827  *   The index of the queue.
828  *
829  * @return
830  *   - 0 on success
831  *   - negative value on failure - as returned from PMD driver
832  */
833 __rte_experimental
834 int
835 rte_bbdev_queue_intr_disable(uint16_t dev_id, uint16_t queue_id);
836 
837 /**
838  * Control interface for per-queue interrupts.
839  *
840  * @param dev_id
841  *   The device identifier.
842  * @param queue_id
843  *   The index of the queue.
844  * @param epfd
845  *   Epoll file descriptor that will be associated with the interrupt source.
846  *   If the special value RTE_EPOLL_PER_THREAD is provided, a per thread epoll
847  *   file descriptor created by the EAL is used (RTE_EPOLL_PER_THREAD can also
848  *   be used when calling rte_epoll_wait()).
849  * @param op
850  *   The operation be performed for the vector.RTE_INTR_EVENT_ADD or
851  *   RTE_INTR_EVENT_DEL.
852  * @param data
853  *   User context, that will be returned in the epdata.data field of the
854  *   rte_epoll_event structure filled in by rte_epoll_wait().
855  *
856  * @return
857  *   - 0 on success
858  *   - ENOTSUP if interrupts are not supported by the identified device
859  *   - negative value on failure - as returned from PMD driver
860  */
861 __rte_experimental
862 int
863 rte_bbdev_queue_intr_ctl(uint16_t dev_id, uint16_t queue_id, int epfd, int op,
864 		void *data);
865 
866 #ifdef __cplusplus
867 }
868 #endif
869 
870 #endif /* _RTE_BBDEV_H_ */
871