1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Intel Corporation
3  */
4 
5 #ifndef _RTE_BBDEV_H_
6 #define _RTE_BBDEV_H_
7 
8 /**
9  * @file rte_bbdev.h
10  *
11  * Wireless base band device abstraction APIs.
12  *
13  * This API allows an application to discover, configure and use a device to
14  * process operations. An asynchronous API (enqueue, followed by later dequeue)
15  * is used for processing operations.
16  *
17  * The functions in this API are not thread-safe when called on the same
18  * target object (a device, or a queue on a device), with the exception that
19  * one thread can enqueue operations to a queue while another thread dequeues
20  * from the same queue.
21  */
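
/*
 * Illustrative usage sketch for the asynchronous model described above
 * (assumptions: the device and queue are already configured and started, and
 * the op pointers come from a bbdev op mempool). One thread may enqueue to a
 * queue while another thread dequeues from the same queue; all other calls on
 * the same device or queue must be serialised by the application.
 *
 *	// producer lcore
 *	uint16_t sent = rte_bbdev_enqueue_enc_ops(dev_id, q_id, ops, n);
 *
 *	// consumer lcore
 *	uint16_t done = rte_bbdev_dequeue_enc_ops(dev_id, q_id, deq_ops, n);
 */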
22 
23 #ifdef __cplusplus
24 extern "C" {
25 #endif
26 
27 #include <stdint.h>
28 #include <stdbool.h>
29 
30 #include <rte_compat.h>
31 #include <rte_cpuflags.h>
32 
33 #include "rte_bbdev_op.h"
34 
35 #ifndef RTE_BBDEV_MAX_DEVS
36 #define RTE_BBDEV_MAX_DEVS 128  /**< Max number of devices */
37 #endif
38 
39 /*
40  * Maximum size used to manage the enum rte_bbdev_enqueue_status,
41  * including padding for future enum insertions.
42  * The enum values must be explicitly kept smaller than or equal to this padded maximum size.
43  */
44 #define RTE_BBDEV_ENQ_STATUS_SIZE_MAX 6
45 
46 /** Flags indicating the current state of a BBDEV device */
47 enum rte_bbdev_state {
48 	RTE_BBDEV_UNUSED,
49 	RTE_BBDEV_INITIALIZED
50 };
51 
52 /**
53  * Get the total number of devices that have been successfully initialised.
54  *
55  * @return
56  *   The total number of usable devices.
57  */
58 uint16_t
59 rte_bbdev_count(void);
60 
61 /**
62  * Check if a device is valid.
63  *
64  * @param dev_id
65  *   The identifier of the device.
66  *
67  * @return
68  *   true if device ID is valid and device is attached, false otherwise.
69  */
70 bool
71 rte_bbdev_is_valid(uint16_t dev_id);
72 
73 /**
74  * Get the next enabled device.
75  *
76  * @param dev_id
77  *   The current device
78  *
79  * @return
80  *   - The next device, or
81  *   - RTE_BBDEV_MAX_DEVS if none found
82  */
83 uint16_t
84 rte_bbdev_find_next(uint16_t dev_id);
85 
86 /** Iterate through all enabled devices */
87 #define RTE_BBDEV_FOREACH(i) for (i = rte_bbdev_find_next(-1); \
88 		i < RTE_BBDEV_MAX_DEVS; \
89 		i = rte_bbdev_find_next(i))
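
/*
 * Example: enumerating devices with the helpers above. A minimal sketch; the
 * variable names and printf formatting are illustrative only.
 *
 *	uint16_t dev_id;
 *
 *	printf("%u bbdev devices initialised\n", rte_bbdev_count());
 *	RTE_BBDEV_FOREACH(dev_id) {
 *		if (rte_bbdev_is_valid(dev_id))
 *			printf("bbdev %u is usable\n", dev_id);
 *	}
 */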
90 
91 /**
92  * Set up device queues.
93  * This function must be called before individual queues are configured and
94  * before the device is started. It can also be called when a device is in
95  * the stopped state. If any device queues have been configured, their
96  * configuration will be cleared by a call to this function.
97  *
98  * @param dev_id
99  *   The identifier of the device.
100  * @param num_queues
101  *   Number of queues to configure on device.
102  * @param socket_id
103  *   ID of a socket which will be used to allocate memory.
104  *
105  * @return
106  *   - 0 on success
107  *   - -ENODEV if dev_id is invalid or the device is corrupted
108  *   - -EINVAL if num_queues is invalid, i.e. 0 or greater than the maximum supported
109  *   - -EBUSY if the identified device has already started
110  *   - -ENOMEM if unable to allocate memory
111  */
112 int
113 rte_bbdev_setup_queues(uint16_t dev_id, uint16_t num_queues, int socket_id);
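
/*
 * Example: device-level queue set-up. A minimal sketch, assuming dev_id is a
 * valid device and the requested queue count is clamped to the driver limit
 * reported by rte_bbdev_info_get() (declared later in this file).
 *
 *	struct rte_bbdev_info info;
 *	uint16_t num_queues = 4;	// illustrative value
 *
 *	rte_bbdev_info_get(dev_id, &info);
 *	if (num_queues > info.drv.max_num_queues)
 *		num_queues = info.drv.max_num_queues;
 *	if (rte_bbdev_setup_queues(dev_id, num_queues, rte_socket_id()) != 0)
 *		return -1;	// device-level set-up failed
 */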
114 
115 /**
116  * Enable interrupts.
117  * This function may be called before starting the device to enable the
118  * interrupts if they are available.
119  *
120  * @param dev_id
121  *   The identifier of the device.
122  *
123  * @return
124  *   - 0 on success
125  *   - -ENODEV if dev_id is invalid or the device is corrupted
126  *   - -EBUSY if the identified device has already started
127  *   - -ENOTSUP if the interrupts are not supported by the device
128  */
129 int
130 rte_bbdev_intr_enable(uint16_t dev_id);
131 
132 /** Device queue configuration structure */
133 struct rte_bbdev_queue_conf {
134 	int socket;  /**< NUMA socket used for memory allocation */
135 	uint32_t queue_size;  /**< Size of queue */
136 	uint8_t priority;  /**< Queue priority */
137 	bool deferred_start; /**< Do not start queue when device is started. */
138 	enum rte_bbdev_op_type op_type; /**< Operation type */
139 };
140 
141 /**
142  * Configure a queue on a device.
143  * This function can be called after device configuration, and before starting.
144  * It can also be called when the device or the queue is in the stopped state.
145  *
146  * @param dev_id
147  *   The identifier of the device.
148  * @param queue_id
149  *   The index of the queue.
150  * @param conf
151  *   The queue configuration. If NULL, a default configuration will be used.
152  *
153  * @return
154  *   - 0 on success
155  *   - EINVAL if the identified queue size or priority is invalid
156  *   - EBUSY if the identified queue or its device has already been started
157  */
158 int
159 rte_bbdev_queue_configure(uint16_t dev_id, uint16_t queue_id,
160 		const struct rte_bbdev_queue_conf *conf);
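
/*
 * Example: configuring each queue from the driver defaults. A sketch only,
 * assuming rte_bbdev_setup_queues() has already been called; it reuses the
 * default configuration reported by the driver and overrides the op type.
 *
 *	struct rte_bbdev_info info;
 *	struct rte_bbdev_queue_conf conf;
 *	uint16_t q_id;
 *
 *	rte_bbdev_info_get(dev_id, &info);
 *	conf = info.drv.default_queue_conf;
 *	conf.op_type = RTE_BBDEV_OP_LDPC_DEC;	// illustrative op type
 *	conf.socket = rte_socket_id();
 *	for (q_id = 0; q_id < num_queues; q_id++)
 *		if (rte_bbdev_queue_configure(dev_id, q_id, &conf) != 0)
 *			return -1;
 */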
161 
162 /**
163  * Start a device.
164  * This is the last step needed before enqueueing operations is possible.
165  *
166  * @param dev_id
167  *   The identifier of the device.
168  *
169  * @return
170  *   - 0 on success
171  *   - negative value on failure - as returned from PMD
172  */
173 int
174 rte_bbdev_start(uint16_t dev_id);
175 
176 /**
177  * Stop a device.
178  * The device can be reconfigured, and restarted after being stopped.
179  *
180  * @param dev_id
181  *   The identifier of the device.
182  *
183  * @return
184  *   - 0 on success
185  */
186 int
187 rte_bbdev_stop(uint16_t dev_id);
188 
189 /**
190  * Close a device.
191  * The device cannot be restarted without reconfiguration!
192  *
193  * @param dev_id
194  *   The identifier of the device.
195  *
196  * @return
197  *   - 0 on success
198  */
199 int
200 rte_bbdev_close(uint16_t dev_id);
201 
202 /**
203  * Start a specified queue on a device.
204  * This is only needed if the queue has been stopped, or if the deferred_start
205  * flag has been set when configuring the queue.
206  *
207  * @param dev_id
208  *   The identifier of the device.
209  * @param queue_id
210  *   The index of the queue.
211  *
212  * @return
213  *   - 0 on success
214  *   - negative value on failure - as returned from PMD
215  */
216 int
217 rte_bbdev_queue_start(uint16_t dev_id, uint16_t queue_id);
218 
219 /**
220  * Stop a specified queue on a device, to allow reconfiguration.
221  *
222  * @param dev_id
223  *   The identifier of the device.
224  * @param queue_id
225  *   The index of the queue.
226  *
227  * @return
228  *   - 0 on success
229  *   - negative value on failure - as returned from PMD
230  */
231 int
232 rte_bbdev_queue_stop(uint16_t dev_id, uint16_t queue_id);
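
/*
 * Example: deferred queue start. A sketch assuming the queue was configured
 * with deferred_start set, so it is not started by rte_bbdev_start() and is
 * brought up (and later stopped) explicitly.
 *
 *	conf.deferred_start = true;
 *	rte_bbdev_queue_configure(dev_id, q_id, &conf);
 *	rte_bbdev_start(dev_id);
 *	// ...
 *	rte_bbdev_queue_start(dev_id, q_id);	// queue becomes usable here
 *	// ...
 *	rte_bbdev_queue_stop(dev_id, q_id);	// allows reconfiguration
 */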
233 
234 /**
235  * Flags to indicate the reason why a previous enqueue may not have
236  * consumed all requested operations.
237  * If several reasons apply, the later one supersedes the earlier ones.
238  * The related macro RTE_BBDEV_ENQ_STATUS_SIZE_MAX can be used as an
239  * absolute maximum, notably for sizing arrays, while still allowing for
240  * future enum insertions.
241  */
242 enum rte_bbdev_enqueue_status {
243 	RTE_BBDEV_ENQ_STATUS_NONE,             /**< Nothing to report. */
244 	RTE_BBDEV_ENQ_STATUS_QUEUE_FULL,       /**< Not enough room in queue. */
245 	RTE_BBDEV_ENQ_STATUS_RING_FULL,        /**< Not enough room in ring. */
246 	RTE_BBDEV_ENQ_STATUS_INVALID_OP,       /**< Operation was rejected as invalid. */
247 	/* Note: RTE_BBDEV_ENQ_STATUS_SIZE_MAX must be larger than or equal to the maximum enum value. */
248 };
249 
250 /**
251  * Flags to indicate the status of the device.
252  */
253 enum rte_bbdev_device_status {
254 	RTE_BBDEV_DEV_NOSTATUS,        /**< Nothing being reported. */
255 	RTE_BBDEV_DEV_NOT_SUPPORTED,   /**< Device status is not supported on the PMD. */
256 	RTE_BBDEV_DEV_RESET,           /**< Device in reset and un-configured state. */
257 	RTE_BBDEV_DEV_CONFIGURED,      /**< Device is configured and ready to use. */
258 	RTE_BBDEV_DEV_ACTIVE,          /**< Device is configured and VF is being used. */
259 	RTE_BBDEV_DEV_FATAL_ERR,       /**< Device has hit a fatal uncorrectable error. */
260 	RTE_BBDEV_DEV_RESTART_REQ,     /**< Device requires application to restart. */
261 	RTE_BBDEV_DEV_RECONFIG_REQ,    /**< Device requires application to reconfigure queues. */
262 	RTE_BBDEV_DEV_CORRECT_ERR,     /**< Warning that a correctable error event has happened. */
263 };
264 
265 /** Device statistics. */
266 struct rte_bbdev_stats {
267 	uint64_t enqueued_count;  /**< Count of all operations enqueued */
268 	uint64_t dequeued_count;  /**< Count of all operations dequeued */
269 	/** Total error count on operations enqueued */
270 	uint64_t enqueue_err_count;
271 	/** Total error count on operations dequeued */
272 	uint64_t dequeue_err_count;
273 	/** Total warning count on operations enqueued. */
274 	uint64_t enqueue_warn_count;
275 	/** Total warning count on operations dequeued. */
276 	uint64_t dequeue_warn_count;
277 	/** Total enqueue status count based on *rte_bbdev_enqueue_status* enum. */
278 	uint64_t enqueue_status_count[RTE_BBDEV_ENQ_STATUS_SIZE_MAX];
279 	/** CPU cycles consumed by the (HW/SW) accelerator device to offload
280 	 *  the enqueue request to its internal queues.
281 	 *  - For a HW device this is the cycles consumed in MMIO write
282 	 *  - For a SW (vdev) device, this is the processing time of the
283 	 *     bbdev operation
284 	 */
285 	uint64_t acc_offload_cycles;
286 };
287 
288 /**
289  * Retrieve the general I/O statistics of a device.
290  *
291  * @param dev_id
292  *   The identifier of the device.
293  * @param stats
294  *   Pointer to structure to where statistics will be copied. On error, this
295  *   location may or may not have been modified.
296  *
297  * @return
298  *   - 0 on success
299  *   - EINVAL if an invalid parameter pointer is provided
300  */
301 int
302 rte_bbdev_stats_get(uint16_t dev_id, struct rte_bbdev_stats *stats);
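
/*
 * Example: reading and printing device statistics. A sketch; which counters
 * are actually maintained depends on the PMD.
 *
 *	struct rte_bbdev_stats stats;
 *
 *	if (rte_bbdev_stats_get(dev_id, &stats) == 0)
 *		printf("enq %"PRIu64" deq %"PRIu64" enq_err %"PRIu64"\n",
 *				stats.enqueued_count, stats.dequeued_count,
 *				stats.enqueue_err_count);
 */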
303 
304 /**
305  * Reset the statistics of a device.
306  *
307  * @param dev_id
308  *   The identifier of the device.
309  * @return
310  *   - 0 on success
311  */
312 int
313 rte_bbdev_stats_reset(uint16_t dev_id);
314 
315 /** Device information supplied by the device's driver */
316 
317 /* Structure rte_bbdev_driver_info 8< */
318 struct rte_bbdev_driver_info {
319 	/** Driver name */
320 	const char *driver_name;
321 
322 	/** Maximum number of queues supported by the device */
323 	unsigned int max_num_queues;
324 	/** Maximum number of queues supported per operation type */
325 	unsigned int num_queues[RTE_BBDEV_OP_TYPE_SIZE_MAX];
326 	/** Priority level supported per operation type */
327 	unsigned int queue_priority[RTE_BBDEV_OP_TYPE_SIZE_MAX];
328 	/** Queue size limit (queue size must also be a power of 2) */
329 	uint32_t queue_size_lim;
330 	/** Set if device off-loads operation to hardware  */
331 	bool hardware_accelerated;
332 	/** Max value supported by queue priority for DL */
333 	uint8_t max_dl_queue_priority;
334 	/** Max value supported by queue priority for UL */
335 	uint8_t max_ul_queue_priority;
336 	/** Set if device supports per-queue interrupts */
337 	bool queue_intr_supported;
338 	/** Device Status */
339 	enum rte_bbdev_device_status device_status;
340 	/** HARQ memory available in kB */
341 	uint32_t harq_buffer_size;
342 	/** Minimum alignment of buffers, in bytes */
343 	uint16_t min_alignment;
344 	/** Byte endianness (RTE_BIG_ENDIAN/RTE_LITTLE_ENDIAN) supported
345 	 *  for input/output data
346 	 */
347 	uint8_t data_endianness;
348 	/** Default queue configuration used if none is supplied  */
349 	struct rte_bbdev_queue_conf default_queue_conf;
350 	/** Device operation capabilities */
351 	const struct rte_bbdev_op_cap *capabilities;
352 	/** Device cpu_flag requirements */
353 	const enum rte_cpu_flag_t *cpu_flag_reqs;
354 	/** FFT windowing width for 2048 FFT - size defined in capability. */
355 	uint16_t *fft_window_width;
356 };
357 /* >8 End of structure rte_bbdev_driver_info. */
358 
359 /** Macro used to terminate a bbdev PMD capabilities list */
360 #define RTE_BBDEV_END_OF_CAPABILITIES_LIST() \
361 	{ RTE_BBDEV_OP_NONE }
362 
363 /**
364  * Device information structure used by an application to discover a device's
365  * capabilities and current configuration.
366  */
367 
368 /* Structure rte_bbdev_info 8< */
369 struct rte_bbdev_info {
370 	int socket_id;  /**< NUMA socket that device is on */
371 	const char *dev_name;  /**< Unique device name */
372 	const struct rte_device *device; /**< Device Information */
373 	uint16_t num_queues;  /**< Number of queues currently configured */
374 	bool started;  /**< Set if device is currently started */
375 	struct rte_bbdev_driver_info drv;  /**< Info from device driver */
376 };
377 /* >8 End of structure rte_bbdev_info. */
378 
379 /**
380  * Retrieve information about a device.
381  *
382  * @param dev_id
383  *   The identifier of the device.
384  * @param dev_info
385  *   Pointer to structure to where information will be copied. On error, this
386  *   location may or may not have been modified.
387  *
388  * @return
389  *   - 0 on success
390  *   - EINVAL if an invalid parameter pointer is provided
391  */
392 int
393 rte_bbdev_info_get(uint16_t dev_id, struct rte_bbdev_info *dev_info);
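
/*
 * Example: checking whether a device supports a given operation type by
 * walking the capabilities list, which is terminated by RTE_BBDEV_OP_NONE.
 * A minimal sketch only.
 *
 *	const struct rte_bbdev_op_cap *cap;
 *	struct rte_bbdev_info info;
 *	bool ldpc_dec_supported = false;
 *
 *	rte_bbdev_info_get(dev_id, &info);
 *	for (cap = info.drv.capabilities; cap->type != RTE_BBDEV_OP_NONE; cap++)
 *		if (cap->type == RTE_BBDEV_OP_LDPC_DEC)
 *			ldpc_dec_supported = true;
 */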
394 
395 /** Queue information */
396 struct rte_bbdev_queue_info {
397 	/** Current queue configuration */
398 	struct rte_bbdev_queue_conf conf;
399 	/** Set if queue is currently started */
400 	bool started;
401 };
402 
403 /**
404  * Retrieve information about a specific queue on a device.
405  *
406  * @param dev_id
407  *   The identifier of the device.
408  * @param queue_id
409  *   The index of the queue.
410  * @param queue_info
411  *   Pointer to structure to where information will be copied. On error, this
412  *   location may or may not have been modified.
413  *
414  * @return
415  *   - 0 on success
416  *   - EINVAL if an invalid parameter pointer is provided
417  */
418 int
419 rte_bbdev_queue_info_get(uint16_t dev_id, uint16_t queue_id,
420 		struct rte_bbdev_queue_info *queue_info);
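
/*
 * Example: inspecting the current state of a single queue. A minimal sketch.
 *
 *	struct rte_bbdev_queue_info q_info;
 *
 *	if (rte_bbdev_queue_info_get(dev_id, q_id, &q_info) == 0)
 *		printf("queue %u started: %d, priority: %u\n", q_id,
 *				q_info.started, q_info.conf.priority);
 */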
421 
422 /** @internal The data structure associated with each queue of a device. */
423 struct rte_bbdev_queue_data {
424 	void *queue_private;  /**< Driver-specific per-queue data */
425 	struct rte_bbdev_queue_conf conf;  /**< Current configuration */
426 	struct rte_bbdev_stats queue_stats;  /**< Queue statistics */
427 	enum rte_bbdev_enqueue_status enqueue_status; /**< Enqueue status when op is rejected */
428 	bool started;  /**< Queue state */
429 };
430 
431 /** @internal Enqueue encode operations for processing on queue of a device. */
432 typedef uint16_t (*rte_bbdev_enqueue_enc_ops_t)(
433 		struct rte_bbdev_queue_data *q_data,
434 		struct rte_bbdev_enc_op **ops,
435 		uint16_t num);
436 
437 /** @internal Enqueue decode operations for processing on queue of a device. */
438 typedef uint16_t (*rte_bbdev_enqueue_dec_ops_t)(
439 		struct rte_bbdev_queue_data *q_data,
440 		struct rte_bbdev_dec_op **ops,
441 		uint16_t num);
442 
443 /** @internal Enqueue FFT operations for processing on queue of a device. */
444 typedef uint16_t (*rte_bbdev_enqueue_fft_ops_t)(
445 		struct rte_bbdev_queue_data *q_data,
446 		struct rte_bbdev_fft_op **ops,
447 		uint16_t num);
448 
449 /** @internal Enqueue MLD-TS operations for processing on queue of a device. */
450 typedef uint16_t (*rte_bbdev_enqueue_mldts_ops_t)(
451 		struct rte_bbdev_queue_data *q_data,
452 		struct rte_bbdev_mldts_op **ops,
453 		uint16_t num);
454 
455 /** @internal Dequeue encode operations from a queue of a device. */
456 typedef uint16_t (*rte_bbdev_dequeue_enc_ops_t)(
457 		struct rte_bbdev_queue_data *q_data,
458 		struct rte_bbdev_enc_op **ops, uint16_t num);
459 
460 /** @internal Dequeue decode operations from a queue of a device. */
461 typedef uint16_t (*rte_bbdev_dequeue_dec_ops_t)(
462 		struct rte_bbdev_queue_data *q_data,
463 		struct rte_bbdev_dec_op **ops, uint16_t num);
464 
465 /** @internal Dequeue FFT operations from a queue of a device. */
466 typedef uint16_t (*rte_bbdev_dequeue_fft_ops_t)(
467 		struct rte_bbdev_queue_data *q_data,
468 		struct rte_bbdev_fft_op **ops, uint16_t num);
469 
470 /** @internal Dequeue MLDTS operations from a queue of a device. */
471 typedef uint16_t (*rte_bbdev_dequeue_mldts_ops_t)(
472 		struct rte_bbdev_queue_data *q_data,
473 		struct rte_bbdev_mldts_op **ops, uint16_t num);
474 
475 #define RTE_BBDEV_NAME_MAX_LEN  64  /**< Max length of device name */
476 
477 /**
478  * @internal The data associated with a device, with no function pointers.
479  * This structure is safe to place in shared memory to be common among
480  * different processes in a multi-process configuration. Drivers can access
481  * these fields, but should never write to them!
482  */
483 struct rte_bbdev_data {
484 	char name[RTE_BBDEV_NAME_MAX_LEN]; /**< Unique identifier name */
485 	void *dev_private;  /**< Driver-specific private data */
486 	uint16_t num_queues;  /**< Number of currently configured queues */
487 	struct rte_bbdev_queue_data *queues;  /**< Queue structures */
488 	uint16_t dev_id;  /**< Device ID */
489 	int socket_id;  /**< NUMA socket that device is on */
490 	bool started;  /**< Device run-time state */
491 	RTE_ATOMIC(uint16_t) process_cnt;  /**< Counter of processes using the device. */
492 };
493 
494 /* Forward declarations */
495 struct rte_bbdev_ops;
496 struct rte_bbdev_callback;
497 struct rte_intr_handle;
498 
499 /** Structure to keep track of registered callbacks */
500 RTE_TAILQ_HEAD(rte_bbdev_cb_list, rte_bbdev_callback);
501 
502 /**
503  * @internal The data structure associated with a device. Drivers can access
504  * these fields, but should only write to the *_ops fields.
505  */
506 struct __rte_cache_aligned rte_bbdev {
507 	/** Enqueue encode function */
508 	rte_bbdev_enqueue_enc_ops_t enqueue_enc_ops;
509 	/** Enqueue decode function */
510 	rte_bbdev_enqueue_dec_ops_t enqueue_dec_ops;
511 	/** Dequeue encode function */
512 	rte_bbdev_dequeue_enc_ops_t dequeue_enc_ops;
513 	/** Dequeue decode function */
514 	rte_bbdev_dequeue_dec_ops_t dequeue_dec_ops;
515 	/** Enqueue LDPC encode function */
516 	rte_bbdev_enqueue_enc_ops_t enqueue_ldpc_enc_ops;
517 	/** Enqueue LDPC decode function */
518 	rte_bbdev_enqueue_dec_ops_t enqueue_ldpc_dec_ops;
519 	/** Dequeue LDPC encode function */
520 	rte_bbdev_dequeue_enc_ops_t dequeue_ldpc_enc_ops;
521 	/** Dequeue LDPC decode function */
522 	rte_bbdev_dequeue_dec_ops_t dequeue_ldpc_dec_ops;
523 	/** Enqueue FFT function */
524 	rte_bbdev_enqueue_fft_ops_t enqueue_fft_ops;
525 	/** Dequeue FFT function */
526 	rte_bbdev_dequeue_fft_ops_t dequeue_fft_ops;
527 	const struct rte_bbdev_ops *dev_ops;  /**< Functions exported by PMD */
528 	struct rte_bbdev_data *data;  /**< Pointer to device data */
529 	enum rte_bbdev_state state;  /**< If device is currently used or not */
530 	struct rte_device *device; /**< Backing device */
531 	/** User application callback for interrupts if present */
532 	struct rte_bbdev_cb_list list_cbs;
533 	struct rte_intr_handle *intr_handle; /**< Device interrupt handle */
534 	/** Enqueue MLD-TS function */
535 	rte_bbdev_enqueue_mldts_ops_t enqueue_mldts_ops;
536 	/** Dequeue MLD-TS function */
537 	rte_bbdev_dequeue_mldts_ops_t dequeue_mldts_ops;
538 };
539 
540 /** @internal array of all devices */
541 extern struct rte_bbdev rte_bbdev_devices[];
542 
543 /**
544  * Enqueue a burst of encode operations for processing on a queue of the device.
545  * This function only enqueues as many operations as currently possible and
546  * does not block until @p num_ops entries in the queue are available.
547  * This function does not provide any error notification to avoid the
548  * corresponding overhead.
549  *
550  * @param dev_id
551  *   The identifier of the device.
552  * @param queue_id
553  *   The index of the queue.
554  * @param ops
555  *   Pointer array containing operations to be enqueued. Must have at least
556  *   @p num_ops entries.
557  * @param num_ops
558  *   The maximum number of operations to enqueue.
559  *
560  * @return
561  *   The number of operations actually enqueued (this is the number of processed
562  *   entries in the @p ops array).
563  */
564 static inline uint16_t
565 rte_bbdev_enqueue_enc_ops(uint16_t dev_id, uint16_t queue_id,
566 		struct rte_bbdev_enc_op **ops, uint16_t num_ops)
567 {
568 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
569 	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
570 	return dev->enqueue_enc_ops(q_data, ops, num_ops);
571 }
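
/*
 * Example: enqueueing a burst and retrying the remainder when the queue is
 * full. A sketch assuming the ops come from a pool created with
 * rte_bbdev_op_pool_create() and already describe valid encode operations;
 * BURST_SIZE is an illustrative application-defined constant.
 *
 *	struct rte_bbdev_enc_op *ops[BURST_SIZE];
 *	uint16_t enq = 0;
 *
 *	while (enq < BURST_SIZE)
 *		enq += rte_bbdev_enqueue_enc_ops(dev_id, q_id,
 *				&ops[enq], BURST_SIZE - enq);
 */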
572 
573 /**
574  * Enqueue a burst of decode operations for processing on a queue of the device.
575  * This function only enqueues as many operations as currently possible and
576  * does not block until @p num_ops entries in the queue are available.
577  * This function does not provide any error notification to avoid the
578  * corresponding overhead.
579  *
580  * @param dev_id
581  *   The identifier of the device.
582  * @param queue_id
583  *   The index of the queue.
584  * @param ops
585  *   Pointer array containing operations to be enqueued. Must have at least
586  *   @p num_ops entries.
587  * @param num_ops
588  *   The maximum number of operations to enqueue.
589  *
590  * @return
591  *   The number of operations actually enqueued (this is the number of processed
592  *   entries in the @p ops array).
593  */
594 static inline uint16_t
595 rte_bbdev_enqueue_dec_ops(uint16_t dev_id, uint16_t queue_id,
596 		struct rte_bbdev_dec_op **ops, uint16_t num_ops)
597 {
598 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
599 	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
600 	return dev->enqueue_dec_ops(q_data, ops, num_ops);
601 }
602 
603 /**
604  * Enqueue a burst of LDPC encode operations for processing on a queue of the device.
605  * This function only enqueues as many operations as currently possible and
606  * does not block until @p num_ops entries in the queue are available.
607  * This function does not provide any error notification to avoid the
608  * corresponding overhead.
609  *
610  * @param dev_id
611  *   The identifier of the device.
612  * @param queue_id
613  *   The index of the queue.
614  * @param ops
615  *   Pointer array containing operations to be enqueued. Must have at least
616  *   @p num_ops entries.
617  * @param num_ops
618  *   The maximum number of operations to enqueue.
619  *
620  * @return
621  *   The number of operations actually enqueued (this is the number of processed
622  *   entries in the @p ops array).
623  */
624 static inline uint16_t
625 rte_bbdev_enqueue_ldpc_enc_ops(uint16_t dev_id, uint16_t queue_id,
626 		struct rte_bbdev_enc_op **ops, uint16_t num_ops)
627 {
628 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
629 	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
630 	return dev->enqueue_ldpc_enc_ops(q_data, ops, num_ops);
631 }
632 
633 /**
634  * Enqueue a burst of LDPC decode operations for processing on a queue of the device.
635  * This function only enqueues as many operations as currently possible and
636  * does not block until @p num_ops entries in the queue are available.
637  * This function does not provide any error notification to avoid the
638  * corresponding overhead.
639  *
640  * @param dev_id
641  *   The identifier of the device.
642  * @param queue_id
643  *   The index of the queue.
644  * @param ops
645  *   Pointer array containing operations to be enqueued. Must have at least
646  *   @p num_ops entries.
647  * @param num_ops
648  *   The maximum number of operations to enqueue.
649  *
650  * @return
651  *   The number of operations actually enqueued (this is the number of processed
652  *   entries in the @p ops array).
653  */
654 static inline uint16_t
655 rte_bbdev_enqueue_ldpc_dec_ops(uint16_t dev_id, uint16_t queue_id,
656 		struct rte_bbdev_dec_op **ops, uint16_t num_ops)
657 {
658 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
659 	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
660 	return dev->enqueue_ldpc_dec_ops(q_data, ops, num_ops);
661 }
662 
663 /**
664  * Enqueue a burst of FFT operations to a queue of the device.
665  * This function only enqueues as many operations as currently possible and
666  * does not block until @p num_ops entries in the queue are available.
667  * This function does not provide any error notification to avoid the
668  * corresponding overhead.
669  *
670  * @param dev_id
671  *   The identifier of the device.
672  * @param queue_id
673  *   The index of the queue.
674  * @param ops
675  *   Pointer array containing operations to be enqueued.
676  *   Must have at least @p num_ops entries.
677  * @param num_ops
678  *   The maximum number of operations to enqueue.
679  *
680  * @return
681  *   The number of operations actually enqueued.
682  *   (This is the number of processed entries in the @p ops array.)
683  */
684 __rte_experimental
685 static inline uint16_t
686 rte_bbdev_enqueue_fft_ops(uint16_t dev_id, uint16_t queue_id,
687 		struct rte_bbdev_fft_op **ops, uint16_t num_ops)
688 {
689 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
690 	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
691 	return dev->enqueue_fft_ops(q_data, ops, num_ops);
692 }
693 
694 /**
695  * Enqueue a burst of MLDTS operations to a queue of the device.
696  * This function only enqueues as many operations as currently possible and
697  * does not block until @p num_ops entries in the queue are available.
698  * This function does not provide any error notification to avoid the
699  * corresponding overhead.
700  *
701  * @param dev_id
702  *   The identifier of the device.
703  * @param queue_id
704  *   The index of the queue.
705  * @param ops
706  *   Pointer array containing operations to be enqueued. Must have at least
707  *   @p num_ops entries.
708  * @param num_ops
709  *   The maximum number of operations to enqueue.
710  *
711  * @return
712  *   The number of operations actually enqueued (this is the number of processed
713  *   entries in the @p ops array).
714  */
715 static inline uint16_t
716 rte_bbdev_enqueue_mldts_ops(uint16_t dev_id, uint16_t queue_id,
717 		struct rte_bbdev_mldts_op **ops, uint16_t num_ops)
718 {
719 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
720 	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
721 	return dev->enqueue_mldts_ops(q_data, ops, num_ops);
722 }
723 
724 /**
725  * Dequeue a burst of processed encode operations from a queue of the device.
726  * This function returns only the current contents of the queue,
727  * and does not block until @p num_ops operations are available.
728  * This function does not provide any error notification to avoid the
729  * corresponding overhead.
730  *
731  * @param dev_id
732  *   The identifier of the device.
733  * @param queue_id
734  *   The index of the queue.
735  * @param ops
736  *   Pointer array where operations will be dequeued to.
737  *   Must have at least @p num_ops entries, i.e.
738  *   a pointer to a table of void * pointers (ops) that will be filled.
739  * @param num_ops
740  *   The maximum number of operations to dequeue.
741  *
742  * @return
743  *   The number of operations actually dequeued.
744  *   (This is the number of entries copied into the @p ops array.)
745  */
746 static inline uint16_t
747 rte_bbdev_dequeue_enc_ops(uint16_t dev_id, uint16_t queue_id,
748 		struct rte_bbdev_enc_op **ops, uint16_t num_ops)
749 {
750 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
751 	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
752 	return dev->dequeue_enc_ops(q_data, ops, num_ops);
753 }
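
/*
 * Example: polling until a full burst has been dequeued. A sketch; a real
 * application would typically bound the loop or interleave other work, and
 * should check op->status on every dequeued operation.
 *
 *	struct rte_bbdev_enc_op *deq_ops[BURST_SIZE];
 *	uint16_t deq = 0;
 *
 *	while (deq < BURST_SIZE)
 *		deq += rte_bbdev_dequeue_enc_ops(dev_id, q_id,
 *				&deq_ops[deq], BURST_SIZE - deq);
 */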
754 
755 /**
756  * Dequeue a burst of processed decode operations from a queue of the device.
757  * This function returns only the current contents of the queue, and does not
758  * block until @p num_ops operations are available.
759  * This function does not provide any error notification to avoid the
760  * corresponding overhead.
761  *
762  * @param dev_id
763  *   The identifier of the device.
764  * @param queue_id
765  *   The index of the queue.
766  * @param ops
767  *   Pointer array where operations will be dequeued to. Must have at least
768  *   @p num_ops entries,
769  *   i.e. a pointer to a table of void * pointers (ops) that will be filled.
770  * @param num_ops
771  *   The maximum number of operations to dequeue.
772  *
773  * @return
774  *   The number of operations actually dequeued (this is the number of entries
775  *   copied into the @p ops array).
776  */
778 static inline uint16_t
779 rte_bbdev_dequeue_dec_ops(uint16_t dev_id, uint16_t queue_id,
780 		struct rte_bbdev_dec_op **ops, uint16_t num_ops)
781 {
782 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
783 	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
784 	return dev->dequeue_dec_ops(q_data, ops, num_ops);
785 }
786 
787 
788 /**
789  * Dequeue a burst of processed encode operations from a queue of the device.
790  * This function returns only the current contents of the queue, and does not
791  * block until @p num_ops operations are available.
792  * This function does not provide any error notification to avoid the
793  * corresponding overhead.
794  *
795  * @param dev_id
796  *   The identifier of the device.
797  * @param queue_id
798  *   The index of the queue.
799  * @param ops
800  *   Pointer array where operations will be dequeued to. Must have at least
801  *   @p num_ops entries
802  * @param num_ops
803  *   The maximum number of operations to dequeue.
804  *
805  * @return
806  *   The number of operations actually dequeued (this is the number of entries
807  *   copied into the @p ops array).
808  */
809 static inline uint16_t
810 rte_bbdev_dequeue_ldpc_enc_ops(uint16_t dev_id, uint16_t queue_id,
811 		struct rte_bbdev_enc_op **ops, uint16_t num_ops)
812 {
813 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
814 	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
815 	return dev->dequeue_ldpc_enc_ops(q_data, ops, num_ops);
816 }
817 
818 /**
819  * Dequeue a burst of processed decode operations from a queue of the device.
820  * This function returns only the current contents of the queue, and does not
821  * block until @p num_ops operations are available.
822  * This function does not provide any error notification to avoid the
823  * corresponding overhead.
824  *
825  * @param dev_id
826  *   The identifier of the device.
827  * @param queue_id
828  *   The index of the queue.
829  * @param ops
830  *   Pointer array where operations will be dequeued to. Must have at least
831  *   @p num_ops entries
832  * @param num_ops
833  *   The maximum number of operations to dequeue.
834  *
835  * @return
836  *   The number of operations actually dequeued (this is the number of entries
837  *   copied into the @p ops array).
838  */
839 static inline uint16_t
840 rte_bbdev_dequeue_ldpc_dec_ops(uint16_t dev_id, uint16_t queue_id,
841 		struct rte_bbdev_dec_op **ops, uint16_t num_ops)
842 {
843 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
844 	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
845 	return dev->dequeue_ldpc_dec_ops(q_data, ops, num_ops);
846 }
847 
848 /**
849  * Dequeue a burst of FFT operations from a queue of the device.
850  * This function returns only the current contents of the queue, and does not
851  * block until @p num_ops operations are available.
852  * This function does not provide any error notification to avoid the
853  * corresponding overhead.
854  *
855  * @param dev_id
856  *   The identifier of the device.
857  * @param queue_id
858  *   The index of the queue.
859  * @param ops
860  *   Pointer array where operations will be dequeued to. Must have at least
861  *   @p num_ops entries
862  * @param num_ops
863  *   The maximum number of operations to dequeue.
864  *
865  * @return
866  *   The number of operations actually dequeued (this is the number of entries
867  *   copied into the @p ops array).
868  */
869 __rte_experimental
870 static inline uint16_t
871 rte_bbdev_dequeue_fft_ops(uint16_t dev_id, uint16_t queue_id,
872 		struct rte_bbdev_fft_op **ops, uint16_t num_ops)
873 {
874 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
875 	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
876 	return dev->dequeue_fft_ops(q_data, ops, num_ops);
877 }
878 
879 /**
880  * Dequeue a burst of MLDTS operations from a queue of the device.
881  * This function returns only the current contents of the queue, and does not
882  * block until @p num_ops operations are available.
883  * This function does not provide any error notification to avoid the
884  * corresponding overhead.
885  *
886  * @param dev_id
887  *   The identifier of the device.
888  * @param queue_id
889  *   The index of the queue.
890  * @param ops
891  *   Pointer array where operations will be dequeued to. Must have at least
892  *   @p num_ops entries
893  * @param num_ops
894  *   The maximum number of operations to dequeue.
895  *
896  * @return
897  *   The number of operations actually dequeued (this is the number of entries
898  *   copied into the @p ops array).
899  */
900 __rte_experimental
901 static inline uint16_t
902 rte_bbdev_dequeue_mldts_ops(uint16_t dev_id, uint16_t queue_id,
903 		struct rte_bbdev_mldts_op **ops, uint16_t num_ops)
904 {
905 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
906 	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
907 	return dev->dequeue_mldts_ops(q_data, ops, num_ops);
908 }
909 
910 /** Definitions of device event types */
911 enum rte_bbdev_event_type {
912 	RTE_BBDEV_EVENT_UNKNOWN,  /**< unknown event type */
913 	RTE_BBDEV_EVENT_ERROR,  /**< error interrupt event */
914 	RTE_BBDEV_EVENT_DEQUEUE,  /**< dequeue event */
915 	RTE_BBDEV_EVENT_MAX  /**< max value of this enum */
916 };
917 
918 /**
919  * Typedef for an application callback function registered by application
920  * software for notification of device events.
921  *
922  * @param dev_id
923  *   Device identifier
924  * @param event
925  *   Device event to register for notification of.
926  * @param cb_arg
927  *   User specified parameter to be passed to user's callback function.
928  * @param ret_param
929  *   To pass data back to user application.
930  */
931 typedef void (*rte_bbdev_cb_fn)(uint16_t dev_id,
932 		enum rte_bbdev_event_type event, void *cb_arg,
933 		void *ret_param);
934 
935 /**
936  * Register a callback function for specific device id. Multiple callbacks can
937  * be added and will be called in the order they are added when an event is
938  * triggered. Callbacks are called in a separate thread created by the DPDK EAL.
939  *
940  * @param dev_id
941  *   Device id.
942  * @param event
943  *   The event that the callback will be registered for.
944  * @param cb_fn
945  *   User supplied callback function to be called.
946  * @param cb_arg
947  *   Pointer to parameter that will be passed to the callback.
948  *
949  * @return
950  *   Zero on success, negative value on failure.
951  */
952 int
953 rte_bbdev_callback_register(uint16_t dev_id, enum rte_bbdev_event_type event,
954 		rte_bbdev_cb_fn cb_fn, void *cb_arg);
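
/*
 * Example: registering an error callback. A sketch; the callback body and
 * its arguments are illustrative.
 *
 *	static void
 *	error_cb(uint16_t dev_id, enum rte_bbdev_event_type event, void *cb_arg,
 *			void *ret_param)
 *	{
 *		printf("bbdev %u reported event %d\n", dev_id, event);
 *	}
 *
 *	// ... during initialisation
 *	rte_bbdev_callback_register(dev_id, RTE_BBDEV_EVENT_ERROR, error_cb, NULL);
 */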
955 
956 /**
957  * Unregister a callback function for specific device id.
958  *
959  * @param dev_id
960  *   The device identifier.
961  * @param event
962  *   The event that the callback will be unregistered for.
963  * @param cb_fn
964  *   User supplied callback function to be unregistered.
965  * @param cb_arg
966  *   Pointer to the parameter supplied when registering the callback.
967  *   (void *)-1 means to remove all registered callbacks with the specified
968  *   function address.
969  *
970  * @return
971  *   - 0 on success
972  *   - EINVAL if an invalid parameter pointer is provided
973  *   - EAGAIN if the provided callback pointer does not exist
974  */
975 int
976 rte_bbdev_callback_unregister(uint16_t dev_id, enum rte_bbdev_event_type event,
977 		rte_bbdev_cb_fn cb_fn, void *cb_arg);
978 
979 /**
980  * Enable a one-shot interrupt on the next operation enqueued to a particular
981  * queue. The interrupt will be triggered when the operation is ready to be
982  * dequeued. To handle the interrupt, an epoll file descriptor must be
983  * registered using rte_bbdev_queue_intr_ctl(), and then an application
984  * thread/lcore can wait for the interrupt using rte_epoll_wait().
985  *
986  * @param dev_id
987  *   The device identifier.
988  * @param queue_id
989  *   The index of the queue.
990  *
991  * @return
992  *   - 0 on success
993  *   - negative value on failure - as returned from PMD
994  */
995 int
996 rte_bbdev_queue_intr_enable(uint16_t dev_id, uint16_t queue_id);
997 
998 /**
999  * Disable a one-shot interrupt on the next operation enqueued to a particular
1000  * queue (if it has been enabled).
1001  *
1002  * @param dev_id
1003  *   The device identifier.
1004  * @param queue_id
1005  *   The index of the queue.
1006  *
1007  * @return
1008  *   - 0 on success
1009  *   - negative value on failure - as returned from PMD
1010  */
1011 int
1012 rte_bbdev_queue_intr_disable(uint16_t dev_id, uint16_t queue_id);
1013 
1014 /**
1015  * Control interface for per-queue interrupts.
1016  *
1017  * @param dev_id
1018  *   The device identifier.
1019  * @param queue_id
1020  *   The index of the queue.
1021  * @param epfd
1022  *   Epoll file descriptor that will be associated with the interrupt source.
1023  *   If the special value RTE_EPOLL_PER_THREAD is provided, a per thread epoll
1024  *   file descriptor created by the EAL is used (RTE_EPOLL_PER_THREAD can also
1025  *   be used when calling rte_epoll_wait()).
1026  * @param op
1027  *   The operation to be performed for the vector: RTE_INTR_EVENT_ADD or
1028  *   RTE_INTR_EVENT_DEL.
1029  * @param data
1030  *   User context, that will be returned in the epdata.data field of the
1031  *   rte_epoll_event structure filled in by rte_epoll_wait().
1032  *
1033  * @return
1034  *   - 0 on success
1035  *   - ENOTSUP if interrupts are not supported by the identified device
1036  *   - negative value on failure - as returned from PMD
1037  */
1038 int
1039 rte_bbdev_queue_intr_ctl(uint16_t dev_id, uint16_t queue_id, int epfd, int op,
1040 		void *data);
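
/*
 * Example: waiting for a dequeue interrupt with the EAL epoll API. A sketch
 * assuming the PMD supports per-queue interrupts; RTE_EPOLL_PER_THREAD,
 * RTE_INTR_EVENT_ADD and rte_epoll_wait() come from the EAL interrupt API.
 *
 *	struct rte_epoll_event ev;
 *
 *	rte_bbdev_queue_intr_ctl(dev_id, q_id, RTE_EPOLL_PER_THREAD,
 *			RTE_INTR_EVENT_ADD, NULL);
 *	rte_bbdev_queue_intr_enable(dev_id, q_id);
 *	// ... enqueue operations, then block until one is ready to dequeue
 *	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
 *	(void)rte_bbdev_dequeue_enc_ops(dev_id, q_id, deq_ops, BURST_SIZE);
 */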
1041 
1042 /**
1043  * Convert device status from enum to string.
1044  *
1045  * @param status
1046  *   Device status as enum.
1047  *
1048  * @returns
1049  *   Device status as string or NULL if invalid.
1050  */
1051 __rte_experimental
1052 const char*
1053 rte_bbdev_device_status_str(enum rte_bbdev_device_status status);
1054 
1055 /**
1056  * Convert enqueue status from enum to string.
1057  *
1058  * @param status
1059  *   Queue status as enum.
1060  *
1061  * @returns
1062  *   Enqueue status as string or NULL if the status is invalid.
1063  */
1064 __rte_experimental
1065 const char*
1066 rte_bbdev_enqueue_status_str(enum rte_bbdev_enqueue_status status);
1067 
1068 #ifdef __cplusplus
1069 }
1070 #endif
1071 
1072 #endif /* _RTE_BBDEV_H_ */
1073