/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#ifndef _RTE_BBDEV_H_
#define _RTE_BBDEV_H_

/**
 * @file rte_bbdev.h
 *
 * Wireless base band device abstraction APIs.
 *
 * This API allows an application to discover, configure and use a device to
 * process operations. An asynchronous API (enqueue, followed by later dequeue)
 * is used for processing operations.
 *
 * The functions in this API are not thread-safe when called on the same
 * target object (a device, or a queue on a device), with the exception that
 * one thread can enqueue operations to a queue while another thread dequeues
 * from the same queue.
 */

#ifdef __cplusplus
extern "C" {
#endif

#include <stdint.h>
#include <stdbool.h>

#include <rte_compat.h>
#include <rte_cpuflags.h>

#include "rte_bbdev_op.h"

#ifndef RTE_BBDEV_MAX_DEVS
#define RTE_BBDEV_MAX_DEVS 128  /**< Max number of devices */
#endif

/*
 * Maximum size to be used to manage the enum rte_bbdev_enqueue_status,
 * including padding for future enum insertion.
 * The enum values must be explicitly kept less than or equal to this padded maximum size.
 */
#define RTE_BBDEV_ENQ_STATUS_SIZE_MAX 6

/** Flags to indicate the current state of a BBDEV device. */
enum rte_bbdev_state {
	RTE_BBDEV_UNUSED,
	RTE_BBDEV_INITIALIZED
};

/**
 * Get the total number of devices that have been successfully initialised.
 *
 * @return
 *   The total number of usable devices.
 */
uint16_t
rte_bbdev_count(void);

/**
 * Check if a device is valid.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @return
 *   true if device ID is valid and device is attached, false otherwise.
 */
bool
rte_bbdev_is_valid(uint16_t dev_id);

/**
 * Get the next enabled device.
 *
 * @param dev_id
 *   The current device
 *
 * @return
 *   - The next device, or
 *   - RTE_BBDEV_MAX_DEVS if none found
 */
uint16_t
rte_bbdev_find_next(uint16_t dev_id);

/** Iterate through all enabled devices */
#define RTE_BBDEV_FOREACH(i) for (i = rte_bbdev_find_next(-1); \
		i < RTE_BBDEV_MAX_DEVS; \
		i = rte_bbdev_find_next(i))
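
/**
 * Example usage (illustrative sketch only, not part of the API): enumerate
 * the initialised bbdev devices from an application that has already run
 * rte_eal_init(). Assumes <stdio.h> is included.
 *
 * @code{.c}
 * uint16_t dev_id;
 *
 * printf("bbdev devices available: %u\n", rte_bbdev_count());
 * RTE_BBDEV_FOREACH(dev_id) {
 *     if (rte_bbdev_is_valid(dev_id))
 *         printf("  device %u is attached\n", dev_id);
 * }
 * @endcode
 */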

/**
 * Set up device queues.
 * This function must be called on a device before configuring individual
 * queues and before starting the device. It can also be called when a device
 * is in the stopped state. If any device queues have been configured, their
 * configuration will be cleared by a call to this function.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param num_queues
 *   Number of queues to configure on device.
 * @param socket_id
 *   ID of a socket which will be used to allocate memory.
 *
 * @return
 *   - 0 on success
 *   - -ENODEV if dev_id is invalid or the device is corrupted
 *   - -EINVAL if num_queues is invalid, 0 or greater than maximum
 *   - -EBUSY if the identified device has already started
 *   - -ENOMEM if unable to allocate memory
 */
int
rte_bbdev_setup_queues(uint16_t dev_id, uint16_t num_queues, int socket_id);

/**
 * Enable interrupts.
 * This function may be called before starting the device to enable the
 * interrupts if they are available.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @return
 *   - 0 on success
 *   - -ENODEV if dev_id is invalid or the device is corrupted
 *   - -EBUSY if the identified device has already started
 *   - -ENOTSUP if the interrupts are not supported by the device
 */
int
rte_bbdev_intr_enable(uint16_t dev_id);

/** Device queue configuration structure */
struct rte_bbdev_queue_conf {
	int socket;  /**< NUMA socket used for memory allocation */
	uint32_t queue_size;  /**< Size of queue */
	uint8_t priority;  /**< Queue priority */
	bool deferred_start; /**< Do not start queue when device is started. */
	enum rte_bbdev_op_type op_type; /**< Operation type */
};

/**
 * Configure a queue on a device.
 * This function can be called after device configuration, and before starting.
 * It can also be called when the device or the queue is in the stopped state.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 * @param conf
 *   The queue configuration. If NULL, a default configuration will be used.
 *
 * @return
 *   - 0 on success
 *   - EINVAL if the identified queue size or priority are invalid
 *   - EBUSY if the identified queue or its device have already started
 */
int
rte_bbdev_queue_configure(uint16_t dev_id, uint16_t queue_id,
		const struct rte_bbdev_queue_conf *conf);

/**
 * Start a device.
 * This is the last step needed before enqueueing operations is possible.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @return
 *   - 0 on success
 *   - negative value on failure - as returned from PMD
 */
int
rte_bbdev_start(uint16_t dev_id);
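
/**
 * Example bring-up sequence (illustrative sketch with assumed identifiers and
 * op type, not taken verbatim from any driver or sample application): query
 * the device, configure a single queue for LDPC encode starting from the
 * driver defaults, then start the device.
 *
 * @code{.c}
 * struct rte_bbdev_info info;
 * struct rte_bbdev_queue_conf qconf;
 * uint16_t dev_id = 0;   // assumed device identifier
 *
 * if (rte_bbdev_info_get(dev_id, &info) != 0)
 *     rte_exit(EXIT_FAILURE, "Cannot query bbdev %u\n", dev_id);
 *
 * if (rte_bbdev_setup_queues(dev_id, 1, info.socket_id) != 0)
 *     rte_exit(EXIT_FAILURE, "Cannot configure queues on bbdev %u\n", dev_id);
 *
 * qconf = info.drv.default_queue_conf;
 * qconf.op_type = RTE_BBDEV_OP_LDPC_ENC;
 * if (rte_bbdev_queue_configure(dev_id, 0, &qconf) != 0)
 *     rte_exit(EXIT_FAILURE, "Cannot configure queue 0 on bbdev %u\n", dev_id);
 *
 * if (rte_bbdev_start(dev_id) != 0)
 *     rte_exit(EXIT_FAILURE, "Cannot start bbdev %u\n", dev_id);
 * @endcode
 */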

/**
 * Stop a device.
 * The device can be reconfigured, and restarted after being stopped.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @return
 *   - 0 on success
 */
int
rte_bbdev_stop(uint16_t dev_id);

/**
 * Close a device.
 * The device cannot be restarted without reconfiguration!
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @return
 *   - 0 on success
 */
int
rte_bbdev_close(uint16_t dev_id);

/**
 * Start a specified queue on a device.
 * This is only needed if the queue has been stopped, or if the deferred_start
 * flag has been set when configuring the queue.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 *
 * @return
 *   - 0 on success
 *   - negative value on failure - as returned from PMD
 */
int
rte_bbdev_queue_start(uint16_t dev_id, uint16_t queue_id);

/**
 * Stop a specified queue on a device, to allow reconfiguration.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 *
 * @return
 *   - 0 on success
 *   - negative value on failure - as returned from PMD
 */
int
rte_bbdev_queue_stop(uint16_t dev_id, uint16_t queue_id);
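
/**
 * Example (illustrative sketch): stop a single queue, adjust its priority and
 * start it again while the device itself keeps running. The identifiers and
 * the priority value are assumptions made for the example only.
 *
 * @code{.c}
 * struct rte_bbdev_queue_info q_info;
 * struct rte_bbdev_queue_conf qconf;
 * uint16_t dev_id = 0, queue_id = 0;   // assumed identifiers
 *
 * rte_bbdev_queue_stop(dev_id, queue_id);
 * rte_bbdev_queue_info_get(dev_id, queue_id, &q_info);
 * qconf = q_info.conf;
 * qconf.priority = 1;   // example value; valid range is reported by the driver
 * rte_bbdev_queue_configure(dev_id, queue_id, &qconf);
 * rte_bbdev_queue_start(dev_id, queue_id);
 * @endcode
 */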

/**
 * Flags to indicate the reason why a previous enqueue may not have
 * consumed all requested operations.
 * If multiple reasons apply, the later one supersedes any previous one.
 * The related macro RTE_BBDEV_ENQ_STATUS_SIZE_MAX can be used as an absolute
 * maximum, notably for sizing arrays, while allowing for future enum
 * insertions.
 */
enum rte_bbdev_enqueue_status {
	RTE_BBDEV_ENQ_STATUS_NONE,             /**< Nothing to report. */
	RTE_BBDEV_ENQ_STATUS_QUEUE_FULL,       /**< Not enough room in queue. */
	RTE_BBDEV_ENQ_STATUS_RING_FULL,        /**< Not enough room in ring. */
	RTE_BBDEV_ENQ_STATUS_INVALID_OP,       /**< Operation was rejected as invalid. */
	/* Note: RTE_BBDEV_ENQ_STATUS_SIZE_MAX must be larger or equal to maximum enum value. */
};

/**
 * Flags to indicate the status of the device.
 */
enum rte_bbdev_device_status {
	RTE_BBDEV_DEV_NOSTATUS,        /**< Nothing being reported. */
	RTE_BBDEV_DEV_NOT_SUPPORTED,   /**< Device status is not supported on the PMD. */
	RTE_BBDEV_DEV_RESET,           /**< Device in reset and un-configured state. */
	RTE_BBDEV_DEV_CONFIGURED,      /**< Device is configured and ready to use. */
	RTE_BBDEV_DEV_ACTIVE,          /**< Device is configured and VF is being used. */
	RTE_BBDEV_DEV_FATAL_ERR,       /**< Device has hit a fatal uncorrectable error. */
	RTE_BBDEV_DEV_RESTART_REQ,     /**< Device requires application to restart. */
	RTE_BBDEV_DEV_RECONFIG_REQ,    /**< Device requires application to reconfigure queues. */
	RTE_BBDEV_DEV_CORRECT_ERR,     /**< Warning of a correctable error event happened. */
};

/** Device statistics. */
struct rte_bbdev_stats {
	uint64_t enqueued_count;  /**< Count of all operations enqueued */
	uint64_t dequeued_count;  /**< Count of all operations dequeued */
	/** Total error count on operations enqueued */
	uint64_t enqueue_err_count;
	/** Total error count on operations dequeued */
	uint64_t dequeue_err_count;
	/** Total warning count on operations enqueued. */
	uint64_t enqueue_warn_count;
	/** Total warning count on operations dequeued. */
	uint64_t dequeue_warn_count;
	/** Total enqueue status count based on *rte_bbdev_enqueue_status* enum. */
	uint64_t enqueue_status_count[RTE_BBDEV_ENQ_STATUS_SIZE_MAX];
	/** CPU cycles consumed by the (HW/SW) accelerator device to offload
	 *  the enqueue request to its internal queues.
	 *  - For a HW device this is the cycles consumed in MMIO write
	 *  - For a SW (vdev) device, this is the processing time of the
	 *    bbdev operation
	 */
	uint64_t acc_offload_cycles;
};

/**
 * Retrieve the general I/O statistics of a device.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param stats
 *   Pointer to structure to where statistics will be copied. On error, this
 *   location may or may not have been modified.
 *
 * @return
 *   - 0 on success
 *   - EINVAL if invalid parameter pointer is provided
 */
int
rte_bbdev_stats_get(uint16_t dev_id, struct rte_bbdev_stats *stats);
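
/**
 * Example (illustrative sketch, assuming <stdio.h> and <inttypes.h>): dump
 * the accumulated counters of a device, including the per-reason enqueue
 * status counters.
 *
 * @code{.c}
 * struct rte_bbdev_stats stats;
 * uint16_t dev_id = 0;   // assumed device identifier
 * int i;
 *
 * if (rte_bbdev_stats_get(dev_id, &stats) == 0) {
 *     printf("enqueued %"PRIu64", dequeued %"PRIu64", enqueue errors %"PRIu64"\n",
 *            stats.enqueued_count, stats.dequeued_count,
 *            stats.enqueue_err_count);
 *     for (i = 0; i < RTE_BBDEV_ENQ_STATUS_SIZE_MAX; i++) {
 *         const char *reason = rte_bbdev_enqueue_status_str(i);
 *         if (reason != NULL)
 *             printf("  %s: %"PRIu64"\n", reason, stats.enqueue_status_count[i]);
 *     }
 * }
 * @endcode
 */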

/**
 * Reset the statistics of a device.
 *
 * @param dev_id
 *   The identifier of the device.
 * @return
 *   - 0 on success
 */
int
rte_bbdev_stats_reset(uint16_t dev_id);

/** Device information supplied by the device's driver */

/* Structure rte_bbdev_driver_info 8< */
struct rte_bbdev_driver_info {
	/** Driver name */
	const char *driver_name;

	/** Maximum number of queues supported by the device */
	unsigned int max_num_queues;
	/** Maximum number of queues supported per operation type */
	unsigned int num_queues[RTE_BBDEV_OP_TYPE_SIZE_MAX];
	/** Priority level supported per operation type */
	unsigned int queue_priority[RTE_BBDEV_OP_TYPE_SIZE_MAX];
	/** Queue size limit (queue size must also be a power of 2) */
	uint32_t queue_size_lim;
	/** Set if device off-loads operation to hardware */
	bool hardware_accelerated;
	/** Max value supported by queue priority for DL */
	uint8_t max_dl_queue_priority;
	/** Max value supported by queue priority for UL */
	uint8_t max_ul_queue_priority;
	/** Set if device supports per-queue interrupts */
	bool queue_intr_supported;
	/** Device Status */
	enum rte_bbdev_device_status device_status;
	/** HARQ memory available in kB */
	uint32_t harq_buffer_size;
	/** Minimum alignment of buffers, in bytes */
	uint16_t min_alignment;
	/** Byte endianness (RTE_BIG_ENDIAN/RTE_LITTLE_ENDIAN) supported
	 *  for input/output data
	 */
	uint8_t data_endianness;
	/** Default queue configuration used if none is supplied */
	struct rte_bbdev_queue_conf default_queue_conf;
	/** Device operation capabilities */
	const struct rte_bbdev_op_cap *capabilities;
	/** Device cpu_flag requirements */
	const enum rte_cpu_flag_t *cpu_flag_reqs;
	/** FFT windowing width for 2048 FFT - size defined in capability. */
	uint16_t *fft_window_width;
};
/* >8 End of structure rte_bbdev_driver_info. */

/** Macro used at end of bbdev PMD list */
#define RTE_BBDEV_END_OF_CAPABILITIES_LIST() \
	{ RTE_BBDEV_OP_NONE }

/**
 * Device information structure used by an application to discover a device's
 * capabilities and current configuration.
 */

/* Structure rte_bbdev_info 8< */
struct rte_bbdev_info {
	int socket_id;  /**< NUMA socket that device is on */
	const char *dev_name;  /**< Unique device name */
	const struct rte_device *device; /**< Device Information */
	uint16_t num_queues;  /**< Number of queues currently configured */
	bool started;  /**< Set if device is currently started */
	struct rte_bbdev_driver_info drv;  /**< Info from device driver */
};
/* >8 End of structure rte_bbdev_info. */

/**
 * Retrieve information about a device.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param dev_info
 *   Pointer to structure to where information will be copied. On error, this
 *   location may or may not have been modified.
 *
 * @return
 *   - 0 on success
 *   - EINVAL if invalid parameter pointer is provided
 */
int
rte_bbdev_info_get(uint16_t dev_id, struct rte_bbdev_info *dev_info);
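
/**
 * Example (illustrative sketch): walk the driver-provided capability list to
 * check whether a device supports LDPC decode before committing a queue to
 * that operation type. The device identifier is an assumption for the example
 * and <stdio.h> is assumed to be included.
 *
 * @code{.c}
 * struct rte_bbdev_info info;
 * const struct rte_bbdev_op_cap *cap;
 * uint16_t dev_id = 0;   // assumed device identifier
 * bool ldpc_dec_supported = false;
 *
 * if (rte_bbdev_info_get(dev_id, &info) == 0) {
 *     // the capability list is terminated by RTE_BBDEV_OP_NONE
 *     for (cap = info.drv.capabilities; cap->type != RTE_BBDEV_OP_NONE; cap++) {
 *         if (cap->type == RTE_BBDEV_OP_LDPC_DEC)
 *             ldpc_dec_supported = true;
 *     }
 *     printf("%s: %u queue(s) configured, LDPC decode %ssupported\n",
 *            info.dev_name, info.num_queues, ldpc_dec_supported ? "" : "not ");
 * }
 * @endcode
 */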

/** Queue information */
struct rte_bbdev_queue_info {
	/** Current device configuration */
	struct rte_bbdev_queue_conf conf;
	/** Set if queue is currently started */
	bool started;
};

/**
 * Retrieve information about a specific queue on a device.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 * @param queue_info
 *   Pointer to structure to where information will be copied. On error, this
 *   location may or may not have been modified.
 *
 * @return
 *   - 0 on success
 *   - EINVAL if invalid parameter pointer is provided
 */
int
rte_bbdev_queue_info_get(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_queue_info *queue_info);

/** @internal The data structure associated with each queue of a device. */
struct rte_bbdev_queue_data {
	void *queue_private;  /**< Driver-specific per-queue data */
	struct rte_bbdev_queue_conf conf;  /**< Current configuration */
	struct rte_bbdev_stats queue_stats;  /**< Queue statistics */
	enum rte_bbdev_enqueue_status enqueue_status; /**< Enqueue status when op is rejected */
	bool started;  /**< Queue state */
};

/** @internal Enqueue encode operations for processing on queue of a device. */
typedef uint16_t (*rte_bbdev_enqueue_enc_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_enc_op **ops,
		uint16_t num);

/** @internal Enqueue decode operations for processing on queue of a device. */
typedef uint16_t (*rte_bbdev_enqueue_dec_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_dec_op **ops,
		uint16_t num);

/** @internal Enqueue FFT operations for processing on queue of a device. */
typedef uint16_t (*rte_bbdev_enqueue_fft_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_fft_op **ops,
		uint16_t num);

/** @internal Enqueue MLD-TS operations for processing on queue of a device. */
typedef uint16_t (*rte_bbdev_enqueue_mldts_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_mldts_op **ops,
		uint16_t num);

/** @internal Dequeue encode operations from a queue of a device. */
typedef uint16_t (*rte_bbdev_dequeue_enc_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_enc_op **ops, uint16_t num);

/** @internal Dequeue decode operations from a queue of a device. */
typedef uint16_t (*rte_bbdev_dequeue_dec_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_dec_op **ops, uint16_t num);

/** @internal Dequeue FFT operations from a queue of a device. */
typedef uint16_t (*rte_bbdev_dequeue_fft_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_fft_op **ops, uint16_t num);

/** @internal Dequeue MLD-TS operations from a queue of a device. */
typedef uint16_t (*rte_bbdev_dequeue_mldts_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_mldts_op **ops, uint16_t num);

#define RTE_BBDEV_NAME_MAX_LEN  64  /**< Max length of device name */

/**
 * @internal The data associated with a device, with no function pointers.
 * This structure is safe to place in shared memory to be common among
 * different processes in a multi-process configuration. Drivers can access
 * these fields, but should never write to them!
 */
struct rte_bbdev_data {
	char name[RTE_BBDEV_NAME_MAX_LEN]; /**< Unique identifier name */
	void *dev_private;  /**< Driver-specific private data */
	uint16_t num_queues;  /**< Number of currently configured queues */
	struct rte_bbdev_queue_data *queues;  /**< Queue structures */
	uint16_t dev_id;  /**< Device ID */
	int socket_id;  /**< NUMA socket that device is on */
	bool started;  /**< Device run-time state */
	RTE_ATOMIC(uint16_t) process_cnt;  /**< Counter of processes using the device */
};

/* Forward declarations */
struct rte_bbdev_ops;
struct rte_bbdev_callback;
struct rte_intr_handle;

/** Structure to keep track of registered callbacks */
RTE_TAILQ_HEAD(rte_bbdev_cb_list, rte_bbdev_callback);

/**
 * @internal The data structure associated with a device. Drivers can access
 * these fields, but should only write to the *_ops fields.
 */
struct __rte_cache_aligned rte_bbdev {
	/** Enqueue encode function */
	rte_bbdev_enqueue_enc_ops_t enqueue_enc_ops;
	/** Enqueue decode function */
	rte_bbdev_enqueue_dec_ops_t enqueue_dec_ops;
	/** Dequeue encode function */
	rte_bbdev_dequeue_enc_ops_t dequeue_enc_ops;
	/** Dequeue decode function */
	rte_bbdev_dequeue_dec_ops_t dequeue_dec_ops;
	/** Enqueue LDPC encode function */
	rte_bbdev_enqueue_enc_ops_t enqueue_ldpc_enc_ops;
	/** Enqueue LDPC decode function */
	rte_bbdev_enqueue_dec_ops_t enqueue_ldpc_dec_ops;
	/** Dequeue LDPC encode function */
	rte_bbdev_dequeue_enc_ops_t dequeue_ldpc_enc_ops;
	/** Dequeue LDPC decode function */
	rte_bbdev_dequeue_dec_ops_t dequeue_ldpc_dec_ops;
	/** Enqueue FFT function */
	rte_bbdev_enqueue_fft_ops_t enqueue_fft_ops;
	/** Dequeue FFT function */
	rte_bbdev_dequeue_fft_ops_t dequeue_fft_ops;
	const struct rte_bbdev_ops *dev_ops;  /**< Functions exported by PMD */
	struct rte_bbdev_data *data;  /**< Pointer to device data */
	enum rte_bbdev_state state;  /**< If device is currently used or not */
	struct rte_device *device; /**< Backing device */
	/** User application callback for interrupts if present */
	struct rte_bbdev_cb_list list_cbs;
	struct rte_intr_handle *intr_handle; /**< Device interrupt handle */
	/** Enqueue MLD-TS function */
	rte_bbdev_enqueue_mldts_ops_t enqueue_mldts_ops;
	/** Dequeue MLD-TS function */
	rte_bbdev_dequeue_mldts_ops_t dequeue_mldts_ops;
};

/** @internal array of all devices */
extern struct rte_bbdev rte_bbdev_devices[];

/**
 * Enqueue a burst of encode operations for processing on a queue of the device.
 * This function only enqueues as many operations as currently possible and
 * does not block until @p num_ops entries in the queue are available.
 * This function does not provide any error notification to avoid the
 * corresponding overhead.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 * @param ops
 *   Pointer array containing operations to be enqueued. Must have at least
 *   @p num_ops entries.
 * @param num_ops
 *   The maximum number of operations to enqueue.
 *
 * @return
 *   The number of operations actually enqueued (this is the number of processed
 *   entries in the @p ops array).
 */
static inline uint16_t
rte_bbdev_enqueue_enc_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_enc_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->enqueue_enc_ops(q_data, ops, num_ops);
}

/**
 * Enqueue a burst of decode operations for processing on a queue of the device.
 * This function only enqueues as many operations as currently possible and
 * does not block until @p num_ops entries in the queue are available.
 * This function does not provide any error notification to avoid the
 * corresponding overhead.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 * @param ops
 *   Pointer array containing operations to be enqueued. Must have at least
 *   @p num_ops entries.
 * @param num_ops
 *   The maximum number of operations to enqueue.
 *
 * @return
 *   The number of operations actually enqueued (this is the number of processed
 *   entries in the @p ops array).
 */
static inline uint16_t
rte_bbdev_enqueue_dec_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_dec_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->enqueue_dec_ops(q_data, ops, num_ops);
}

/**
 * Enqueue a burst of LDPC encode operations for processing on a queue of the device.
 * This function only enqueues as many operations as currently possible and
 * does not block until @p num_ops entries in the queue are available.
 * This function does not provide any error notification to avoid the
 * corresponding overhead.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 * @param ops
 *   Pointer array containing operations to be enqueued. Must have at least
 *   @p num_ops entries.
 * @param num_ops
 *   The maximum number of operations to enqueue.
 *
 * @return
 *   The number of operations actually enqueued (this is the number of processed
 *   entries in the @p ops array).
 */
static inline uint16_t
rte_bbdev_enqueue_ldpc_enc_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_enc_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->enqueue_ldpc_enc_ops(q_data, ops, num_ops);
}

/**
 * Enqueue a burst of LDPC decode operations for processing on a queue of the device.
 * This function only enqueues as many operations as currently possible and
 * does not block until @p num_ops entries in the queue are available.
 * This function does not provide any error notification to avoid the
 * corresponding overhead.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 * @param ops
 *   Pointer array containing operations to be enqueued. Must have at least
 *   @p num_ops entries.
 * @param num_ops
 *   The maximum number of operations to enqueue.
 *
 * @return
 *   The number of operations actually enqueued (this is the number of processed
 *   entries in the @p ops array).
 */
static inline uint16_t
rte_bbdev_enqueue_ldpc_dec_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_dec_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->enqueue_ldpc_dec_ops(q_data, ops, num_ops);
}

/**
 * Enqueue a burst of FFT operations to a queue of the device.
 * This function only enqueues as many operations as currently possible and
 * does not block until @p num_ops entries in the queue are available.
 * This function does not provide any error notification to avoid the
 * corresponding overhead.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 * @param ops
 *   Pointer array containing operations to be enqueued.
 *   Must have at least @p num_ops entries.
 * @param num_ops
 *   The maximum number of operations to enqueue.
 *
 * @return
 *   The number of operations actually enqueued.
 *   (This is the number of processed entries in the @p ops array.)
 */
static inline uint16_t
rte_bbdev_enqueue_fft_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_fft_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->enqueue_fft_ops(q_data, ops, num_ops);
}

/**
 * Enqueue a burst of MLD-TS operations to a queue of the device.
 * This function only enqueues as many operations as currently possible and
 * does not block until @p num_ops entries in the queue are available.
 * This function does not provide any error notification to avoid the
 * corresponding overhead.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 * @param ops
 *   Pointer array containing operations to be enqueued. Must have at least
 *   @p num_ops entries.
 * @param num_ops
 *   The maximum number of operations to enqueue.
 *
 * @return
 *   The number of operations actually enqueued (this is the number of processed
 *   entries in the @p ops array).
 */
static inline uint16_t
rte_bbdev_enqueue_mldts_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_mldts_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->enqueue_mldts_ops(q_data, ops, num_ops);
}

/**
 * Dequeue a burst of processed encode operations from a queue of the device.
 * This function returns only the current contents of the queue,
 * and does not block until @p num_ops operations are available.
 * This function does not provide any error notification to avoid the
 * corresponding overhead.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 * @param ops
 *   Pointer array where operations will be dequeued to.
 *   Must have at least @p num_ops entries, i.e.
 *   a pointer to an array of operation pointers that will be filled.
 * @param num_ops
 *   The maximum number of operations to dequeue.
 *
 * @return
 *   The number of operations actually dequeued.
 *   (This is the number of entries copied into the @p ops array.)
 */
static inline uint16_t
rte_bbdev_dequeue_enc_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_enc_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->dequeue_enc_ops(q_data, ops, num_ops);
}
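
/**
 * Example (illustrative sketch): enqueue a burst of Turbo encode operations
 * drawn from an operation mempool and busy-poll until they have all been
 * processed. The op pool helpers used here (rte_bbdev_op_pool_create(),
 * rte_bbdev_enc_op_alloc_bulk(), rte_bbdev_enc_op_free_bulk()) come from
 * rte_bbdev_op.h; filling in the per-operation parameters and attaching the
 * input/output mbufs is omitted, and the identifiers and sizes are
 * assumptions made for the example only.
 *
 * @code{.c}
 * #define BURST_SIZE 32   // example burst size
 *
 * struct rte_bbdev_enc_op *ops[BURST_SIZE];
 * struct rte_mempool *op_pool;
 * uint16_t dev_id = 0, queue_id = 0;   // assumed identifiers
 * uint16_t nb_enq, nb_deq = 0;
 *
 * op_pool = rte_bbdev_op_pool_create("enc_op_pool", RTE_BBDEV_OP_TURBO_ENC,
 *         1024, 128, rte_socket_id());
 * if (op_pool == NULL ||
 *         rte_bbdev_enc_op_alloc_bulk(op_pool, ops, BURST_SIZE) != 0)
 *     rte_exit(EXIT_FAILURE, "Cannot allocate bbdev operations\n");
 *
 * // ... fill in op parameters and attach input/output mbufs here ...
 *
 * nb_enq = rte_bbdev_enqueue_enc_ops(dev_id, queue_id, ops, BURST_SIZE);
 * while (nb_deq < nb_enq)
 *     nb_deq += rte_bbdev_dequeue_enc_ops(dev_id, queue_id,
 *             &ops[nb_deq], nb_enq - nb_deq);
 *
 * rte_bbdev_enc_op_free_bulk(ops, nb_deq);
 * @endcode
 */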

/**
 * Dequeue a burst of processed decode operations from a queue of the device.
 * This function returns only the current contents of the queue, and does not
 * block until @p num_ops operations are available.
 * This function does not provide any error notification to avoid the
 * corresponding overhead.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 * @param ops
 *   Pointer array where operations will be dequeued to. Must have at least
 *   @p num_ops entries,
 *   i.e. a pointer to an array of operation pointers that will be filled.
 * @param num_ops
 *   The maximum number of operations to dequeue.
 *
 * @return
 *   The number of operations actually dequeued (this is the number of entries
 *   copied into the @p ops array).
 */
static inline uint16_t
rte_bbdev_dequeue_dec_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_dec_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->dequeue_dec_ops(q_data, ops, num_ops);
}

/**
 * Dequeue a burst of processed LDPC encode operations from a queue of the device.
 * This function returns only the current contents of the queue, and does not
 * block until @p num_ops operations are available.
 * This function does not provide any error notification to avoid the
 * corresponding overhead.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 * @param ops
 *   Pointer array where operations will be dequeued to. Must have at least
 *   @p num_ops entries.
 * @param num_ops
 *   The maximum number of operations to dequeue.
 *
 * @return
 *   The number of operations actually dequeued (this is the number of entries
 *   copied into the @p ops array).
 */
static inline uint16_t
rte_bbdev_dequeue_ldpc_enc_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_enc_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->dequeue_ldpc_enc_ops(q_data, ops, num_ops);
}

/**
 * Dequeue a burst of processed LDPC decode operations from a queue of the device.
 * This function returns only the current contents of the queue, and does not
 * block until @p num_ops operations are available.
 * This function does not provide any error notification to avoid the
 * corresponding overhead.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 * @param ops
 *   Pointer array where operations will be dequeued to. Must have at least
 *   @p num_ops entries.
 * @param num_ops
 *   The maximum number of operations to dequeue.
 *
 * @return
 *   The number of operations actually dequeued (this is the number of entries
 *   copied into the @p ops array).
 */
static inline uint16_t
rte_bbdev_dequeue_ldpc_dec_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_dec_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->dequeue_ldpc_dec_ops(q_data, ops, num_ops);
}

/**
 * Dequeue a burst of FFT operations from a queue of the device.
 * This function returns only the current contents of the queue, and does not
 * block until @p num_ops operations are available.
 * This function does not provide any error notification to avoid the
 * corresponding overhead.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 * @param ops
 *   Pointer array where operations will be dequeued to. Must have at least
 *   @p num_ops entries.
 * @param num_ops
 *   The maximum number of operations to dequeue.
 *
 * @return
 *   The number of operations actually dequeued (this is the number of entries
 *   copied into the @p ops array).
 */
static inline uint16_t
rte_bbdev_dequeue_fft_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_fft_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->dequeue_fft_ops(q_data, ops, num_ops);
}

/**
 * Dequeue a burst of MLD-TS operations from a queue of the device.
 * This function returns only the current contents of the queue, and does not
 * block until @p num_ops operations are available.
 * This function does not provide any error notification to avoid the
 * corresponding overhead.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 * @param ops
 *   Pointer array where operations will be dequeued to. Must have at least
 *   @p num_ops entries.
 * @param num_ops
 *   The maximum number of operations to dequeue.
 *
 * @return
 *   The number of operations actually dequeued (this is the number of entries
 *   copied into the @p ops array).
 */
__rte_experimental
static inline uint16_t
rte_bbdev_dequeue_mldts_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_mldts_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->dequeue_mldts_ops(q_data, ops, num_ops);
}

/** Definitions of device event types */
enum rte_bbdev_event_type {
	RTE_BBDEV_EVENT_UNKNOWN,  /**< unknown event type */
	RTE_BBDEV_EVENT_ERROR,  /**< error interrupt event */
	RTE_BBDEV_EVENT_DEQUEUE,  /**< dequeue event */
	RTE_BBDEV_EVENT_MAX  /**< max value of this enum */
};

/**
 * Typedef for application callback function registered by application
 * software for notification of device events
 *
 * @param dev_id
 *   Device identifier
 * @param event
 *   Device event to register for notification of.
 * @param cb_arg
 *   User specified parameter to be passed to user's callback function.
 * @param ret_param
 *   To pass data back to user application.
 */
typedef void (*rte_bbdev_cb_fn)(uint16_t dev_id,
		enum rte_bbdev_event_type event, void *cb_arg,
		void *ret_param);

/**
 * Register a callback function for specific device id. Multiple callbacks can
 * be added and will be called in the order they are added when an event is
 * triggered. Callbacks are called in a separate thread created by the DPDK EAL.
 *
 * @param dev_id
 *   Device id.
 * @param event
 *   The event that the callback will be registered for.
 * @param cb_fn
 *   User supplied callback function to be called.
 * @param cb_arg
 *   Pointer to parameter that will be passed to the callback.
 *
 * @return
 *   Zero on success, negative value on failure.
 */
int
rte_bbdev_callback_register(uint16_t dev_id, enum rte_bbdev_event_type event,
		rte_bbdev_cb_fn cb_fn, void *cb_arg);
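
/**
 * Example (illustrative sketch): register a callback that is invoked from the
 * EAL interrupt thread when the device reports an error event. The callback
 * below only logs the event; error_cb and the identifiers are names chosen
 * for the example.
 *
 * @code{.c}
 * static void
 * error_cb(uint16_t dev_id, enum rte_bbdev_event_type event, void *cb_arg,
 *         void *ret_param)
 * {
 *     RTE_SET_USED(cb_arg);
 *     RTE_SET_USED(ret_param);
 *     printf("bbdev %u reported event %d\n", dev_id, (int)event);
 * }
 *
 * // ... during device set-up, before rte_bbdev_start() ...
 * if (rte_bbdev_callback_register(dev_id, RTE_BBDEV_EVENT_ERROR,
 *         error_cb, NULL) != 0)
 *     printf("cannot register error callback for bbdev %u\n", dev_id);
 * @endcode
 */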

/**
 * Unregister a callback function for specific device id.
 *
 * @param dev_id
 *   The device identifier.
 * @param event
 *   The event that the callback will be unregistered for.
 * @param cb_fn
 *   User supplied callback function to be unregistered.
 * @param cb_arg
 *   Pointer to the parameter supplied when registering the callback.
 *   (void *)-1 means to remove all registered callbacks with the specified
 *   function address.
 *
 * @return
 *   - 0 on success
 *   - EINVAL if invalid parameter pointer is provided
 *   - EAGAIN if the provided callback pointer does not exist
 */
int
rte_bbdev_callback_unregister(uint16_t dev_id, enum rte_bbdev_event_type event,
		rte_bbdev_cb_fn cb_fn, void *cb_arg);

/**
 * Enable a one-shot interrupt on the next operation enqueued to a particular
 * queue. The interrupt will be triggered when the operation is ready to be
 * dequeued. To handle the interrupt, an epoll file descriptor must be
 * registered using rte_bbdev_queue_intr_ctl(), and then an application
 * thread/lcore can wait for the interrupt using rte_epoll_wait().
 *
 * @param dev_id
 *   The device identifier.
 * @param queue_id
 *   The index of the queue.
 *
 * @return
 *   - 0 on success
 *   - negative value on failure - as returned from PMD
 */
int
rte_bbdev_queue_intr_enable(uint16_t dev_id, uint16_t queue_id);

/**
 * Disable a one-shot interrupt on the next operation enqueued to a particular
 * queue (if it has been enabled).
 *
 * @param dev_id
 *   The device identifier.
 * @param queue_id
 *   The index of the queue.
 *
 * @return
 *   - 0 on success
 *   - negative value on failure - as returned from PMD
 */
int
rte_bbdev_queue_intr_disable(uint16_t dev_id, uint16_t queue_id);

/**
 * Control interface for per-queue interrupts.
 *
 * @param dev_id
 *   The device identifier.
 * @param queue_id
 *   The index of the queue.
 * @param epfd
 *   Epoll file descriptor that will be associated with the interrupt source.
 *   If the special value RTE_EPOLL_PER_THREAD is provided, a per thread epoll
 *   file descriptor created by the EAL is used (RTE_EPOLL_PER_THREAD can also
 *   be used when calling rte_epoll_wait()).
 * @param op
 *   The operation to be performed for the vector: RTE_INTR_EVENT_ADD or
 *   RTE_INTR_EVENT_DEL.
 * @param data
 *   User context, that will be returned in the epdata.data field of the
 *   rte_epoll_event structure filled in by rte_epoll_wait().
 *
 * @return
 *   - 0 on success
 *   - ENOTSUP if interrupts are not supported by the identified device
 *   - negative value on failure - as returned from PMD
 */
int
rte_bbdev_queue_intr_ctl(uint16_t dev_id, uint16_t queue_id, int epfd, int op,
		void *data);
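
/**
 * Example (illustrative sketch): wait for the completion of the next
 * operation on a queue using the one-shot per-queue interrupt instead of
 * busy polling. It assumes rte_bbdev_intr_enable() was called before the
 * device was started and that the identifiers below exist; rte_epoll_wait()
 * and RTE_EPOLL_PER_THREAD come from the EAL interrupt API.
 *
 * @code{.c}
 * struct rte_epoll_event ev;
 * uint16_t dev_id = 0, queue_id = 0;   // assumed identifiers
 *
 * if (rte_bbdev_queue_intr_ctl(dev_id, queue_id, RTE_EPOLL_PER_THREAD,
 *         RTE_INTR_EVENT_ADD, NULL) != 0)
 *     rte_exit(EXIT_FAILURE, "Per-queue interrupts not available\n");
 *
 * rte_bbdev_queue_intr_enable(dev_id, queue_id);
 * // ... enqueue one or more operations on this queue ...
 * while (rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1) <= 0)
 *     ;   // interrupted or timed out, wait again
 * // the completed operation(s) can now be dequeued without further waiting
 * @endcode
 */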

/**
 * Convert device status from enum to string.
 *
 * @param status
 *   Device status as enum.
 *
 * @returns
 *   Device status as string or NULL if invalid.
 */
const char *
rte_bbdev_device_status_str(enum rte_bbdev_device_status status);

/**
 * Convert enqueue status from enum to string.
 *
 * @param status
 *   Enqueue status as enum.
 *
 * @returns
 *   Enqueue status as string or NULL if the status is invalid.
 */
const char *
rte_bbdev_enqueue_status_str(enum rte_bbdev_enqueue_status status);

#ifdef __cplusplus
}
#endif

#endif /* _RTE_BBDEV_H_ */