/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#ifndef _RTE_BBDEV_H_
#define _RTE_BBDEV_H_

/**
 * @file rte_bbdev.h
 *
 * Wireless base band device abstraction APIs.
 *
 * This API allows an application to discover, configure and use a device to
 * process operations. An asynchronous API (enqueue, followed by later dequeue)
 * is used for processing operations.
 *
 * The functions in this API are not thread-safe when called on the same
 * target object (a device, or a queue on a device), with the exception that
 * one thread can enqueue operations to a queue while another thread dequeues
 * from the same queue.
 */

#ifdef __cplusplus
extern "C" {
#endif

#include <stdint.h>
#include <stdbool.h>

#include <rte_cpuflags.h>

#include "rte_bbdev_op.h"

#ifndef RTE_BBDEV_MAX_DEVS
#define RTE_BBDEV_MAX_DEVS 128  /**< Max number of devices */
#endif

/*
 * Maximum size to be used to manage the enum rte_bbdev_enqueue_status
 * including padding for future enum insertion.
 * The enum values must be explicitly kept smaller than or equal to this
 * padded maximum size.
 */
#define RTE_BBDEV_ENQ_STATUS_SIZE_MAX 6

/** Flags indicate current state of BBDEV device */
enum rte_bbdev_state {
	RTE_BBDEV_UNUSED,
	RTE_BBDEV_INITIALIZED
};

/**
 * Get the total number of devices that have been successfully initialised.
 *
 * @return
 *   The total number of usable devices.
 */
uint16_t
rte_bbdev_count(void);

/**
 * Check if a device is valid.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @return
 *   true if device ID is valid and device is attached, false otherwise.
 */
bool
rte_bbdev_is_valid(uint16_t dev_id);

/**
 * Get the next enabled device.
 *
 * @param dev_id
 *   The current device.
 *
 * @return
 *   - The next device, or
 *   - RTE_BBDEV_MAX_DEVS if none found
 */
uint16_t
rte_bbdev_find_next(uint16_t dev_id);

/** Iterate through all enabled devices */
#define RTE_BBDEV_FOREACH(i) for (i = rte_bbdev_find_next(-1); \
		i < RTE_BBDEV_MAX_DEVS; \
		i = rte_bbdev_find_next(i))

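/*
 * Example: device discovery (an illustrative sketch, not part of the API).
 * It iterates over all enabled bbdev devices and queries their information;
 * the printed fields are chosen arbitrarily here.
 *
 *   uint16_t dev_id;
 *   struct rte_bbdev_info info;
 *
 *   printf("%u bbdev device(s) detected\n", rte_bbdev_count());
 *   RTE_BBDEV_FOREACH(dev_id) {
 *       if (rte_bbdev_info_get(dev_id, &info) != 0)
 *           continue;
 *       printf("dev %u: %s (socket %d, max queues %u)\n",
 *               dev_id, info.dev_name, info.socket_id,
 *               info.drv.max_num_queues);
 *   }
 */
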
/**
 * Set up device queues.
 * This function must be called on a device before configuring its queues and
 * starting the device. It can also be called when a device is in the stopped
 * state. If any device queues have been configured, their configuration will
 * be cleared by a call to this function.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param num_queues
 *   Number of queues to configure on device.
 * @param socket_id
 *   ID of a socket which will be used to allocate memory.
 *
 * @return
 *   - 0 on success
 *   - -ENODEV if dev_id is invalid or the device is corrupted
 *   - -EINVAL if num_queues is invalid, 0 or greater than maximum
 *   - -EBUSY if the identified device has already started
 *   - -ENOMEM if unable to allocate memory
 */
int
rte_bbdev_setup_queues(uint16_t dev_id, uint16_t num_queues, int socket_id);

/**
 * Enable interrupts.
 * This function may be called before starting the device to enable the
 * interrupts if they are available.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @return
 *   - 0 on success
 *   - -ENODEV if dev_id is invalid or the device is corrupted
 *   - -EBUSY if the identified device has already started
 *   - -ENOTSUP if the interrupts are not supported by the device
 */
int
rte_bbdev_intr_enable(uint16_t dev_id);

/** Device queue configuration structure */
struct rte_bbdev_queue_conf {
	int socket;  /**< NUMA socket used for memory allocation */
	uint32_t queue_size;  /**< Size of queue */
	uint8_t priority;  /**< Queue priority */
	bool deferred_start; /**< Do not start queue when device is started. */
	enum rte_bbdev_op_type op_type; /**< Operation type */
};

/**
 * Configure a queue on a device.
 * This function can be called after device configuration, and before starting.
 * It can also be called when the device or the queue is in the stopped state.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 * @param conf
 *   The queue configuration. If NULL, a default configuration will be used.
 *
 * @return
 *   - 0 on success
 *   - EINVAL if the identified queue size or priority are invalid
 *   - EBUSY if the identified queue or its device have already started
 */
int
rte_bbdev_queue_configure(uint16_t dev_id, uint16_t queue_id,
		const struct rte_bbdev_queue_conf *conf);

/**
 * Start a device.
 * This is the last step needed before enqueueing operations is possible.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @return
 *   - 0 on success
 *   - negative value on failure - as returned from PMD
 */
int
rte_bbdev_start(uint16_t dev_id);

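/*
 * Example: typical device bring-up (an illustrative sketch; the number of
 * queues, the socket and the operation type are assumptions, not
 * requirements). Queues are set up first, then individually configured,
 * then the device is started.
 *
 *   struct rte_bbdev_info info;
 *   struct rte_bbdev_queue_conf conf;
 *   uint16_t q_id, num_queues = 4;
 *
 *   rte_bbdev_info_get(dev_id, &info);
 *   if (rte_bbdev_setup_queues(dev_id, num_queues, rte_socket_id()) != 0)
 *       return -1;
 *   conf = info.drv.default_queue_conf;
 *   conf.op_type = RTE_BBDEV_OP_LDPC_DEC;
 *   for (q_id = 0; q_id < num_queues; q_id++)
 *       if (rte_bbdev_queue_configure(dev_id, q_id, &conf) != 0)
 *           return -1;
 *   if (rte_bbdev_start(dev_id) != 0)
 *       return -1;
 */
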
/**
 * Stop a device.
 * The device can be reconfigured, and restarted after being stopped.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @return
 *   - 0 on success
 */
int
rte_bbdev_stop(uint16_t dev_id);

/**
 * Close a device.
 * The device cannot be restarted without reconfiguration!
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @return
 *   - 0 on success
 */
int
rte_bbdev_close(uint16_t dev_id);

/**
 * Start a specified queue on a device.
 * This is only needed if the queue has been stopped, or if the deferred_start
 * flag has been set when configuring the queue.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 *
 * @return
 *   - 0 on success
 *   - negative value on failure - as returned from PMD
 */
int
rte_bbdev_queue_start(uint16_t dev_id, uint16_t queue_id);

/**
 * Stop a specified queue on a device, to allow reconfiguration.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 *
 * @return
 *   - 0 on success
 *   - negative value on failure - as returned from PMD
 */
int
rte_bbdev_queue_stop(uint16_t dev_id, uint16_t queue_id);

/**
 * Flags to indicate the reason why a previous enqueue may not have
 * consumed all requested operations.
 * When several reasons apply, the latest one supersedes the previous ones.
 * The related macro RTE_BBDEV_ENQ_STATUS_SIZE_MAX can be used as an absolute
 * maximum, notably for sizing arrays, while allowing for future enum
 * insertions.
 */
enum rte_bbdev_enqueue_status {
	RTE_BBDEV_ENQ_STATUS_NONE,             /**< Nothing to report. */
	RTE_BBDEV_ENQ_STATUS_QUEUE_FULL,       /**< Not enough room in queue. */
	RTE_BBDEV_ENQ_STATUS_RING_FULL,        /**< Not enough room in ring. */
	RTE_BBDEV_ENQ_STATUS_INVALID_OP,       /**< Operation was rejected as invalid. */
	/* Note: RTE_BBDEV_ENQ_STATUS_SIZE_MAX must be greater than or equal to the maximum enum value. */
};

/**
 * Flags to indicate the status of the device.
 */
enum rte_bbdev_device_status {
	RTE_BBDEV_DEV_NOSTATUS,        /**< Nothing being reported. */
	RTE_BBDEV_DEV_NOT_SUPPORTED,   /**< Device status is not supported on the PMD. */
	RTE_BBDEV_DEV_RESET,           /**< Device in reset and un-configured state. */
	RTE_BBDEV_DEV_CONFIGURED,      /**< Device is configured and ready to use. */
	RTE_BBDEV_DEV_ACTIVE,          /**< Device is configured and VF is being used. */
	RTE_BBDEV_DEV_FATAL_ERR,       /**< Device has hit a fatal uncorrectable error. */
	RTE_BBDEV_DEV_RESTART_REQ,     /**< Device requires application to restart. */
	RTE_BBDEV_DEV_RECONFIG_REQ,    /**< Device requires application to reconfigure queues. */
	RTE_BBDEV_DEV_CORRECT_ERR,     /**< Warning of a correctable error event. */
};

/** Device statistics. */
struct rte_bbdev_stats {
	uint64_t enqueued_count;  /**< Count of all operations enqueued */
	uint64_t dequeued_count;  /**< Count of all operations dequeued */
	/** Total error count on operations enqueued */
	uint64_t enqueue_err_count;
	/** Total error count on operations dequeued */
	uint64_t dequeue_err_count;
	/** Total warning count on operations enqueued. */
	uint64_t enqueue_warn_count;
	/** Total warning count on operations dequeued. */
	uint64_t dequeue_warn_count;
	/** Total enqueue status count based on *rte_bbdev_enqueue_status* enum. */
	uint64_t enqueue_status_count[RTE_BBDEV_ENQ_STATUS_SIZE_MAX];
	/** CPU cycles consumed by the (HW/SW) accelerator device to offload
	 *  the enqueue request to its internal queues.
	 *  - For a HW device this is the cycles consumed in MMIO write
	 *  - For a SW (vdev) device, this is the processing time of the
	 *    bbdev operation
	 */
	uint64_t acc_offload_cycles;
};

/**
 * Retrieve the general I/O statistics of a device.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param stats
 *   Pointer to structure to where statistics will be copied. On error, this
 *   location may or may not have been modified.
 *
 * @return
 *   - 0 on success
 *   - EINVAL if invalid parameter pointer is provided
 */
int
rte_bbdev_stats_get(uint16_t dev_id, struct rte_bbdev_stats *stats);

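/*
 * Example: reading device statistics (an illustrative sketch).
 * RTE_BBDEV_ENQ_STATUS_SIZE_MAX bounds the enqueue_status_count array,
 * so the whole array can be walked regardless of future enum additions.
 *
 *   struct rte_bbdev_stats stats;
 *   int i;
 *
 *   if (rte_bbdev_stats_get(dev_id, &stats) == 0) {
 *       printf("enqueued %"PRIu64" dequeued %"PRIu64"\n",
 *               stats.enqueued_count, stats.dequeued_count);
 *       for (i = 0; i < RTE_BBDEV_ENQ_STATUS_SIZE_MAX; i++)
 *           printf("enq status %d: %"PRIu64"\n", i,
 *                   stats.enqueue_status_count[i]);
 *   }
 */
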
/**
 * Reset the statistics of a device.
 *
 * @param dev_id
 *   The identifier of the device.
 * @return
 *   - 0 on success
 */
int
rte_bbdev_stats_reset(uint16_t dev_id);

/** Device information supplied by the device's driver */
struct rte_bbdev_driver_info {
	/** Driver name */
	const char *driver_name;

	/** Maximum number of queues supported by the device */
	unsigned int max_num_queues;
	/** Maximum number of queues supported per operation type */
	unsigned int num_queues[RTE_BBDEV_OP_TYPE_SIZE_MAX];
	/** Priority level supported per operation type */
	unsigned int queue_priority[RTE_BBDEV_OP_TYPE_SIZE_MAX];
	/** Queue size limit (queue size must also be power of 2) */
	uint32_t queue_size_lim;
	/** Set if device off-loads operation to hardware */
	bool hardware_accelerated;
	/** Max value supported by queue priority for DL */
	uint8_t max_dl_queue_priority;
	/** Max value supported by queue priority for UL */
	uint8_t max_ul_queue_priority;
	/** Set if device supports per-queue interrupts */
	bool queue_intr_supported;
	/** Device Status */
	enum rte_bbdev_device_status device_status;
	/** HARQ memory available in kB */
	uint32_t harq_buffer_size;
	/** Minimum alignment of buffers, in bytes */
	uint16_t min_alignment;
	/** Byte endianness (RTE_BIG_ENDIAN/RTE_LITTLE_ENDIAN) supported
	 *  for input/output data
	 */
	uint8_t data_endianness;
	/** Default queue configuration used if none is supplied */
	struct rte_bbdev_queue_conf default_queue_conf;
	/** Device operation capabilities */
	const struct rte_bbdev_op_cap *capabilities;
	/** Device cpu_flag requirements */
	const enum rte_cpu_flag_t *cpu_flag_reqs;
};

/** Macro used at end of bbdev PMD list */
#define RTE_BBDEV_END_OF_CAPABILITIES_LIST() \
	{ RTE_BBDEV_OP_NONE }

/**
 * Device information structure used by an application to discover a device's
 * capabilities and current configuration.
 */
struct rte_bbdev_info {
	int socket_id;  /**< NUMA socket that device is on */
	const char *dev_name;  /**< Unique device name */
	const struct rte_device *device; /**< Device Information */
	uint16_t num_queues;  /**< Number of queues currently configured */
	bool started;  /**< Set if device is currently started */
	struct rte_bbdev_driver_info drv;  /**< Info from device driver */
};

/**
 * Retrieve information about a device.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param dev_info
 *   Pointer to structure to where information will be copied. On error, this
 *   location may or may not have been modified.
 *
 * @return
 *   - 0 on success
 *   - EINVAL if invalid parameter pointer is provided
 */
int
rte_bbdev_info_get(uint16_t dev_id, struct rte_bbdev_info *dev_info);

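/*
 * Example: checking device capabilities (an illustrative sketch).
 * The capabilities array is terminated by an RTE_BBDEV_OP_NONE entry,
 * as produced by RTE_BBDEV_END_OF_CAPABILITIES_LIST().
 *
 *   struct rte_bbdev_info info;
 *   const struct rte_bbdev_op_cap *cap;
 *   bool has_ldpc_dec = false;
 *
 *   rte_bbdev_info_get(dev_id, &info);
 *   for (cap = info.drv.capabilities;
 *           cap->type != RTE_BBDEV_OP_NONE; cap++)
 *       if (cap->type == RTE_BBDEV_OP_LDPC_DEC)
 *           has_ldpc_dec = true;
 */
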
/** Queue information */
struct rte_bbdev_queue_info {
	/** Current device configuration */
	struct rte_bbdev_queue_conf conf;
	/** Set if queue is currently started */
	bool started;
};

/**
 * Retrieve information about a specific queue on a device.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 * @param queue_info
 *   Pointer to structure to where information will be copied. On error, this
 *   location may or may not have been modified.
 *
 * @return
 *   - 0 on success
 *   - EINVAL if invalid parameter pointer is provided
 */
int
rte_bbdev_queue_info_get(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_queue_info *queue_info);

/** @internal The data structure associated with each queue of a device. */
struct rte_bbdev_queue_data {
	void *queue_private;  /**< Driver-specific per-queue data */
	struct rte_bbdev_queue_conf conf;  /**< Current configuration */
	struct rte_bbdev_stats queue_stats;  /**< Queue statistics */
	enum rte_bbdev_enqueue_status enqueue_status; /**< Enqueue status when op is rejected */
	bool started;  /**< Queue state */
};

/** @internal Enqueue encode operations for processing on queue of a device. */
typedef uint16_t (*rte_bbdev_enqueue_enc_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_enc_op **ops,
		uint16_t num);

/** @internal Enqueue decode operations for processing on queue of a device. */
typedef uint16_t (*rte_bbdev_enqueue_dec_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_dec_op **ops,
		uint16_t num);

/** @internal Enqueue FFT operations for processing on queue of a device. */
typedef uint16_t (*rte_bbdev_enqueue_fft_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_fft_op **ops,
		uint16_t num);

/** @internal Dequeue encode operations from a queue of a device. */
typedef uint16_t (*rte_bbdev_dequeue_enc_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_enc_op **ops, uint16_t num);

/** @internal Dequeue decode operations from a queue of a device. */
typedef uint16_t (*rte_bbdev_dequeue_dec_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_dec_op **ops, uint16_t num);

/** @internal Dequeue FFT operations from a queue of a device. */
typedef uint16_t (*rte_bbdev_dequeue_fft_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_fft_op **ops, uint16_t num);

#define RTE_BBDEV_NAME_MAX_LEN  64  /**< Max length of device name */

/**
 * @internal The data associated with a device, with no function pointers.
 * This structure is safe to place in shared memory to be common among
 * different processes in a multi-process configuration. Drivers can access
 * these fields, but should never write to them!
 */
struct rte_bbdev_data {
	char name[RTE_BBDEV_NAME_MAX_LEN]; /**< Unique identifier name */
	void *dev_private;  /**< Driver-specific private data */
	uint16_t num_queues;  /**< Number of currently configured queues */
	struct rte_bbdev_queue_data *queues;  /**< Queue structures */
	uint16_t dev_id;  /**< Device ID */
	int socket_id;  /**< NUMA socket that device is on */
	bool started;  /**< Device run-time state */
	uint16_t process_cnt;  /**< Counter of processes using the device */
};

/* Forward declarations */
struct rte_bbdev_ops;
struct rte_bbdev_callback;
struct rte_intr_handle;

/** Structure to keep track of registered callbacks */
RTE_TAILQ_HEAD(rte_bbdev_cb_list, rte_bbdev_callback);

/**
 * @internal The data structure associated with a device. Drivers can access
 * these fields, but should only write to the *_ops fields.
 */
struct __rte_cache_aligned rte_bbdev {
	/** Enqueue encode function */
	rte_bbdev_enqueue_enc_ops_t enqueue_enc_ops;
	/** Enqueue decode function */
	rte_bbdev_enqueue_dec_ops_t enqueue_dec_ops;
	/** Dequeue encode function */
	rte_bbdev_dequeue_enc_ops_t dequeue_enc_ops;
	/** Dequeue decode function */
	rte_bbdev_dequeue_dec_ops_t dequeue_dec_ops;
	/** Enqueue LDPC encode function */
	rte_bbdev_enqueue_enc_ops_t enqueue_ldpc_enc_ops;
	/** Enqueue LDPC decode function */
	rte_bbdev_enqueue_dec_ops_t enqueue_ldpc_dec_ops;
	/** Dequeue LDPC encode function */
	rte_bbdev_dequeue_enc_ops_t dequeue_ldpc_enc_ops;
	/** Dequeue LDPC decode function */
	rte_bbdev_dequeue_dec_ops_t dequeue_ldpc_dec_ops;
	/** Enqueue FFT function */
	rte_bbdev_enqueue_fft_ops_t enqueue_fft_ops;
	/** Dequeue FFT function */
	rte_bbdev_dequeue_fft_ops_t dequeue_fft_ops;
	const struct rte_bbdev_ops *dev_ops;  /**< Functions exported by PMD */
	struct rte_bbdev_data *data;  /**< Pointer to device data */
	enum rte_bbdev_state state;  /**< If device is currently used or not */
	struct rte_device *device; /**< Backing device */
	/** User application callback for interrupts if present */
	struct rte_bbdev_cb_list list_cbs;
	struct rte_intr_handle *intr_handle; /**< Device interrupt handle */
};

/** @internal array of all devices */
extern struct rte_bbdev rte_bbdev_devices[];

/**
 * Enqueue a burst of processed encode operations to a queue of the device.
 * This function only enqueues as many operations as currently possible and
 * does not block until @p num_ops entries in the queue are available.
 * This function does not provide any error notification to avoid the
 * corresponding overhead.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 * @param ops
 *   Pointer array containing operations to be enqueued. Must have at least
 *   @p num_ops entries.
 * @param num_ops
 *   The maximum number of operations to enqueue.
 *
 * @return
 *   The number of operations actually enqueued (this is the number of processed
 *   entries in the @p ops array).
 */
static inline uint16_t
rte_bbdev_enqueue_enc_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_enc_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->enqueue_enc_ops(q_data, ops, num_ops);
}

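/*
 * Example: enqueueing a burst with retry on partial acceptance (an
 * illustrative sketch; ops is assumed to hold burst_size valid operations,
 * e.g. obtained from rte_bbdev_enc_op_alloc_bulk()).
 *
 *   uint16_t enq = 0;
 *
 *   while (enq < burst_size)
 *       enq += rte_bbdev_enqueue_enc_ops(dev_id, queue_id,
 *               &ops[enq], burst_size - enq);
 */
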
/**
 * Enqueue a burst of processed decode operations to a queue of the device.
 * This function only enqueues as many operations as currently possible and
 * does not block until @p num_ops entries in the queue are available.
 * This function does not provide any error notification to avoid the
 * corresponding overhead.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 * @param ops
 *   Pointer array containing operations to be enqueued. Must have at least
 *   @p num_ops entries.
 * @param num_ops
 *   The maximum number of operations to enqueue.
 *
 * @return
 *   The number of operations actually enqueued (this is the number of processed
 *   entries in the @p ops array).
 */
static inline uint16_t
rte_bbdev_enqueue_dec_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_dec_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->enqueue_dec_ops(q_data, ops, num_ops);
}

/**
 * Enqueue a burst of processed LDPC encode operations to a queue of the device.
 * This function only enqueues as many operations as currently possible and
 * does not block until @p num_ops entries in the queue are available.
 * This function does not provide any error notification to avoid the
 * corresponding overhead.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 * @param ops
 *   Pointer array containing operations to be enqueued. Must have at least
 *   @p num_ops entries.
 * @param num_ops
 *   The maximum number of operations to enqueue.
 *
 * @return
 *   The number of operations actually enqueued (this is the number of processed
 *   entries in the @p ops array).
 */
static inline uint16_t
rte_bbdev_enqueue_ldpc_enc_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_enc_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->enqueue_ldpc_enc_ops(q_data, ops, num_ops);
}

/**
 * Enqueue a burst of processed LDPC decode operations to a queue of the device.
 * This function only enqueues as many operations as currently possible and
 * does not block until @p num_ops entries in the queue are available.
 * This function does not provide any error notification to avoid the
 * corresponding overhead.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 * @param ops
 *   Pointer array containing operations to be enqueued. Must have at least
 *   @p num_ops entries.
 * @param num_ops
 *   The maximum number of operations to enqueue.
 *
 * @return
 *   The number of operations actually enqueued (this is the number of processed
 *   entries in the @p ops array).
 */
static inline uint16_t
rte_bbdev_enqueue_ldpc_dec_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_dec_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->enqueue_ldpc_dec_ops(q_data, ops, num_ops);
}

/**
 * Enqueue a burst of FFT operations to a queue of the device.
 * This function only enqueues as many operations as currently possible and
 * does not block until @p num_ops entries in the queue are available.
 * This function does not provide any error notification to avoid the
 * corresponding overhead.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 * @param ops
 *   Pointer array containing operations to be enqueued.
 *   Must have at least @p num_ops entries.
 * @param num_ops
 *   The maximum number of operations to enqueue.
 *
 * @return
 *   The number of operations actually enqueued.
 *   (This is the number of processed entries in the @p ops array.)
 */
__rte_experimental
static inline uint16_t
rte_bbdev_enqueue_fft_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_fft_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->enqueue_fft_ops(q_data, ops, num_ops);
}

/**
 * Dequeue a burst of processed encode operations from a queue of the device.
 * This function returns only the current contents of the queue, and does not
 * block until @p num_ops operations are available.
 * This function does not provide any error notification to avoid the
 * corresponding overhead.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 * @param ops
 *   Pointer array where operations will be dequeued to.
 *   Must have at least @p num_ops entries, i.e.
 *   a pointer to a table of void * pointers (ops) that will be filled.
 * @param num_ops
 *   The maximum number of operations to dequeue.
 *
 * @return
 *   The number of operations actually dequeued.
 *   (This is the number of entries copied into the @p ops array.)
 */
static inline uint16_t
rte_bbdev_dequeue_enc_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_enc_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->dequeue_enc_ops(q_data, ops, num_ops);
}

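/*
 * Example: polling for completed operations (an illustrative sketch;
 * ops and burst_size are assumed as in the enqueue example above).
 * Dequeue is non-blocking, so the application polls until the expected
 * number of operations has been retrieved.
 *
 *   uint16_t deq = 0;
 *
 *   while (deq < burst_size)
 *       deq += rte_bbdev_dequeue_enc_ops(dev_id, queue_id,
 *               &ops[deq], burst_size - deq);
 */
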
/**
 * Dequeue a burst of processed decode operations from a queue of the device.
 * This function returns only the current contents of the queue, and does not
 * block until @p num_ops operations are available.
 * This function does not provide any error notification to avoid the
 * corresponding overhead.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 * @param ops
 *   Pointer array where operations will be dequeued to. Must have at least
 *   @p num_ops entries, i.e.
 *   a pointer to a table of void * pointers (ops) that will be filled.
 * @param num_ops
 *   The maximum number of operations to dequeue.
 *
 * @return
 *   The number of operations actually dequeued (this is the number of entries
 *   copied into the @p ops array).
 */
static inline uint16_t
rte_bbdev_dequeue_dec_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_dec_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->dequeue_dec_ops(q_data, ops, num_ops);
}

/**
 * Dequeue a burst of processed LDPC encode operations from a queue of the device.
 * This function returns only the current contents of the queue, and does not
 * block until @p num_ops operations are available.
 * This function does not provide any error notification to avoid the
 * corresponding overhead.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 * @param ops
 *   Pointer array where operations will be dequeued to. Must have at least
 *   @p num_ops entries.
 * @param num_ops
 *   The maximum number of operations to dequeue.
 *
 * @return
 *   The number of operations actually dequeued (this is the number of entries
 *   copied into the @p ops array).
 */
static inline uint16_t
rte_bbdev_dequeue_ldpc_enc_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_enc_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->dequeue_ldpc_enc_ops(q_data, ops, num_ops);
}

/**
 * Dequeue a burst of processed LDPC decode operations from a queue of the device.
 * This function returns only the current contents of the queue, and does not
 * block until @p num_ops operations are available.
 * This function does not provide any error notification to avoid the
 * corresponding overhead.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 * @param ops
 *   Pointer array where operations will be dequeued to. Must have at least
 *   @p num_ops entries.
 * @param num_ops
 *   The maximum number of operations to dequeue.
 *
 * @return
 *   The number of operations actually dequeued (this is the number of entries
 *   copied into the @p ops array).
 */
static inline uint16_t
rte_bbdev_dequeue_ldpc_dec_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_dec_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->dequeue_ldpc_dec_ops(q_data, ops, num_ops);
}

/**
 * Dequeue a burst of FFT operations from a queue of the device.
 * This function returns only the current contents of the queue, and does not
 * block until @p num_ops operations are available.
 * This function does not provide any error notification to avoid the
 * corresponding overhead.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 * @param ops
 *   Pointer array where operations will be dequeued to. Must have at least
 *   @p num_ops entries.
 * @param num_ops
 *   The maximum number of operations to dequeue.
 *
 * @return
 *   The number of operations actually dequeued (this is the number of entries
 *   copied into the @p ops array).
 */
__rte_experimental
static inline uint16_t
rte_bbdev_dequeue_fft_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_fft_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->dequeue_fft_ops(q_data, ops, num_ops);
}

/** Definitions of device event types */
enum rte_bbdev_event_type {
	RTE_BBDEV_EVENT_UNKNOWN,  /**< unknown event type */
	RTE_BBDEV_EVENT_ERROR,  /**< error interrupt event */
	RTE_BBDEV_EVENT_DEQUEUE,  /**< dequeue event */
	RTE_BBDEV_EVENT_MAX  /**< max value of this enum */
};

/**
 * Typedef for application callback function registered by application
 * software for notification of device events
 *
 * @param dev_id
 *   Device identifier
 * @param event
 *   Device event to register for notification of.
 * @param cb_arg
 *   User specified parameter to be passed to user's callback function.
 * @param ret_param
 *   To pass data back to user application.
 */
typedef void (*rte_bbdev_cb_fn)(uint16_t dev_id,
		enum rte_bbdev_event_type event, void *cb_arg,
		void *ret_param);

/**
 * Register a callback function for specific device id. Multiple callbacks can
 * be added and will be called in the order they are added when an event is
 * triggered. Callbacks are called in a separate thread created by the DPDK EAL.
 *
 * @param dev_id
 *   Device id.
 * @param event
 *   The event that the callback will be registered for.
 * @param cb_fn
 *   User supplied callback function to be called.
 * @param cb_arg
 *   Pointer to parameter that will be passed to the callback.
 *
 * @return
 *   Zero on success, negative value on failure.
 */
int
rte_bbdev_callback_register(uint16_t dev_id, enum rte_bbdev_event_type event,
		rte_bbdev_cb_fn cb_fn, void *cb_arg);

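/*
 * Example: registering an error callback (an illustrative sketch; the
 * callback body is application defined).
 *
 *   static void
 *   error_cb(uint16_t dev_id, enum rte_bbdev_event_type event,
 *           void *cb_arg, void *ret_param)
 *   {
 *       if (event == RTE_BBDEV_EVENT_ERROR)
 *           printf("bbdev %u reported an error\n", dev_id);
 *   }
 *
 *   rte_bbdev_callback_register(dev_id, RTE_BBDEV_EVENT_ERROR,
 *           error_cb, NULL);
 */
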
/**
 * Unregister a callback function for specific device id.
 *
 * @param dev_id
 *   The device identifier.
 * @param event
 *   The event that the callback will be unregistered for.
 * @param cb_fn
 *   User supplied callback function to be unregistered.
 * @param cb_arg
 *   Pointer to the parameter supplied when registering the callback.
 *   (void *)-1 means to remove all registered callbacks with the specified
 *   function address.
 *
 * @return
 *   - 0 on success
 *   - EINVAL if invalid parameter pointer is provided
 *   - EAGAIN if the provided callback pointer does not exist
 */
int
rte_bbdev_callback_unregister(uint16_t dev_id, enum rte_bbdev_event_type event,
		rte_bbdev_cb_fn cb_fn, void *cb_arg);

/**
 * Enable a one-shot interrupt on the next operation enqueued to a particular
 * queue. The interrupt will be triggered when the operation is ready to be
 * dequeued. To handle the interrupt, an epoll file descriptor must be
 * registered using rte_bbdev_queue_intr_ctl(), and then an application
 * thread/lcore can wait for the interrupt using rte_epoll_wait().
 *
 * @param dev_id
 *   The device identifier.
 * @param queue_id
 *   The index of the queue.
 *
 * @return
 *   - 0 on success
 *   - negative value on failure - as returned from PMD
 */
int
rte_bbdev_queue_intr_enable(uint16_t dev_id, uint16_t queue_id);

/**
 * Disable a one-shot interrupt on the next operation enqueued to a particular
 * queue (if it has been enabled).
 *
 * @param dev_id
 *   The device identifier.
 * @param queue_id
 *   The index of the queue.
 *
 * @return
 *   - 0 on success
 *   - negative value on failure - as returned from PMD
 */
int
rte_bbdev_queue_intr_disable(uint16_t dev_id, uint16_t queue_id);

/**
 * Control interface for per-queue interrupts.
 *
 * @param dev_id
 *   The device identifier.
 * @param queue_id
 *   The index of the queue.
 * @param epfd
 *   Epoll file descriptor that will be associated with the interrupt source.
 *   If the special value RTE_EPOLL_PER_THREAD is provided, a per thread epoll
 *   file descriptor created by the EAL is used (RTE_EPOLL_PER_THREAD can also
 *   be used when calling rte_epoll_wait()).
 * @param op
 *   The operation to be performed for the vector: RTE_INTR_EVENT_ADD or
 *   RTE_INTR_EVENT_DEL.
 * @param data
 *   User context that will be returned in the epdata.data field of the
 *   rte_epoll_event structure filled in by rte_epoll_wait().
 *
 * @return
 *   - 0 on success
 *   - ENOTSUP if interrupts are not supported by the identified device
 *   - negative value on failure - as returned from PMD
 */
int
rte_bbdev_queue_intr_ctl(uint16_t dev_id, uint16_t queue_id, int epfd, int op,
		void *data);

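/*
 * Example: interrupt-driven dequeue (an illustrative sketch; ops and
 * burst_size are assumed as in the enqueue example above).
 * The queue interrupt source is attached to the per-thread epoll instance,
 * a one-shot interrupt is armed, and the thread then waits before dequeueing.
 *
 *   struct rte_epoll_event ev;
 *
 *   rte_bbdev_queue_intr_ctl(dev_id, queue_id, RTE_EPOLL_PER_THREAD,
 *           RTE_INTR_EVENT_ADD, NULL);
 *   rte_bbdev_queue_intr_enable(dev_id, queue_id);
 *   if (rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1) > 0)
 *       (void)rte_bbdev_dequeue_enc_ops(dev_id, queue_id, ops,
 *               burst_size);
 */
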
/**
 * Convert device status from enum to string.
 *
 * @param status
 *   Device status as enum.
 *
 * @returns
 *   Device status as string or NULL if invalid.
 *
 */
__rte_experimental
const char*
rte_bbdev_device_status_str(enum rte_bbdev_device_status status);

/**
 * Convert queue status from enum to string.
 *
 * @param status
 *   Queue status as enum.
 *
 * @returns
 *   Queue status as string or NULL if the status is invalid.
 *
 */
__rte_experimental
const char*
rte_bbdev_enqueue_status_str(enum rte_bbdev_enqueue_status status);

#ifdef __cplusplus
}
#endif

#endif /* _RTE_BBDEV_H_ */