/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#ifndef _RTE_BBDEV_H_
#define _RTE_BBDEV_H_

/**
 * @file rte_bbdev.h
 *
 * Wireless base band device abstraction APIs.
 *
 * This API allows an application to discover, configure and use a device to
 * process operations. An asynchronous API (enqueue, followed by later dequeue)
 * is used for processing operations.
 *
 * The functions in this API are not thread-safe when called on the same
 * target object (a device, or a queue on a device), with the exception that
 * one thread can enqueue operations to a queue while another thread dequeues
 * from the same queue.
 */

#ifdef __cplusplus
extern "C" {
#endif

#include <stdint.h>
#include <stdbool.h>

#include <rte_compat.h>
#include <rte_cpuflags.h>

#include "rte_bbdev_op.h"

#ifndef RTE_BBDEV_MAX_DEVS
#define RTE_BBDEV_MAX_DEVS 128  /**< Max number of devices */
#endif

/*
 * Maximum size used to manage the enum rte_bbdev_enqueue_status,
 * including padding for future enum insertions.
 * The enum values must be explicitly kept smaller than or equal to this padded maximum size.
 */
#define RTE_BBDEV_ENQ_STATUS_SIZE_MAX 6

/** Flags indicating the current state of a BBDEV device */
enum rte_bbdev_state {
	RTE_BBDEV_UNUSED,
	RTE_BBDEV_INITIALIZED
};

/**
 * Get the total number of devices that have been successfully initialised.
 *
 * @return
 *   The total number of usable devices.
 */
uint16_t
rte_bbdev_count(void);

/**
 * Check if a device is valid.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @return
 *   true if device ID is valid and device is attached, false otherwise.
 */
bool
rte_bbdev_is_valid(uint16_t dev_id);

/**
 * Get the next enabled device.
 *
 * @param dev_id
 *   The current device.
 *
 * @return
 *   - The next device, or
 *   - RTE_BBDEV_MAX_DEVS if none found
 */
uint16_t
rte_bbdev_find_next(uint16_t dev_id);

/** Iterate through all enabled devices */
#define RTE_BBDEV_FOREACH(i) for (i = rte_bbdev_find_next(-1); \
		i < RTE_BBDEV_MAX_DEVS; \
		i = rte_bbdev_find_next(i))
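
/*
 * Example (illustrative sketch only, not part of the API): count the usable
 * devices by walking the enabled device IDs with the macro above.
 *
 *   uint16_t dev_id, nb_devs = 0;
 *
 *   RTE_BBDEV_FOREACH(dev_id) {
 *       if (rte_bbdev_is_valid(dev_id))
 *           nb_devs++;
 *   }
 */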

/**
 * Set up device queues.
 * This function must be called on a device before configuring its queues and
 * starting the device. It can also be called when a device is in the stopped
 * state. If any device queues have been configured, their configuration will be
 * cleared by a call to this function.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param num_queues
 *   Number of queues to configure on device.
 * @param socket_id
 *   ID of a socket which will be used to allocate memory.
 *
 * @return
 *   - 0 on success
 *   - -ENODEV if dev_id is invalid or the device is corrupted
 *   - -EINVAL if num_queues is invalid, 0 or greater than maximum
 *   - -EBUSY if the identified device has already started
 *   - -ENOMEM if unable to allocate memory
 */
int
rte_bbdev_setup_queues(uint16_t dev_id, uint16_t num_queues, int socket_id);

/**
 * Enable interrupts.
 * This function may be called before starting the device to enable the
 * interrupts if they are available.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @return
 *   - 0 on success
 *   - -ENODEV if dev_id is invalid or the device is corrupted
 *   - -EBUSY if the identified device has already started
 *   - -ENOTSUP if the interrupts are not supported by the device
 */
int
rte_bbdev_intr_enable(uint16_t dev_id);

/** Device queue configuration structure */
struct rte_bbdev_queue_conf {
	int socket;  /**< NUMA socket used for memory allocation */
	uint32_t queue_size;  /**< Size of queue */
	uint8_t priority;  /**< Queue priority */
	bool deferred_start; /**< Do not start queue when device is started. */
	enum rte_bbdev_op_type op_type; /**< Operation type */
};

/**
 * Configure a queue on a device.
 * This function can be called after device configuration, and before starting.
 * It can also be called when the device or the queue is in the stopped state.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 * @param conf
 *   The queue configuration. If NULL, a default configuration will be used.
 *
 * @return
 *   - 0 on success
 *   - EINVAL if the identified queue size or priority are invalid
 *   - EBUSY if the identified queue or its device have already started
 */
int
rte_bbdev_queue_configure(uint16_t dev_id, uint16_t queue_id,
		const struct rte_bbdev_queue_conf *conf);

/**
 * Start a device.
 * This is the last step needed before enqueueing operations is possible.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @return
 *   - 0 on success
 *   - negative value on failure - as returned from PMD
 */
int
rte_bbdev_start(uint16_t dev_id);
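
/*
 * Example bring-up sequence (illustrative sketch only; the queue count,
 * operation type and omitted error handling are assumptions):
 *
 *   struct rte_bbdev_info info;
 *   struct rte_bbdev_queue_conf qconf;
 *
 *   rte_bbdev_info_get(dev_id, &info);
 *   rte_bbdev_setup_queues(dev_id, 4, info.socket_id);
 *   qconf = info.drv.default_queue_conf;
 *   qconf.op_type = RTE_BBDEV_OP_TURBO_DEC;
 *   rte_bbdev_queue_configure(dev_id, 0, &qconf);
 *   rte_bbdev_start(dev_id);
 */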

/**
 * Stop a device.
 * The device can be reconfigured, and restarted after being stopped.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @return
 *   - 0 on success
 */
int
rte_bbdev_stop(uint16_t dev_id);

/**
 * Close a device.
 * The device cannot be restarted without reconfiguration!
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @return
 *   - 0 on success
 */
int
rte_bbdev_close(uint16_t dev_id);

/**
 * Start a specified queue on a device.
 * This is only needed if the queue has been stopped, or if the deferred_start
 * flag has been set when configuring the queue.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 *
 * @return
 *   - 0 on success
 *   - negative value on failure - as returned from PMD
 */
int
rte_bbdev_queue_start(uint16_t dev_id, uint16_t queue_id);

/**
 * Stop a specified queue on a device, to allow reconfiguration.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 *
 * @return
 *   - 0 on success
 *   - negative value on failure - as returned from PMD
 */
int
rte_bbdev_queue_stop(uint16_t dev_id, uint16_t queue_id);

/**
 * Flags to indicate the reason why a previous enqueue may not have
 * consumed all requested operations.
 * In case of multiple reasons, the later one supersedes the earlier ones.
 * The related macro RTE_BBDEV_ENQ_STATUS_SIZE_MAX can be used
 * as an absolute maximum, notably for sizing arrays,
 * while allowing for future enum insertions.
 */
enum rte_bbdev_enqueue_status {
	RTE_BBDEV_ENQ_STATUS_NONE,             /**< Nothing to report. */
	RTE_BBDEV_ENQ_STATUS_QUEUE_FULL,       /**< Not enough room in queue. */
	RTE_BBDEV_ENQ_STATUS_RING_FULL,        /**< Not enough room in ring. */
	RTE_BBDEV_ENQ_STATUS_INVALID_OP,       /**< Operation was rejected as invalid. */
	/* Note: RTE_BBDEV_ENQ_STATUS_SIZE_MAX must be larger than or equal to the maximum enum value. */
};

/**
 * Flags to indicate the status of the device.
 */
enum rte_bbdev_device_status {
	RTE_BBDEV_DEV_NOSTATUS,        /**< Nothing being reported. */
	RTE_BBDEV_DEV_NOT_SUPPORTED,   /**< Device status is not supported on the PMD. */
	RTE_BBDEV_DEV_RESET,           /**< Device in reset and un-configured state. */
	RTE_BBDEV_DEV_CONFIGURED,      /**< Device is configured and ready to use. */
	RTE_BBDEV_DEV_ACTIVE,          /**< Device is configured and VF is being used. */
	RTE_BBDEV_DEV_FATAL_ERR,       /**< Device has hit a fatal uncorrectable error. */
	RTE_BBDEV_DEV_RESTART_REQ,     /**< Device requires application to restart. */
	RTE_BBDEV_DEV_RECONFIG_REQ,    /**< Device requires application to reconfigure queues. */
	RTE_BBDEV_DEV_CORRECT_ERR,     /**< Warning of a correctable error event happened. */
};

/** Device statistics. */
struct rte_bbdev_stats {
	uint64_t enqueued_count;  /**< Count of all operations enqueued */
	uint64_t dequeued_count;  /**< Count of all operations dequeued */
	/** Total error count on operations enqueued */
	uint64_t enqueue_err_count;
	/** Total error count on operations dequeued */
	uint64_t dequeue_err_count;
	/** Total warning count on operations enqueued. */
	uint64_t enqueue_warn_count;
	/** Total warning count on operations dequeued. */
	uint64_t dequeue_warn_count;
	/** Total enqueue status count based on *rte_bbdev_enqueue_status* enum. */
	uint64_t enqueue_status_count[RTE_BBDEV_ENQ_STATUS_SIZE_MAX];
	/** CPU cycles consumed by the (HW/SW) accelerator device to offload
	 *  the enqueue request to its internal queues.
	 *  - For a HW device this is the cycles consumed in MMIO write
	 *  - For a SW (vdev) device, this is the processing time of the
	 *     bbdev operation
	 */
	uint64_t acc_offload_cycles;
};

/**
 * Retrieve the general I/O statistics of a device.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param stats
 *   Pointer to the structure where statistics will be copied. On error, this
 *   location may or may not have been modified.
 *
 * @return
 *   - 0 on success
 *   - EINVAL if invalid parameter pointer is provided
 */
int
rte_bbdev_stats_get(uint16_t dev_id, struct rte_bbdev_stats *stats);
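
/*
 * Example (illustrative sketch only; assumes <stdio.h> and <inttypes.h> are
 * available for printf() and the PRIu64 format macros):
 *
 *   struct rte_bbdev_stats stats;
 *
 *   if (rte_bbdev_stats_get(dev_id, &stats) == 0)
 *       printf("enqueued %" PRIu64 " dequeued %" PRIu64 "\n",
 *               stats.enqueued_count, stats.dequeued_count);
 */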

/**
 * Reset the statistics of a device.
 *
 * @param dev_id
 *   The identifier of the device.
 * @return
 *   - 0 on success
 */
int
rte_bbdev_stats_reset(uint16_t dev_id);

/** Device information supplied by the device's driver */
struct rte_bbdev_driver_info {
	/** Driver name */
	const char *driver_name;

	/** Maximum number of queues supported by the device */
	unsigned int max_num_queues;
	/** Maximum number of queues supported per operation type */
	unsigned int num_queues[RTE_BBDEV_OP_TYPE_SIZE_MAX];
	/** Priority level supported per operation type */
	unsigned int queue_priority[RTE_BBDEV_OP_TYPE_SIZE_MAX];
	/** Queue size limit (queue size must also be power of 2) */
	uint32_t queue_size_lim;
	/** Set if device off-loads operation to hardware */
	bool hardware_accelerated;
	/** Max value supported by queue priority for DL */
	uint8_t max_dl_queue_priority;
	/** Max value supported by queue priority for UL */
	uint8_t max_ul_queue_priority;
	/** Set if device supports per-queue interrupts */
	bool queue_intr_supported;
	/** Device Status */
	enum rte_bbdev_device_status device_status;
	/** HARQ memory available in kB */
	uint32_t harq_buffer_size;
	/** Minimum alignment of buffers, in bytes */
	uint16_t min_alignment;
	/** Byte endianness (RTE_BIG_ENDIAN/RTE_LITTLE_ENDIAN) supported
	 *  for input/output data
	 */
	uint8_t data_endianness;
	/** Default queue configuration used if none is supplied */
	struct rte_bbdev_queue_conf default_queue_conf;
	/** Device operation capabilities */
	const struct rte_bbdev_op_cap *capabilities;
	/** Device cpu_flag requirements */
	const enum rte_cpu_flag_t *cpu_flag_reqs;
};

/** Macro used at end of bbdev PMD list */
#define RTE_BBDEV_END_OF_CAPABILITIES_LIST() \
	{ RTE_BBDEV_OP_NONE }

/**
 * Device information structure used by an application to discover a device's
 * capabilities and current configuration.
 */
struct rte_bbdev_info {
	int socket_id;  /**< NUMA socket that device is on */
	const char *dev_name;  /**< Unique device name */
	const struct rte_device *device; /**< Device Information */
	uint16_t num_queues;  /**< Number of queues currently configured */
	bool started;  /**< Set if device is currently started */
	struct rte_bbdev_driver_info drv;  /**< Info from device driver */
};

/**
 * Retrieve information about a device.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param dev_info
 *   Pointer to the structure where information will be copied. On error, this
 *   location may or may not have been modified.
 *
 * @return
 *   - 0 on success
 *   - EINVAL if invalid parameter pointer is provided
 */
int
rte_bbdev_info_get(uint16_t dev_id, struct rte_bbdev_info *dev_info);
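
/*
 * Example (illustrative sketch only): walk the driver capability list,
 * terminated by RTE_BBDEV_END_OF_CAPABILITIES_LIST(), to check whether this
 * device supports LDPC decode operations.
 *
 *   struct rte_bbdev_info info;
 *   const struct rte_bbdev_op_cap *cap;
 *   bool ldpc_dec_supported = false;
 *
 *   rte_bbdev_info_get(dev_id, &info);
 *   for (cap = info.drv.capabilities; cap->type != RTE_BBDEV_OP_NONE; cap++)
 *       if (cap->type == RTE_BBDEV_OP_LDPC_DEC)
 *           ldpc_dec_supported = true;
 */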

/** Queue information */
struct rte_bbdev_queue_info {
	/** Current device configuration */
	struct rte_bbdev_queue_conf conf;
	/** Set if queue is currently started */
	bool started;
};

/**
 * Retrieve information about a specific queue on a device.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 * @param queue_info
 *   Pointer to the structure where information will be copied. On error, this
 *   location may or may not have been modified.
 *
 * @return
 *   - 0 on success
 *   - EINVAL if invalid parameter pointer is provided
 */
int
rte_bbdev_queue_info_get(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_queue_info *queue_info);

/** @internal The data structure associated with each queue of a device. */
struct rte_bbdev_queue_data {
	void *queue_private;  /**< Driver-specific per-queue data */
	struct rte_bbdev_queue_conf conf;  /**< Current configuration */
	struct rte_bbdev_stats queue_stats;  /**< Queue statistics */
	enum rte_bbdev_enqueue_status enqueue_status; /**< Enqueue status when op is rejected */
	bool started;  /**< Queue state */
};

/** @internal Enqueue encode operations for processing on queue of a device. */
typedef uint16_t (*rte_bbdev_enqueue_enc_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_enc_op **ops,
		uint16_t num);

/** @internal Enqueue decode operations for processing on queue of a device. */
typedef uint16_t (*rte_bbdev_enqueue_dec_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_dec_op **ops,
		uint16_t num);

/** @internal Enqueue FFT operations for processing on queue of a device. */
typedef uint16_t (*rte_bbdev_enqueue_fft_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_fft_op **ops,
		uint16_t num);

/** @internal Dequeue encode operations from a queue of a device. */
typedef uint16_t (*rte_bbdev_dequeue_enc_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_enc_op **ops, uint16_t num);

/** @internal Dequeue decode operations from a queue of a device. */
typedef uint16_t (*rte_bbdev_dequeue_dec_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_dec_op **ops, uint16_t num);

/** @internal Dequeue FFT operations from a queue of a device. */
typedef uint16_t (*rte_bbdev_dequeue_fft_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_fft_op **ops, uint16_t num);

#define RTE_BBDEV_NAME_MAX_LEN  64  /**< Max length of device name */

/**
 * @internal The data associated with a device, with no function pointers.
 * This structure is safe to place in shared memory to be common among
 * different processes in a multi-process configuration. Drivers can access
 * these fields, but should never write to them!
 */
struct rte_bbdev_data {
	char name[RTE_BBDEV_NAME_MAX_LEN]; /**< Unique identifier name */
	void *dev_private;  /**< Driver-specific private data */
	uint16_t num_queues;  /**< Number of currently configured queues */
	struct rte_bbdev_queue_data *queues;  /**< Queue structures */
	uint16_t dev_id;  /**< Device ID */
	int socket_id;  /**< NUMA socket that device is on */
	bool started;  /**< Device run-time state */
	uint16_t process_cnt;  /**< Counter of processes using the device */
};

/* Forward declarations */
struct rte_bbdev_ops;
struct rte_bbdev_callback;
struct rte_intr_handle;

/** Structure to keep track of registered callbacks */
RTE_TAILQ_HEAD(rte_bbdev_cb_list, rte_bbdev_callback);

/**
 * @internal The data structure associated with a device. Drivers can access
 * these fields, but should only write to the *_ops fields.
 */
struct __rte_cache_aligned rte_bbdev {
	/** Enqueue encode function */
	rte_bbdev_enqueue_enc_ops_t enqueue_enc_ops;
	/** Enqueue decode function */
	rte_bbdev_enqueue_dec_ops_t enqueue_dec_ops;
	/** Dequeue encode function */
	rte_bbdev_dequeue_enc_ops_t dequeue_enc_ops;
	/** Dequeue decode function */
	rte_bbdev_dequeue_dec_ops_t dequeue_dec_ops;
	/** Enqueue LDPC encode function */
	rte_bbdev_enqueue_enc_ops_t enqueue_ldpc_enc_ops;
	/** Enqueue LDPC decode function */
	rte_bbdev_enqueue_dec_ops_t enqueue_ldpc_dec_ops;
	/** Dequeue LDPC encode function */
	rte_bbdev_dequeue_enc_ops_t dequeue_ldpc_enc_ops;
	/** Dequeue LDPC decode function */
	rte_bbdev_dequeue_dec_ops_t dequeue_ldpc_dec_ops;
	/** Enqueue FFT function */
	rte_bbdev_enqueue_fft_ops_t enqueue_fft_ops;
	/** Dequeue FFT function */
	rte_bbdev_dequeue_fft_ops_t dequeue_fft_ops;
	const struct rte_bbdev_ops *dev_ops;  /**< Functions exported by PMD */
	struct rte_bbdev_data *data;  /**< Pointer to device data */
	enum rte_bbdev_state state;  /**< If device is currently used or not */
	struct rte_device *device; /**< Backing device */
	/** User application callback for interrupts if present */
	struct rte_bbdev_cb_list list_cbs;
	struct rte_intr_handle *intr_handle; /**< Device interrupt handle */
};

/** @internal array of all devices */
extern struct rte_bbdev rte_bbdev_devices[];

/**
 * Enqueue a burst of encode operations to a queue of the device for processing.
 * This function only enqueues as many operations as currently possible and
 * does not block until @p num_ops entries in the queue are available.
 * This function does not provide any error notification to avoid the
 * corresponding overhead.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 * @param ops
 *   Pointer array containing operations to be enqueued. Must have at least
 *   @p num_ops entries.
 * @param num_ops
 *   The maximum number of operations to enqueue.
 *
 * @return
 *   The number of operations actually enqueued (this is the number of processed
 *   entries in the @p ops array).
 */
static inline uint16_t
rte_bbdev_enqueue_enc_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_enc_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->enqueue_enc_ops(q_data, ops, num_ops);
}
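
/*
 * Example (illustrative sketch only; assumes the ops[] array was previously
 * filled from an operation mempool, e.g. with rte_bbdev_enc_op_alloc_bulk(),
 * and that busy-waiting on a full queue is acceptable):
 *
 *   uint16_t sent = 0;
 *
 *   while (sent < num_ops)
 *       sent += rte_bbdev_enqueue_enc_ops(dev_id, queue_id,
 *               &ops[sent], num_ops - sent);
 */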

/**
 * Enqueue a burst of decode operations to a queue of the device for processing.
 * This function only enqueues as many operations as currently possible and
 * does not block until @p num_ops entries in the queue are available.
 * This function does not provide any error notification to avoid the
 * corresponding overhead.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 * @param ops
 *   Pointer array containing operations to be enqueued. Must have at least
 *   @p num_ops entries.
 * @param num_ops
 *   The maximum number of operations to enqueue.
 *
 * @return
 *   The number of operations actually enqueued (this is the number of processed
 *   entries in the @p ops array).
 */
static inline uint16_t
rte_bbdev_enqueue_dec_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_dec_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->enqueue_dec_ops(q_data, ops, num_ops);
}

/**
 * Enqueue a burst of LDPC encode operations to a queue of the device for processing.
 * This function only enqueues as many operations as currently possible and
 * does not block until @p num_ops entries in the queue are available.
 * This function does not provide any error notification to avoid the
 * corresponding overhead.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 * @param ops
 *   Pointer array containing operations to be enqueued. Must have at least
 *   @p num_ops entries.
 * @param num_ops
 *   The maximum number of operations to enqueue.
 *
 * @return
 *   The number of operations actually enqueued (this is the number of processed
 *   entries in the @p ops array).
 */
static inline uint16_t
rte_bbdev_enqueue_ldpc_enc_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_enc_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->enqueue_ldpc_enc_ops(q_data, ops, num_ops);
}

/**
 * Enqueue a burst of LDPC decode operations to a queue of the device for processing.
 * This function only enqueues as many operations as currently possible and
 * does not block until @p num_ops entries in the queue are available.
 * This function does not provide any error notification to avoid the
 * corresponding overhead.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 * @param ops
 *   Pointer array containing operations to be enqueued. Must have at least
 *   @p num_ops entries.
 * @param num_ops
 *   The maximum number of operations to enqueue.
 *
 * @return
 *   The number of operations actually enqueued (this is the number of processed
 *   entries in the @p ops array).
 */
static inline uint16_t
rte_bbdev_enqueue_ldpc_dec_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_dec_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->enqueue_ldpc_dec_ops(q_data, ops, num_ops);
}

/**
 * Enqueue a burst of FFT operations to a queue of the device.
 * This function only enqueues as many operations as currently possible and
 * does not block until @p num_ops entries in the queue are available.
 * This function does not provide any error notification to avoid the
 * corresponding overhead.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 * @param ops
 *   Pointer array containing operations to be enqueued.
 *   Must have at least @p num_ops entries.
 * @param num_ops
 *   The maximum number of operations to enqueue.
 *
 * @return
 *   The number of operations actually enqueued.
 *   (This is the number of processed entries in the @p ops array.)
 */
__rte_experimental
static inline uint16_t
rte_bbdev_enqueue_fft_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_fft_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->enqueue_fft_ops(q_data, ops, num_ops);
}

/**
 * Dequeue a burst of processed encode operations from a queue of the device.
 * This function returns only the operations that are currently ready in the
 * queue, and does not block until @p num_ops operations are available.
 * This function does not provide any error notification to avoid the
 * corresponding overhead.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 * @param ops
 *   Pointer array where operations will be dequeued to.
 *   Must have at least @p num_ops entries, i.e.
 *   a pointer to a table of void * pointers (ops) that will be filled.
 * @param num_ops
 *   The maximum number of operations to dequeue.
 *
 * @return
 *   The number of operations actually dequeued.
 *   (This is the number of entries copied into the @p ops array.)
 */
static inline uint16_t
rte_bbdev_dequeue_enc_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_enc_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->dequeue_enc_ops(q_data, ops, num_ops);
}
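
/*
 * Example (illustrative sketch only): poll the queue until all operations
 * previously enqueued on it have completed and been dequeued.
 *
 *   uint16_t recv = 0;
 *
 *   while (recv < num_ops)
 *       recv += rte_bbdev_dequeue_enc_ops(dev_id, queue_id,
 *               &ops[recv], num_ops - recv);
 */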

/**
 * Dequeue a burst of processed decode operations from a queue of the device.
 * This function returns only the operations that are currently ready in the
 * queue, and does not block until @p num_ops operations are available.
 * This function does not provide any error notification to avoid the
 * corresponding overhead.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 * @param ops
 *   Pointer array where operations will be dequeued to. Must have at least
 *   @p num_ops entries, i.e.
 *   a pointer to a table of void * pointers (ops) that will be filled.
 * @param num_ops
 *   The maximum number of operations to dequeue.
 *
 * @return
 *   The number of operations actually dequeued (this is the number of entries
 *   copied into the @p ops array).
 */
static inline uint16_t
rte_bbdev_dequeue_dec_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_dec_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->dequeue_dec_ops(q_data, ops, num_ops);
}

/**
 * Dequeue a burst of processed LDPC encode operations from a queue of the device.
 * This function returns only the operations that are currently ready in the
 * queue, and does not block until @p num_ops operations are available.
 * This function does not provide any error notification to avoid the
 * corresponding overhead.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 * @param ops
 *   Pointer array where operations will be dequeued to. Must have at least
 *   @p num_ops entries.
 * @param num_ops
 *   The maximum number of operations to dequeue.
 *
 * @return
 *   The number of operations actually dequeued (this is the number of entries
 *   copied into the @p ops array).
 */
static inline uint16_t
rte_bbdev_dequeue_ldpc_enc_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_enc_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->dequeue_ldpc_enc_ops(q_data, ops, num_ops);
}

/**
 * Dequeue a burst of processed LDPC decode operations from a queue of the device.
 * This function returns only the operations that are currently ready in the
 * queue, and does not block until @p num_ops operations are available.
 * This function does not provide any error notification to avoid the
 * corresponding overhead.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 * @param ops
 *   Pointer array where operations will be dequeued to. Must have at least
 *   @p num_ops entries.
 * @param num_ops
 *   The maximum number of operations to dequeue.
 *
 * @return
 *   The number of operations actually dequeued (this is the number of entries
 *   copied into the @p ops array).
 */
static inline uint16_t
rte_bbdev_dequeue_ldpc_dec_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_dec_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->dequeue_ldpc_dec_ops(q_data, ops, num_ops);
}

/**
 * Dequeue a burst of FFT operations from a queue of the device.
 * This function returns only the operations that are currently ready in the
 * queue, and does not block until @p num_ops operations are available.
 * This function does not provide any error notification to avoid the
 * corresponding overhead.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 * @param ops
 *   Pointer array where operations will be dequeued to. Must have at least
 *   @p num_ops entries.
 * @param num_ops
 *   The maximum number of operations to dequeue.
 *
 * @return
 *   The number of operations actually dequeued (this is the number of entries
 *   copied into the @p ops array).
 */
__rte_experimental
static inline uint16_t
rte_bbdev_dequeue_fft_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_fft_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->dequeue_fft_ops(q_data, ops, num_ops);
}

/** Definitions of device event types */
enum rte_bbdev_event_type {
	RTE_BBDEV_EVENT_UNKNOWN,  /**< unknown event type */
	RTE_BBDEV_EVENT_ERROR,  /**< error interrupt event */
	RTE_BBDEV_EVENT_DEQUEUE,  /**< dequeue event */
	RTE_BBDEV_EVENT_MAX  /**< max value of this enum */
};

/**
 * Typedef for application callback function registered by application
 * software for notification of device events.
 *
 * @param dev_id
 *   Device identifier.
 * @param event
 *   Device event to register for notification of.
 * @param cb_arg
 *   User specified parameter to be passed to user's callback function.
 * @param ret_param
 *   To pass data back to user application.
 */
typedef void (*rte_bbdev_cb_fn)(uint16_t dev_id,
		enum rte_bbdev_event_type event, void *cb_arg,
		void *ret_param);

/**
 * Register a callback function for a specific device id. Multiple callbacks can
 * be added and will be called in the order they are added when an event is
 * triggered. Callbacks are called in a separate thread created by the DPDK EAL.
 *
 * @param dev_id
 *   Device id.
 * @param event
 *   The event that the callback will be registered for.
 * @param cb_fn
 *   User supplied callback function to be called.
 * @param cb_arg
 *   Pointer to parameter that will be passed to the callback.
 *
 * @return
 *   Zero on success, negative value on failure.
 */
int
rte_bbdev_callback_register(uint16_t dev_id, enum rte_bbdev_event_type event,
		rte_bbdev_cb_fn cb_fn, void *cb_arg);
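
/*
 * Example (illustrative sketch only; the callback name and its body are
 * assumptions):
 *
 *   static void
 *   error_cb(uint16_t dev_id, enum rte_bbdev_event_type event,
 *           void *cb_arg, void *ret_param)
 *   {
 *       // React to the error interrupt reported for dev_id.
 *   }
 *
 *   rte_bbdev_callback_register(dev_id, RTE_BBDEV_EVENT_ERROR, error_cb, NULL);
 */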

/**
 * Unregister a callback function for a specific device id.
 *
 * @param dev_id
 *   The device identifier.
 * @param event
 *   The event that the callback will be unregistered for.
 * @param cb_fn
 *   User supplied callback function to be unregistered.
 * @param cb_arg
 *   Pointer to the parameter supplied when registering the callback.
 *   (void *)-1 means to remove all registered callbacks with the specified
 *   function address.
 *
 * @return
 *   - 0 on success
 *   - EINVAL if invalid parameter pointer is provided
 *   - EAGAIN if the provided callback pointer does not exist
 */
int
rte_bbdev_callback_unregister(uint16_t dev_id, enum rte_bbdev_event_type event,
		rte_bbdev_cb_fn cb_fn, void *cb_arg);

/**
 * Enable a one-shot interrupt on the next operation enqueued to a particular
 * queue. The interrupt will be triggered when the operation is ready to be
 * dequeued. To handle the interrupt, an epoll file descriptor must be
 * registered using rte_bbdev_queue_intr_ctl(), and then an application
 * thread/lcore can wait for the interrupt using rte_epoll_wait().
 *
 * @param dev_id
 *   The device identifier.
 * @param queue_id
 *   The index of the queue.
 *
 * @return
 *   - 0 on success
 *   - negative value on failure - as returned from PMD
 */
int
rte_bbdev_queue_intr_enable(uint16_t dev_id, uint16_t queue_id);

/**
 * Disable a one-shot interrupt on the next operation enqueued to a particular
 * queue (if it has been enabled).
 *
 * @param dev_id
 *   The device identifier.
 * @param queue_id
 *   The index of the queue.
 *
 * @return
 *   - 0 on success
 *   - negative value on failure - as returned from PMD
 */
int
rte_bbdev_queue_intr_disable(uint16_t dev_id, uint16_t queue_id);

/**
 * Control interface for per-queue interrupts.
 *
 * @param dev_id
 *   The device identifier.
 * @param queue_id
 *   The index of the queue.
 * @param epfd
 *   Epoll file descriptor that will be associated with the interrupt source.
 *   If the special value RTE_EPOLL_PER_THREAD is provided, a per thread epoll
 *   file descriptor created by the EAL is used (RTE_EPOLL_PER_THREAD can also
 *   be used when calling rte_epoll_wait()).
 * @param op
 *   The operation to be performed for the vector: RTE_INTR_EVENT_ADD or
 *   RTE_INTR_EVENT_DEL.
 * @param data
 *   User context, that will be returned in the epdata.data field of the
 *   rte_epoll_event structure filled in by rte_epoll_wait().
 *
 * @return
 *   - 0 on success
 *   - ENOTSUP if interrupts are not supported by the identified device
 *   - negative value on failure - as returned from PMD
 */
int
rte_bbdev_queue_intr_ctl(uint16_t dev_id, uint16_t queue_id, int epfd, int op,
		void *data);
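
/*
 * Example (illustrative sketch only; assumes the EAL epoll helpers are
 * available via <rte_interrupts.h> and that operations are enqueued elsewhere):
 *
 *   struct rte_epoll_event ev;
 *
 *   rte_bbdev_queue_intr_ctl(dev_id, queue_id, RTE_EPOLL_PER_THREAD,
 *           RTE_INTR_EVENT_ADD, NULL);
 *   rte_bbdev_queue_intr_enable(dev_id, queue_id);
 *   // ... enqueue operations on this queue ...
 *   rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
 *   // The next dequeue on this queue should now return completed operations.
 */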

/**
 * Convert device status from enum to string.
 *
 * @param status
 *   Device status as enum.
 *
 * @returns
 *   Device status as string or NULL if invalid.
 *
 */
__rte_experimental
const char*
rte_bbdev_device_status_str(enum rte_bbdev_device_status status);

/**
 * Convert queue status from enum to string.
 *
 * @param status
 *   Queue status as enum.
 *
 * @returns
 *   Queue status as string or NULL if status is invalid.
 *
 */
__rte_experimental
const char*
rte_bbdev_enqueue_status_str(enum rte_bbdev_enqueue_status status);

#ifdef __cplusplus
}
#endif

#endif /* _RTE_BBDEV_H_ */