xref: /dpdk/lib/bbdev/rte_bbdev.h (revision 353e3639d458f5cdaf3d938aade25579fa490b1b)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Intel Corporation
3  */
4 
5 #ifndef _RTE_BBDEV_H_
6 #define _RTE_BBDEV_H_
7 
8 /**
9  * @file rte_bbdev.h
10  *
11  * Wireless base band device abstraction APIs.
12  *
13  * This API allows an application to discover, configure and use a device to
14  * process operations. An asynchronous API (enqueue, followed by later dequeue)
15  * is used for processing operations.
16  *
17  * The functions in this API are not thread-safe when called on the same
18  * target object (a device, or a queue on a device), with the exception that
19  * one thread can enqueue operations to a queue while another thread dequeues
20  * from the same queue.
21  */
22 
23 #include <stdint.h>
24 #include <stdbool.h>
25 
26 #include <rte_compat.h>
27 #include <rte_cpuflags.h>
28 
29 #include "rte_bbdev_op.h"
30 
31 #ifdef __cplusplus
32 extern "C" {
33 #endif
34 
35 #ifndef RTE_BBDEV_MAX_DEVS
36 #define RTE_BBDEV_MAX_DEVS 128  /**< Max number of devices */
37 #endif
38 
39 /*
40  * Maximum size used to manage the enum rte_bbdev_enqueue_status,
41  * including padding for future enum insertion.
42  * The enum values must be explicitly kept smaller than or equal to this padded maximum size.
43  */
44 #define RTE_BBDEV_ENQ_STATUS_SIZE_MAX 6
45 
46 /** Flags indicating the current state of a BBDEV device */
47 enum rte_bbdev_state {
48 	RTE_BBDEV_UNUSED,
49 	RTE_BBDEV_INITIALIZED
50 };
51 
52 /**
53  * Get the total number of devices that have been successfully initialised.
54  *
55  * @return
56  *   The total number of usable devices.
57  */
58 uint16_t
59 rte_bbdev_count(void);
60 
61 /**
62  * Check if a device is valid.
63  *
64  * @param dev_id
65  *   The identifier of the device.
66  *
67  * @return
68  *   true if device ID is valid and device is attached, false otherwise.
69  */
70 bool
71 rte_bbdev_is_valid(uint16_t dev_id);
72 
73 /**
74  * Get the next enabled device.
75  *
76  * @param dev_id
77  *   The current device
78  *
79  * @return
80  *   - The next device, or
81  *   - RTE_BBDEV_MAX_DEVS if none found
82  */
83 uint16_t
84 rte_bbdev_find_next(uint16_t dev_id);
85 
86 /** Iterate through all enabled devices */
87 #define RTE_BBDEV_FOREACH(i) for (i = rte_bbdev_find_next(-1); \
88 		i < RTE_BBDEV_MAX_DEVS; \
89 		i = rte_bbdev_find_next(i))
90 
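/*
 * Illustrative sketch (editor's addition, not part of the upstream header):
 * enumerate all usable bbdev devices with the iterator macro above. Assumes
 * the EAL and at least one bbdev driver have already been initialised.
 *
 *   uint16_t dev_id;
 *
 *   printf("%u bbdev device(s) available\n", rte_bbdev_count());
 *   RTE_BBDEV_FOREACH(dev_id) {
 *       if (rte_bbdev_is_valid(dev_id))
 *           printf("device %u is attached\n", dev_id);
 *   }
 */
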
91 /**
92  * Set up device queues.
93  * This function must be called on a device before configuring its queues and
94  * before starting the device. It can also be called when a device is in the
95  * stopped state. If any device queues have been configured, their configuration
96  * will be cleared by a call to this function.
97  *
98  * @param dev_id
99  *   The identifier of the device.
100  * @param num_queues
101  *   Number of queues to configure on device.
102  * @param socket_id
103  *   ID of a socket which will be used to allocate memory.
104  *
105  * @return
106  *   - 0 on success
107  *   - -ENODEV if dev_id is invalid or the device is corrupted
108  *   - -EINVAL if num_queues is invalid, 0 or greater than maximum
109  *   - -EBUSY if the identified device has already started
110  *   - -ENOMEM if unable to allocate memory
111  */
112 int
113 rte_bbdev_setup_queues(uint16_t dev_id, uint16_t num_queues, int socket_id);
114 
115 /**
116  * Enable interrupts.
117  * This function may be called before starting the device to enable the
118  * interrupts if they are available.
119  *
120  * @param dev_id
121  *   The identifier of the device.
122  *
123  * @return
124  *   - 0 on success
125  *   - -ENODEV if dev_id is invalid or the device is corrupted
126  *   - -EBUSY if the identified device has already started
127  *   - -ENOTSUP if the interrupts are not supported by the device
128  */
129 int
130 rte_bbdev_intr_enable(uint16_t dev_id);
131 
132 /** Device queue configuration structure */
133 struct rte_bbdev_queue_conf {
134 	int socket;  /**< NUMA socket used for memory allocation */
135 	uint32_t queue_size;  /**< Size of queue */
136 	uint8_t priority;  /**< Queue priority */
137 	bool deferred_start; /**< Do not start queue when device is started. */
138 	enum rte_bbdev_op_type op_type; /**< Operation type */
139 };
140 
141 /**
142  * Configure a queue on a device.
143  * This function can be called after device configuration, and before starting.
144  * It can also be called when the device or the queue is in the stopped state.
145  *
146  * @param dev_id
147  *   The identifier of the device.
148  * @param queue_id
149  *   The index of the queue.
150  * @param conf
151  *   The queue configuration. If NULL, a default configuration will be used.
152  *
153  * @return
154  *   - 0 on success
155  *   - EINVAL if the identified queue size or priority are invalid
156  *   - EBUSY if the identified queue or its device have already started
157  */
158 int
159 rte_bbdev_queue_configure(uint16_t dev_id, uint16_t queue_id,
160 		const struct rte_bbdev_queue_conf *conf);
161 
162 /**
163  * Start a device.
164  * This is the last step needed before enqueueing operations is possible.
165  *
166  * @param dev_id
167  *   The identifier of the device.
168  *
169  * @return
170  *   - 0 on success
171  *   - negative value on failure - as returned from PMD
172  */
173 int
174 rte_bbdev_start(uint16_t dev_id);
175 
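/*
 * Illustrative bring-up sketch (editor's addition, not part of the upstream
 * header): configure the queues and start a device. The queue count of 4 and
 * the use of the driver's default queue configuration are example choices;
 * dev_id is assumed to identify a valid, stopped device.
 *
 *   struct rte_bbdev_info info;
 *   uint16_t q;
 *
 *   if (rte_bbdev_info_get(dev_id, &info) != 0)
 *       return -1;
 *   if (rte_bbdev_setup_queues(dev_id, 4, info.socket_id) != 0)
 *       return -1;
 *   for (q = 0; q < 4; q++)
 *       if (rte_bbdev_queue_configure(dev_id, q,
 *               &info.drv.default_queue_conf) != 0)
 *           return -1;
 *   if (rte_bbdev_start(dev_id) != 0)
 *       return -1;
 */
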
176 /**
177  * Stop a device.
178  * The device can be reconfigured, and restarted after being stopped.
179  *
180  * @param dev_id
181  *   The identifier of the device.
182  *
183  * @return
184  *   - 0 on success
185  */
186 int
187 rte_bbdev_stop(uint16_t dev_id);
188 
189 /**
190  * Close a device.
191  * The device cannot be restarted without reconfiguration!
192  *
193  * @param dev_id
194  *   The identifier of the device.
195  *
196  * @return
197  *   - 0 on success
198  */
199 int
200 rte_bbdev_close(uint16_t dev_id);
201 
202 /**
203  * Start a specified queue on a device.
204  * This is only needed if the queue has been stopped, or if the deferred_start
205  * flag has been set when configuring the queue.
206  *
207  * @param dev_id
208  *   The identifier of the device.
209  * @param queue_id
210  *   The index of the queue.
211  *
212  * @return
213  *   - 0 on success
214  *   - negative value on failure - as returned from PMD
215  */
216 int
217 rte_bbdev_queue_start(uint16_t dev_id, uint16_t queue_id);
218 
219 /**
220  * Stop a specified queue on a device, to allow reconfiguration.
221  *
222  * @param dev_id
223  *   The identifier of the device.
224  * @param queue_id
225  *   The index of the queue.
226  *
227  * @return
228  *   - 0 on success
229  *   - negative value on failure - as returned from PMD
230  */
231 int
232 rte_bbdev_queue_stop(uint16_t dev_id, uint16_t queue_id);
233 
234 /**
235  * Flags to indicate the reason why a previous enqueue may not have
236  * consumed all requested operations.
237  * If multiple reasons apply, the later one supersedes any previous one.
238  * The related macro RTE_BBDEV_ENQ_STATUS_SIZE_MAX can be used as an
239  * absolute maximum, notably for sizing arrays, while allowing for
240  * future enum values to be inserted.
241  */
242 enum rte_bbdev_enqueue_status {
243 	RTE_BBDEV_ENQ_STATUS_NONE,             /**< Nothing to report. */
244 	RTE_BBDEV_ENQ_STATUS_QUEUE_FULL,       /**< Not enough room in queue. */
245 	RTE_BBDEV_ENQ_STATUS_RING_FULL,        /**< Not enough room in ring. */
246 	RTE_BBDEV_ENQ_STATUS_INVALID_OP,       /**< Operation was rejected as invalid. */
247 	/* Note: RTE_BBDEV_ENQ_STATUS_SIZE_MAX must be larger than or equal to the maximum enum value. */
248 };
249 
250 /**
251  * Flags to indicate the status of the device.
252  */
253 enum rte_bbdev_device_status {
254 	RTE_BBDEV_DEV_NOSTATUS,        /**< Nothing being reported. */
255 	RTE_BBDEV_DEV_NOT_SUPPORTED,   /**< Device status is not supported on the PMD. */
256 	RTE_BBDEV_DEV_RESET,           /**< Device in reset and un-configured state. */
257 	RTE_BBDEV_DEV_CONFIGURED,      /**< Device is configured and ready to use. */
258 	RTE_BBDEV_DEV_ACTIVE,          /**< Device is configured and VF is being used. */
259 	RTE_BBDEV_DEV_FATAL_ERR,       /**< Device has hit a fatal uncorrectable error. */
260 	RTE_BBDEV_DEV_RESTART_REQ,     /**< Device requires application to restart. */
261 	RTE_BBDEV_DEV_RECONFIG_REQ,    /**< Device requires application to reconfigure queues. */
262 	RTE_BBDEV_DEV_CORRECT_ERR,     /**< Warning of a correctable error event happened. */
263 };
264 
265 /** Device statistics. */
266 struct rte_bbdev_stats {
267 	uint64_t enqueued_count;  /**< Count of all operations enqueued */
268 	uint64_t dequeued_count;  /**< Count of all operations dequeued */
269 	/** Total error count on operations enqueued */
270 	uint64_t enqueue_err_count;
271 	/** Total error count on operations dequeued */
272 	uint64_t dequeue_err_count;
273 	/** Total warning count on operations enqueued. */
274 	uint64_t enqueue_warn_count;
275 	/** Total warning count on operations dequeued. */
276 	uint64_t dequeue_warn_count;
277 	/** Total enqueue status count based on *rte_bbdev_enqueue_status* enum. */
278 	uint64_t enqueue_status_count[RTE_BBDEV_ENQ_STATUS_SIZE_MAX];
279 	/** CPU cycles consumed by the (HW/SW) accelerator device to offload
280 	 *  the enqueue request to its internal queues.
281 	 *  - For a HW device this is the cycles consumed in MMIO write
282 	 *  - For a SW (vdev) device, this is the processing time of the
283 	 *     bbdev operation
284 	 */
285 	uint64_t acc_offload_cycles;
286 	/** Number of available enqueue batches on that queue. */
287 	uint16_t enqueue_depth_avail;
288 };
289 
290 /**
291  * Retrieve the general I/O statistics of a device.
292  *
293  * @param dev_id
294  *   The identifier of the device.
295  * @param stats
296  *   Pointer to structure to where statistics will be copied. On error, this
297  *   location may or may not have been modified.
298  *
299  * @return
300  *   - 0 on success
301  *   - EINVAL if invalid parameter pointer is provided
302  */
303 int
304 rte_bbdev_stats_get(uint16_t dev_id, struct rte_bbdev_stats *stats);
305 
306 /**
307  * Reset the statistics of a device.
308  *
309  * @param dev_id
310  *   The identifier of the device.
311  * @return
312  *   - 0 on success
313  */
314 int
315 rte_bbdev_stats_reset(uint16_t dev_id);
316 
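/*
 * Illustrative sketch (editor's addition): read the counters and print the
 * per-status enqueue counters, whose array is sized by
 * RTE_BBDEV_ENQ_STATUS_SIZE_MAX. rte_bbdev_enqueue_status_str() is declared
 * further below in this header; the NULL check guards the padding values
 * beyond the defined enum (an assumption of this sketch).
 *
 *   struct rte_bbdev_stats stats;
 *   int i;
 *
 *   if (rte_bbdev_stats_get(dev_id, &stats) == 0) {
 *       printf("enqueued %" PRIu64 ", dequeued %" PRIu64 "\n",
 *               stats.enqueued_count, stats.dequeued_count);
 *       for (i = 0; i < RTE_BBDEV_ENQ_STATUS_SIZE_MAX; i++) {
 *           const char *s = rte_bbdev_enqueue_status_str(
 *                   (enum rte_bbdev_enqueue_status)i);
 *           if (s != NULL)
 *               printf("  %s: %" PRIu64 "\n", s, stats.enqueue_status_count[i]);
 *       }
 *   }
 */
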
317 /** Device information supplied by the device's driver */
318 
319 /* Structure rte_bbdev_driver_info 8< */
320 struct rte_bbdev_driver_info {
321 	/** Driver name */
322 	const char *driver_name;
323 
324 	/** Maximum number of queues supported by the device */
325 	unsigned int max_num_queues;
326 	/** Maximum number of queues supported per operation type */
327 	unsigned int num_queues[RTE_BBDEV_OP_TYPE_SIZE_MAX];
328 	/** Priority level supported per operation type */
329 	unsigned int queue_priority[RTE_BBDEV_OP_TYPE_SIZE_MAX];
330 	/** Queue size limit (queue size must also be a power of 2) */
331 	uint32_t queue_size_lim;
332 	/** Set if device off-loads operation to hardware  */
333 	bool hardware_accelerated;
334 	/** Max value supported by queue priority for DL */
335 	uint8_t max_dl_queue_priority;
336 	/** Max value supported by queue priority for UL */
337 	uint8_t max_ul_queue_priority;
338 	/** Set if device supports per-queue interrupts */
339 	bool queue_intr_supported;
340 	/** Device Status */
341 	enum rte_bbdev_device_status device_status;
342 	/** HARQ memory available in kB */
343 	uint32_t harq_buffer_size;
344 	/** Minimum alignment of buffers, in bytes */
345 	uint16_t min_alignment;
346 	/** Byte endianness (RTE_BIG_ENDIAN/RTE_LITTLE_ENDIAN) supported
347 	 *  for input/output data
348 	 */
349 	uint8_t data_endianness;
350 	/** Default queue configuration used if none is supplied  */
351 	struct rte_bbdev_queue_conf default_queue_conf;
352 	/** Device operation capabilities */
353 	const struct rte_bbdev_op_cap *capabilities;
354 	/** Device cpu_flag requirements */
355 	const enum rte_cpu_flag_t *cpu_flag_reqs;
356 	/** FFT windowing width for 2048 FFT - size defined in capability. */
357 	uint16_t *fft_window_width;
358 };
359 /* >8 End of structure rte_bbdev_driver_info. */
360 
361 /** Macro used at end of bbdev PMD list */
362 #define RTE_BBDEV_END_OF_CAPABILITIES_LIST() \
363 	{ RTE_BBDEV_OP_NONE }
364 
365 /**
366  * Device information structure used by an application to discover a device's
367  * capabilities and current configuration.
368  */
369 
370 /* Structure rte_bbdev_info 8< */
371 struct rte_bbdev_info {
372 	int socket_id;  /**< NUMA socket that device is on */
373 	const char *dev_name;  /**< Unique device name */
374 	const struct rte_device *device; /**< Device Information */
375 	uint16_t num_queues;  /**< Number of queues currently configured */
376 	bool started;  /**< Set if device is currently started */
377 	struct rte_bbdev_driver_info drv;  /**< Info from device driver */
378 };
379 /* >8 End of structure rte_bbdev_info. */
380 
381 /**
382  * Retrieve information about a device.
383  *
384  * @param dev_id
385  *   The identifier of the device.
386  * @param dev_info
387  *   Pointer to structure to where information will be copied. On error, this
388  *   location may or may not have been modified.
389  *
390  * @return
391  *   - 0 on success
392  *   - EINVAL if invalid parameter pointer is provided
393  */
394 int
395 rte_bbdev_info_get(uint16_t dev_id, struct rte_bbdev_info *dev_info);
396 
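/*
 * Illustrative sketch (editor's addition): query device information, print its
 * status and walk the capability list, which ends with an entry of type
 * RTE_BBDEV_OP_NONE (see RTE_BBDEV_END_OF_CAPABILITIES_LIST above).
 * RTE_BBDEV_OP_LDPC_DEC is just one example operation type from rte_bbdev_op.h.
 *
 *   struct rte_bbdev_info info;
 *   const struct rte_bbdev_op_cap *cap;
 *
 *   if (rte_bbdev_info_get(dev_id, &info) != 0)
 *       return false;
 *   printf("%s: %s\n", info.dev_name,
 *           rte_bbdev_device_status_str(info.drv.device_status));
 *   for (cap = info.drv.capabilities; cap->type != RTE_BBDEV_OP_NONE; cap++)
 *       if (cap->type == RTE_BBDEV_OP_LDPC_DEC)
 *           return true;
 *   return false;
 */
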
397 /** Queue information */
398 struct rte_bbdev_queue_info {
399 	/** Current device configuration */
400 	struct rte_bbdev_queue_conf conf;
401 	/** Set if queue is currently started */
402 	bool started;
403 };
404 
405 /**
406  * Retrieve information about a specific queue on a device.
407  *
408  * @param dev_id
409  *   The identifier of the device.
410  * @param queue_id
411  *   The index of the queue.
412  * @param queue_info
413  *   Pointer to structure to where information will be copied. On error, this
414  *   location may or may not have been modified.
415  *
416  * @return
417  *   - 0 on success
418  *   - EINVAL if invalid parameter pointer is provided
419  */
420 int
421 rte_bbdev_queue_info_get(uint16_t dev_id, uint16_t queue_id,
422 		struct rte_bbdev_queue_info *queue_info);
423 
424 /** @internal The data structure associated with each queue of a device. */
425 struct rte_bbdev_queue_data {
426 	void *queue_private;  /**< Driver-specific per-queue data */
427 	struct rte_bbdev_queue_conf conf;  /**< Current configuration */
428 	struct rte_bbdev_stats queue_stats;  /**< Queue statistics */
429 	enum rte_bbdev_enqueue_status enqueue_status; /**< Enqueue status when op is rejected */
430 	bool started;  /**< Queue state */
431 };
432 
433 /** @internal Enqueue encode operations for processing on queue of a device. */
434 typedef uint16_t (*rte_bbdev_enqueue_enc_ops_t)(
435 		struct rte_bbdev_queue_data *q_data,
436 		struct rte_bbdev_enc_op **ops,
437 		uint16_t num);
438 
439 /** @internal Enqueue decode operations for processing on queue of a device. */
440 typedef uint16_t (*rte_bbdev_enqueue_dec_ops_t)(
441 		struct rte_bbdev_queue_data *q_data,
442 		struct rte_bbdev_dec_op **ops,
443 		uint16_t num);
444 
445 /** @internal Enqueue FFT operations for processing on queue of a device. */
446 typedef uint16_t (*rte_bbdev_enqueue_fft_ops_t)(
447 		struct rte_bbdev_queue_data *q_data,
448 		struct rte_bbdev_fft_op **ops,
449 		uint16_t num);
450 
451 /** @internal Enqueue MLD-TS operations for processing on queue of a device. */
452 typedef uint16_t (*rte_bbdev_enqueue_mldts_ops_t)(
453 		struct rte_bbdev_queue_data *q_data,
454 		struct rte_bbdev_mldts_op **ops,
455 		uint16_t num);
456 
457 /** @internal Dequeue encode operations from a queue of a device. */
458 typedef uint16_t (*rte_bbdev_dequeue_enc_ops_t)(
459 		struct rte_bbdev_queue_data *q_data,
460 		struct rte_bbdev_enc_op **ops, uint16_t num);
461 
462 /** @internal Dequeue decode operations from a queue of a device. */
463 typedef uint16_t (*rte_bbdev_dequeue_dec_ops_t)(
464 		struct rte_bbdev_queue_data *q_data,
465 		struct rte_bbdev_dec_op **ops, uint16_t num);
466 
467 /** @internal Dequeue FFT operations from a queue of a device. */
468 typedef uint16_t (*rte_bbdev_dequeue_fft_ops_t)(
469 		struct rte_bbdev_queue_data *q_data,
470 		struct rte_bbdev_fft_op **ops, uint16_t num);
471 
472 /** @internal Dequeue MLDTS operations from a queue of a device. */
473 typedef uint16_t (*rte_bbdev_dequeue_mldts_ops_t)(
474 		struct rte_bbdev_queue_data *q_data,
475 		struct rte_bbdev_mldts_op **ops, uint16_t num);
476 
477 #define RTE_BBDEV_NAME_MAX_LEN  64  /**< Max length of device name */
478 
479 /**
480  * @internal The data associated with a device, with no function pointers.
481  * This structure is safe to place in shared memory to be common among
482  * different processes in a multi-process configuration. Drivers can access
483  * these fields, but should never write to them!
484  */
485 struct rte_bbdev_data {
486 	char name[RTE_BBDEV_NAME_MAX_LEN]; /**< Unique identifier name */
487 	void *dev_private;  /**< Driver-specific private data */
488 	uint16_t num_queues;  /**< Number of currently configured queues */
489 	struct rte_bbdev_queue_data *queues;  /**< Queue structures */
490 	uint16_t dev_id;  /**< Device ID */
491 	int socket_id;  /**< NUMA socket that device is on */
492 	bool started;  /**< Device run-time state */
493 	RTE_ATOMIC(uint16_t) process_cnt;  /**< Counter of processes using the device */
494 };
495 
496 /* Forward declarations */
497 struct rte_bbdev_ops;
498 struct rte_bbdev_callback;
499 struct rte_intr_handle;
500 
501 /** Structure to keep track of registered callbacks */
502 RTE_TAILQ_HEAD(rte_bbdev_cb_list, rte_bbdev_callback);
503 
504 /**
505  * @internal The data structure associated with a device. Drivers can access
506  * these fields, but should only write to the *_ops fields.
507  */
508 struct __rte_cache_aligned rte_bbdev {
509 	/** Enqueue encode function */
510 	rte_bbdev_enqueue_enc_ops_t enqueue_enc_ops;
511 	/** Enqueue decode function */
512 	rte_bbdev_enqueue_dec_ops_t enqueue_dec_ops;
513 	/** Dequeue encode function */
514 	rte_bbdev_dequeue_enc_ops_t dequeue_enc_ops;
515 	/** Dequeue decode function */
516 	rte_bbdev_dequeue_dec_ops_t dequeue_dec_ops;
517 	/** Enqueue encode function */
518 	rte_bbdev_enqueue_enc_ops_t enqueue_ldpc_enc_ops;
519 	/** Enqueue decode function */
520 	rte_bbdev_enqueue_dec_ops_t enqueue_ldpc_dec_ops;
521 	/** Dequeue encode function */
522 	rte_bbdev_dequeue_enc_ops_t dequeue_ldpc_enc_ops;
523 	/** Dequeue decode function */
524 	rte_bbdev_dequeue_dec_ops_t dequeue_ldpc_dec_ops;
525 	/** Enqueue FFT function */
526 	rte_bbdev_enqueue_fft_ops_t enqueue_fft_ops;
527 	/** Dequeue FFT function */
528 	rte_bbdev_dequeue_fft_ops_t dequeue_fft_ops;
529 	const struct rte_bbdev_ops *dev_ops;  /**< Functions exported by PMD */
530 	struct rte_bbdev_data *data;  /**< Pointer to device data */
531 	enum rte_bbdev_state state;  /**< If device is currently used or not */
532 	struct rte_device *device; /**< Backing device */
533 	/** User application callback for interrupts if present */
534 	struct rte_bbdev_cb_list list_cbs;
535 	struct rte_intr_handle *intr_handle; /**< Device interrupt handle */
536 	/** Enqueue MLD-TS function */
537 	rte_bbdev_enqueue_mldts_ops_t enqueue_mldts_ops;
538 	/** Dequeue MLD-TS function */
539 	rte_bbdev_dequeue_mldts_ops_t dequeue_mldts_ops;
540 };
541 
542 /** @internal array of all devices */
543 extern struct rte_bbdev rte_bbdev_devices[];
544 
545 /**
546  * Enqueue a burst of encode operations for processing on a queue of the device.
547  * This function only enqueues as many operations as currently possible and
548  * does not block until @p num_ops entries in the queue are available.
549  * This function does not provide any error notification to avoid the
550  * corresponding overhead.
551  *
552  * @param dev_id
553  *   The identifier of the device.
554  * @param queue_id
555  *   The index of the queue.
556  * @param ops
557  *   Pointer array containing operations to be enqueued. Must have at least
558  *   @p num_ops entries
559  * @param num_ops
560  *   The maximum number of operations to enqueue.
561  *
562  * @return
563  *   The number of operations actually enqueued (this is the number of processed
564  *   entries in the @p ops array).
565  */
566 static inline uint16_t
567 rte_bbdev_enqueue_enc_ops(uint16_t dev_id, uint16_t queue_id,
568 		struct rte_bbdev_enc_op **ops, uint16_t num_ops)
569 {
570 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
571 	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
572 	return dev->enqueue_enc_ops(q_data, ops, num_ops);
573 }
574 
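/*
 * Illustrative sketch (editor's addition): the enqueue functions accept only as
 * many operations as there is currently room for, so callers typically retry
 * with the remainder. ops[] and burst are assumed to come from the caller, with
 * the operations allocated from an op mempool (see rte_bbdev_op.h); a real
 * application would bound the retry loop.
 *
 *   uint16_t sent = 0;
 *
 *   while (sent < burst)
 *       sent += rte_bbdev_enqueue_enc_ops(dev_id, queue_id,
 *               &ops[sent], burst - sent);
 */
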
575 /**
576  * Enqueue a burst of decode operations for processing on a queue of the device.
577  * This function only enqueues as many operations as currently possible and
578  * does not block until @p num_ops entries in the queue are available.
579  * This function does not provide any error notification to avoid the
580  * corresponding overhead.
581  *
582  * @param dev_id
583  *   The identifier of the device.
584  * @param queue_id
585  *   The index of the queue.
586  * @param ops
587  *   Pointer array containing operations to be enqueued. Must have at least
588  *   @p num_ops entries
589  * @param num_ops
590  *   The maximum number of operations to enqueue.
591  *
592  * @return
593  *   The number of operations actually enqueued (this is the number of processed
594  *   entries in the @p ops array).
595  */
596 static inline uint16_t
597 rte_bbdev_enqueue_dec_ops(uint16_t dev_id, uint16_t queue_id,
598 		struct rte_bbdev_dec_op **ops, uint16_t num_ops)
599 {
600 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
601 	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
602 	return dev->enqueue_dec_ops(q_data, ops, num_ops);
603 }
604 
605 /**
606  * Enqueue a burst of encode operations for processing on a queue of the device.
607  * This function only enqueues as many operations as currently possible and
608  * does not block until @p num_ops entries in the queue are available.
609  * This function does not provide any error notification to avoid the
610  * corresponding overhead.
611  *
612  * @param dev_id
613  *   The identifier of the device.
614  * @param queue_id
615  *   The index of the queue.
616  * @param ops
617  *   Pointer array containing operations to be enqueued. Must have at least
618  *   @p num_ops entries
619  * @param num_ops
620  *   The maximum number of operations to enqueue.
621  *
622  * @return
623  *   The number of operations actually enqueued (this is the number of processed
624  *   entries in the @p ops array).
625  */
626 static inline uint16_t
627 rte_bbdev_enqueue_ldpc_enc_ops(uint16_t dev_id, uint16_t queue_id,
628 		struct rte_bbdev_enc_op **ops, uint16_t num_ops)
629 {
630 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
631 	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
632 	return dev->enqueue_ldpc_enc_ops(q_data, ops, num_ops);
633 }
634 
635 /**
636  * Enqueue a burst of decode operations for processing on a queue of the device.
637  * This function only enqueues as many operations as currently possible and
638  * does not block until @p num_ops entries in the queue are available.
639  * This function does not provide any error notification to avoid the
640  * corresponding overhead.
641  *
642  * @param dev_id
643  *   The identifier of the device.
644  * @param queue_id
645  *   The index of the queue.
646  * @param ops
647  *   Pointer array containing operations to be enqueued. Must have at least
648  *   @p num_ops entries
649  * @param num_ops
650  *   The maximum number of operations to enqueue.
651  *
652  * @return
653  *   The number of operations actually enqueued (this is the number of processed
654  *   entries in the @p ops array).
655  */
656 static inline uint16_t
657 rte_bbdev_enqueue_ldpc_dec_ops(uint16_t dev_id, uint16_t queue_id,
658 		struct rte_bbdev_dec_op **ops, uint16_t num_ops)
659 {
660 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
661 	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
662 	return dev->enqueue_ldpc_dec_ops(q_data, ops, num_ops);
663 }
664 
665 /**
666  * Enqueue a burst of FFT operations to a queue of the device.
667  * This function only enqueues as many operations as currently possible and
668  * does not block until @p num_ops entries in the queue are available.
669  * This function does not provide any error notification to avoid the
670  * corresponding overhead.
671  *
672  * @param dev_id
673  *   The identifier of the device.
674  * @param queue_id
675  *   The index of the queue.
676  * @param ops
677  *   Pointer array containing operations to be enqueued.
678  *   Must have at least @p num_ops entries.
679  * @param num_ops
680  *   The maximum number of operations to enqueue.
681  *
682  * @return
683  *   The number of operations actually enqueued.
684  *   (This is the number of processed entries in the @p ops array.)
685  */
686 static inline uint16_t
687 rte_bbdev_enqueue_fft_ops(uint16_t dev_id, uint16_t queue_id,
688 		struct rte_bbdev_fft_op **ops, uint16_t num_ops)
689 {
690 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
691 	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
692 	return dev->enqueue_fft_ops(q_data, ops, num_ops);
693 }
694 
695 /**
696  * Enqueue a burst of MLDTS operations to a queue of the device.
697  * This function only enqueues as many operations as currently possible and
698  * does not block until @p num_ops entries in the queue are available.
699  * This function does not provide any error notification to avoid the
700  * corresponding overhead.
701  *
702  * @param dev_id
703  *   The identifier of the device.
704  * @param queue_id
705  *   The index of the queue.
706  * @param ops
707  *   Pointer array containing operations to be enqueued. Must have at least
708  *   @p num_ops entries
709  * @param num_ops
710  *   The maximum number of operations to enqueue.
711  *
712  * @return
713  *   The number of operations actually enqueued (this is the number of processed
714  *   entries in the @p ops array).
715  */
716 static inline uint16_t
717 rte_bbdev_enqueue_mldts_ops(uint16_t dev_id, uint16_t queue_id,
718 		struct rte_bbdev_mldts_op **ops, uint16_t num_ops)
719 {
720 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
721 	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
722 	return dev->enqueue_mldts_ops(q_data, ops, num_ops);
723 }
724 
725 /**
726  * Dequeue a burst of processed encode operations from a queue of the device.
727  * This function returns only the current contents of the queue,
728  * and does not block until @p num_ops operations are available.
729  * This function does not provide any error notification to avoid the
730  * corresponding overhead.
731  *
732  * @param dev_id
733  *   The identifier of the device.
734  * @param queue_id
735  *   The index of the queue.
736  * @param ops
737  *   Pointer array where operations will be dequeued to.
738  *   Must have at least @p num_ops entries, i.e.
739  *   a pointer to a table of void * pointers (ops) that will be filled.
740  * @param num_ops
741  *   The maximum number of operations to dequeue.
742  *
743  * @return
744  *   The number of operations actually dequeued.
745  *   (This is the number of entries copied into the @p ops array.)
746  */
747 static inline uint16_t
748 rte_bbdev_dequeue_enc_ops(uint16_t dev_id, uint16_t queue_id,
749 		struct rte_bbdev_enc_op **ops, uint16_t num_ops)
750 {
751 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
752 	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
753 	return dev->dequeue_enc_ops(q_data, ops, num_ops);
754 }
755 
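/*
 * Illustrative sketch (editor's addition): poll until a full burst has been
 * dequeued. A real application would bound the loop or use the per-queue
 * interrupt API further below instead of spinning; deq_ops[] and burst are
 * assumed to come from the caller.
 *
 *   uint16_t recv = 0;
 *
 *   while (recv < burst)
 *       recv += rte_bbdev_dequeue_enc_ops(dev_id, queue_id,
 *               &deq_ops[recv], burst - recv);
 */
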
756 /**
757  * Dequeue a burst of processed decode operations from a queue of the device.
758  * This function returns only the current contents of the queue, and does not
759  * block until @p num_ops operations are available.
760  * This function does not provide any error notification to avoid the
761  * corresponding overhead.
762  *
763  * @param dev_id
764  *   The identifier of the device.
765  * @param queue_id
766  *   The index of the queue.
767  * @param ops
768  *   Pointer array where operations will be dequeued to. Must have at least
769  *   @p num_ops entries,
770  *   i.e. a pointer to a table of void * pointers (ops) that will be filled.
771  * @param num_ops
772  *   The maximum number of operations to dequeue.
773  *
774  * @return
775  *   The number of operations actually dequeued (this is the number of entries
776  *   copied into the @p ops array).
777  */
778 
779 static inline uint16_t
780 rte_bbdev_dequeue_dec_ops(uint16_t dev_id, uint16_t queue_id,
781 		struct rte_bbdev_dec_op **ops, uint16_t num_ops)
782 {
783 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
784 	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
785 	return dev->dequeue_dec_ops(q_data, ops, num_ops);
786 }
787 
788 
789 /**
790  * Dequeue a burst of processed encode operations from a queue of the device.
791  * This function returns only the current contents of the queue, and does not
792  * block until @p num_ops operations are available.
793  * This function does not provide any error notification to avoid the
794  * corresponding overhead.
795  *
796  * @param dev_id
797  *   The identifier of the device.
798  * @param queue_id
799  *   The index of the queue.
800  * @param ops
801  *   Pointer array where operations will be dequeued to. Must have at least
802  *   @p num_ops entries
803  * @param num_ops
804  *   The maximum number of operations to dequeue.
805  *
806  * @return
807  *   The number of operations actually dequeued (this is the number of entries
808  *   copied into the @p ops array).
809  */
810 static inline uint16_t
811 rte_bbdev_dequeue_ldpc_enc_ops(uint16_t dev_id, uint16_t queue_id,
812 		struct rte_bbdev_enc_op **ops, uint16_t num_ops)
813 {
814 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
815 	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
816 	return dev->dequeue_ldpc_enc_ops(q_data, ops, num_ops);
817 }
818 
819 /**
820  * Dequeue a burst of processed decode operations from a queue of the device.
821  * This function returns only the current contents of the queue, and does not
822  * block until @p num_ops operations are available.
823  * This function does not provide any error notification to avoid the
824  * corresponding overhead.
825  *
826  * @param dev_id
827  *   The identifier of the device.
828  * @param queue_id
829  *   The index of the queue.
830  * @param ops
831  *   Pointer array where operations will be dequeued to. Must have at least
832  *   @p num_ops entries
833  * @param num_ops
834  *   The maximum number of operations to dequeue.
835  *
836  * @return
837  *   The number of operations actually dequeued (this is the number of entries
838  *   copied into the @p ops array).
839  */
840 static inline uint16_t
841 rte_bbdev_dequeue_ldpc_dec_ops(uint16_t dev_id, uint16_t queue_id,
842 		struct rte_bbdev_dec_op **ops, uint16_t num_ops)
843 {
844 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
845 	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
846 	return dev->dequeue_ldpc_dec_ops(q_data, ops, num_ops);
847 }
848 
849 /**
850  * Dequeue a burst of FFT operations from a queue of the device.
851  * This function returns only the current contents of the queue, and does not
852  * block until @p num_ops operations are available.
853  * This function does not provide any error notification to avoid the
854  * corresponding overhead.
855  *
856  * @param dev_id
857  *   The identifier of the device.
858  * @param queue_id
859  *   The index of the queue.
860  * @param ops
861  *   Pointer array where operations will be dequeued to. Must have at least
862  *   @p num_ops entries
863  * @param num_ops
864  *   The maximum number of operations to dequeue.
865  *
866  * @return
867  *   The number of operations actually dequeued (this is the number of entries
868  *   copied into the @p ops array).
869  */
870 static inline uint16_t
871 rte_bbdev_dequeue_fft_ops(uint16_t dev_id, uint16_t queue_id,
872 		struct rte_bbdev_fft_op **ops, uint16_t num_ops)
873 {
874 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
875 	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
876 	return dev->dequeue_fft_ops(q_data, ops, num_ops);
877 }
878 
879 /**
880  * Dequeue a burst of MLDTS operations from a queue of the device.
881  * This function returns only the current contents of the queue, and does not
882  * block until @p num_ops operations are available.
883  * This function does not provide any error notification to avoid the
884  * corresponding overhead.
885  *
886  * @param dev_id
887  *   The identifier of the device.
888  * @param queue_id
889  *   The index of the queue.
890  * @param ops
891  *   Pointer array where operations will be dequeued to. Must have at least
892  *   @p num_ops entries
893  * @param num_ops
894  *   The maximum number of operations to dequeue.
895  *
896  * @return
897  *   The number of operations actually dequeued (this is the number of entries
898  *   copied into the @p ops array).
899  */
900 static inline uint16_t
901 rte_bbdev_dequeue_mldts_ops(uint16_t dev_id, uint16_t queue_id,
902 		struct rte_bbdev_mldts_op **ops, uint16_t num_ops)
903 {
904 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
905 	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
906 	return dev->dequeue_mldts_ops(q_data, ops, num_ops);
907 }
908 
909 /** Definitions of device event types */
910 enum rte_bbdev_event_type {
911 	RTE_BBDEV_EVENT_UNKNOWN,  /**< unknown event type */
912 	RTE_BBDEV_EVENT_ERROR,  /**< error interrupt event */
913 	RTE_BBDEV_EVENT_DEQUEUE,  /**< dequeue event */
914 	RTE_BBDEV_EVENT_MAX  /**< max value of this enum */
915 };
916 
917 /**
918  * Typedef for an application callback function registered by application
919  * software for notification of device events.
920  *
921  * @param dev_id
922  *   Device identifier
923  * @param event
924  *   Device event to register for notification of.
925  * @param cb_arg
926  *   User specified parameter to be passed to user's callback function.
927  * @param ret_param
928  *   To pass data back to user application.
929  */
930 typedef void (*rte_bbdev_cb_fn)(uint16_t dev_id,
931 		enum rte_bbdev_event_type event, void *cb_arg,
932 		void *ret_param);
933 
934 /**
935  * Register a callback function for a specific device id. Multiple callbacks can
936  * be added and will be called in the order they are added when an event is
937  * triggered. Callbacks are called in a separate thread created by the DPDK EAL.
938  *
939  * @param dev_id
940  *   Device id.
941  * @param event
942  *   The event that the callback will be registered for.
943  * @param cb_fn
944  *   User supplied callback function to be called.
945  * @param cb_arg
946  *   Pointer to parameter that will be passed to the callback.
947  *
948  * @return
949  *   Zero on success, negative value on failure.
950  */
951 int
952 rte_bbdev_callback_register(uint16_t dev_id, enum rte_bbdev_event_type event,
953 		rte_bbdev_cb_fn cb_fn, void *cb_arg);
954 
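/*
 * Illustrative sketch (editor's addition): register a callback for error
 * events. The callback body is hypothetical and only logs the event; cb_arg
 * and ret_param are unused here.
 *
 *   static void
 *   error_cb(uint16_t dev_id, enum rte_bbdev_event_type event, void *cb_arg,
 *           void *ret_param)
 *   {
 *       RTE_SET_USED(cb_arg);
 *       RTE_SET_USED(ret_param);
 *       printf("bbdev %u: event %d\n", dev_id, (int)event);
 *   }
 *
 *   ...
 *
 *   rte_bbdev_callback_register(dev_id, RTE_BBDEV_EVENT_ERROR, error_cb, NULL);
 */
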
955 /**
956  * Unregister a callback function for a specific device id.
957  *
958  * @param dev_id
959  *   The device identifier.
960  * @param event
961  *   The event that the callback will be unregistered for.
962  * @param cb_fn
963  *   User supplied callback function to be unregistered.
964  * @param cb_arg
965  *   Pointer to the parameter supplied when registering the callback.
966  *   (void *)-1 means to remove all registered callbacks with the specified
967  *   function address.
968  *
969  * @return
970  *   - 0 on success
971  *   - EINVAL if invalid parameter pointer is provided
972  *   - EAGAIN if the provided callback pointer does not exist
973  */
974 int
975 rte_bbdev_callback_unregister(uint16_t dev_id, enum rte_bbdev_event_type event,
976 		rte_bbdev_cb_fn cb_fn, void *cb_arg);
977 
978 /**
979  * Enable a one-shot interrupt on the next operation enqueued to a particular
980  * queue. The interrupt will be triggered when the operation is ready to be
981  * dequeued. To handle the interrupt, an epoll file descriptor must be
982  * registered using rte_bbdev_queue_intr_ctl(), and then an application
983  * thread/lcore can wait for the interrupt using rte_epoll_wait().
984  *
985  * @param dev_id
986  *   The device identifier.
987  * @param queue_id
988  *   The index of the queue.
989  *
990  * @return
991  *   - 0 on success
992  *   - negative value on failure - as returned from PMD
993  */
994 int
995 rte_bbdev_queue_intr_enable(uint16_t dev_id, uint16_t queue_id);
996 
997 /**
998  * Disable a one-shot interrupt on the next operation enqueued to a particular
999  * queue (if it has been enabled).
1000  *
1001  * @param dev_id
1002  *   The device identifier.
1003  * @param queue_id
1004  *   The index of the queue.
1005  *
1006  * @return
1007  *   - 0 on success
1008  *   - negative value on failure - as returned from PMD
1009  */
1010 int
1011 rte_bbdev_queue_intr_disable(uint16_t dev_id, uint16_t queue_id);
1012 
1013 /**
1014  * Control interface for per-queue interrupts.
1015  *
1016  * @param dev_id
1017  *   The device identifier.
1018  * @param queue_id
1019  *   The index of the queue.
1020  * @param epfd
1021  *   Epoll file descriptor that will be associated with the interrupt source.
1022  *   If the special value RTE_EPOLL_PER_THREAD is provided, a per thread epoll
1023  *   file descriptor created by the EAL is used (RTE_EPOLL_PER_THREAD can also
1024  *   be used when calling rte_epoll_wait()).
1025  * @param op
1026  *   The operation to be performed for the vector: RTE_INTR_EVENT_ADD or
1027  *   RTE_INTR_EVENT_DEL.
1028  * @param data
1029  *   User context, that will be returned in the epdata.data field of the
1030  *   rte_epoll_event structure filled in by rte_epoll_wait().
1031  *
1032  * @return
1033  *   - 0 on success
1034  *   - ENOTSUP if interrupts are not supported by the identified device
1035  *   - negative value on failure - as returned from PMD
1036  */
1037 int
1038 rte_bbdev_queue_intr_ctl(uint16_t dev_id, uint16_t queue_id, int epfd, int op,
1039 		void *data);
1040 
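/*
 * Illustrative sketch (editor's addition): arm a one-shot dequeue interrupt on
 * a queue and wait for it with the EAL epoll API. rte_epoll_wait(),
 * RTE_EPOLL_PER_THREAD, RTE_INTR_EVENT_ADD and struct rte_epoll_event come
 * from the EAL interrupt headers; their use here is an assumption of this
 * sketch, not something defined by this header.
 *
 *   struct rte_epoll_event ev;
 *
 *   if (rte_bbdev_queue_intr_ctl(dev_id, queue_id, RTE_EPOLL_PER_THREAD,
 *           RTE_INTR_EVENT_ADD, NULL) != 0)
 *       return -1;
 *   rte_bbdev_queue_intr_enable(dev_id, queue_id);
 *   ... enqueue one burst here, then block until it is ready to dequeue ...
 *   rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
 */
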
1041 /**
1042  * Convert device status from enum to string.
1043  *
1044  * @param status
1045  *   Device status as enum.
1046  *
1047  * @returns
1048  *   Device status as string or NULL if invalid.
1049  */
1050 const char*
1051 rte_bbdev_device_status_str(enum rte_bbdev_device_status status);
1052 
1053 /**
1054  * Convert enqueue status from enum to string.
1055  *
1056  * @param status
1057  *   Enqueue status as enum.
1058  *
1059  * @returns
1060  *   Enqueue status as string or NULL if the status is invalid.
1061  */
1062 const char*
1063 rte_bbdev_enqueue_status_str(enum rte_bbdev_enqueue_status status);
1064 
1065 /**
1066  * Dump operation information from a device queue to a file.
1067  * This API is intended for debugging the provided input operations; it is not a data-plane API.
1068  *
1069  *  @param dev_id
1070  *    The device identifier.
1071  *
1072  *  @param queue_index
1073  *    Index of queue.
1074  *
1075  *  @param file
1076  *    A pointer to a file for output.
1077  *
1078  * @returns
1079  *   - 0 on success
1080  *   - ENOTSUP if the operation is not supported by the identified device
1081  *   - negative value on failure - as returned from PMD
1082  *
1083  */
1084 __rte_experimental
1085 int
1086 rte_bbdev_queue_ops_dump(uint16_t dev_id, uint16_t queue_index, FILE *file);
1087 
1088 
1089 /**
1090  * Produce a string describing the parameters of an operation of a given type.
1091  *
1092  *  @param op
1093  *    Pointer to an operation.
1094  *
1095  *  @param op_type
1096  *    Operation type enum.
1097  *
1098  *  @param str
1099  *    Buffer into which the string describing the operation is written.
1100  *
1101  *  @param len
1102  *    Size of the string buffer.
1103  *
1104  * @returns
1105  *   String describing the provided operation.
1106  *
1107  */
1108 __rte_experimental
1109 char *
1110 rte_bbdev_ops_param_string(void *op, enum rte_bbdev_op_type op_type, char *str, uint32_t len);
1111 
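/*
 * Illustrative debug sketch (editor's addition): dump operation information for
 * a queue and describe a single operation. op is assumed to point to a
 * previously processed operation of the matching type, and the 1024-byte
 * buffer is an arbitrary example size.
 *
 *   char str[1024];
 *
 *   rte_bbdev_queue_ops_dump(dev_id, queue_id, stderr);
 *   printf("%s\n", rte_bbdev_ops_param_string(op, RTE_BBDEV_OP_LDPC_DEC,
 *           str, sizeof(str)));
 */
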
1112 #ifdef __cplusplus
1113 }
1114 #endif
1115 
1116 #endif /* _RTE_BBDEV_H_ */
1117