xref: /dpdk/lib/bbdev/rte_bbdev.h (revision b53d106d34b5c638f5a2cbdfee0da5bd42d4383f)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Intel Corporation
3  */
4 
5 #ifndef _RTE_BBDEV_H_
6 #define _RTE_BBDEV_H_
7 
8 /**
9  * @file rte_bbdev.h
10  *
11  * Wireless base band device abstraction APIs.
12  *
13  * This API allows an application to discover, configure and use a device to
14  * process operations. An asynchronous API (enqueue, followed by later dequeue)
15  * is used for processing operations.
16  *
17  * The functions in this API are not thread-safe when called on the same
18  * target object (a device, or a queue on a device), with the exception that
19  * one thread can enqueue operations to a queue while another thread dequeues
20  * from the same queue.
21  */
22 
23 #ifdef __cplusplus
24 extern "C" {
25 #endif
26 
27 #include <stdint.h>
28 #include <stdbool.h>
29 #include <string.h>
30 
31 #include <rte_compat.h>
32 #include <rte_bus.h>
33 #include <rte_cpuflags.h>
34 #include <rte_memory.h>
35 
36 #include "rte_bbdev_op.h"
37 
38 #ifndef RTE_BBDEV_MAX_DEVS
39 #define RTE_BBDEV_MAX_DEVS 128  /**< Max number of devices */
40 #endif
41 
/** Flags indicate current state of BBDEV device */
enum rte_bbdev_state {
	RTE_BBDEV_UNUSED,       /**< Device is not (or no longer) in use */
	RTE_BBDEV_INITIALIZED   /**< Device has been initialised */
};
47 
48 /**
49  * Get the total number of devices that have been successfully initialised.
50  *
51  * @return
52  *   The total number of usable devices.
53  */
54 uint16_t
55 rte_bbdev_count(void);
56 
57 /**
58  * Check if a device is valid.
59  *
60  * @param dev_id
61  *   The identifier of the device.
62  *
63  * @return
64  *   true if device ID is valid and device is attached, false otherwise.
65  */
66 bool
67 rte_bbdev_is_valid(uint16_t dev_id);
68 
69 /**
70  * Get the next enabled device.
71  *
72  * @param dev_id
73  *   The current device
74  *
75  * @return
76  *   - The next device, or
77  *   - RTE_BBDEV_MAX_DEVS if none found
78  */
79 uint16_t
80 rte_bbdev_find_next(uint16_t dev_id);
81 
/** Iterate through all enabled devices; @p i is the device-id iteration
 *  variable and stays below RTE_BBDEV_MAX_DEVS while a device is found.
 */
#define RTE_BBDEV_FOREACH(i) for (i = rte_bbdev_find_next(-1); \
		i < RTE_BBDEV_MAX_DEVS; \
		i = rte_bbdev_find_next(i))
86 
87 /**
 * Set up device queues.
89  * This function must be called on a device before setting up the queues and
90  * starting the device. It can also be called when a device is in the stopped
91  * state. If any device queues have been configured their configuration will be
92  * cleared by a call to this function.
93  *
94  * @param dev_id
95  *   The identifier of the device.
96  * @param num_queues
97  *   Number of queues to configure on device.
98  * @param socket_id
99  *   ID of a socket which will be used to allocate memory.
100  *
101  * @return
102  *   - 0 on success
103  *   - -ENODEV if dev_id is invalid or the device is corrupted
104  *   - -EINVAL if num_queues is invalid, 0 or greater than maximum
105  *   - -EBUSY if the identified device has already started
106  *   - -ENOMEM if unable to allocate memory
107  */
108 int
109 rte_bbdev_setup_queues(uint16_t dev_id, uint16_t num_queues, int socket_id);
110 
111 /**
112  * Enable interrupts.
113  * This function may be called before starting the device to enable the
114  * interrupts if they are available.
115  *
116  * @param dev_id
117  *   The identifier of the device.
118  *
119  * @return
120  *   - 0 on success
121  *   - -ENODEV if dev_id is invalid or the device is corrupted
122  *   - -EBUSY if the identified device has already started
123  *   - -ENOTSUP if the interrupts are not supported by the device
124  */
125 int
126 rte_bbdev_intr_enable(uint16_t dev_id);
127 
/** Device queue configuration structure */
struct rte_bbdev_queue_conf {
	int socket;  /**< NUMA socket used for memory allocation */
	/** Size of the queue (must be a power of 2 and no greater than
	 *  rte_bbdev_driver_info::queue_size_lim)
	 */
	uint32_t queue_size;
	uint8_t priority;  /**< Queue priority */
	bool deferred_start; /**< Do not start queue when device is started. */
	enum rte_bbdev_op_type op_type; /**< Operation type */
};
136 
137 /**
138  * Configure a queue on a device.
139  * This function can be called after device configuration, and before starting.
140  * It can also be called when the device or the queue is in the stopped state.
141  *
142  * @param dev_id
143  *   The identifier of the device.
144  * @param queue_id
145  *   The index of the queue.
146  * @param conf
147  *   The queue configuration. If NULL, a default configuration will be used.
148  *
149  * @return
150  *   - 0 on success
151  *   - EINVAL if the identified queue size or priority are invalid
152  *   - EBUSY if the identified queue or its device have already started
153  */
154 int
155 rte_bbdev_queue_configure(uint16_t dev_id, uint16_t queue_id,
156 		const struct rte_bbdev_queue_conf *conf);
157 
158 /**
159  * Start a device.
160  * This is the last step needed before enqueueing operations is possible.
161  *
162  * @param dev_id
163  *   The identifier of the device.
164  *
165  * @return
166  *   - 0 on success
167  *   - negative value on failure - as returned from PMD driver
168  */
169 int
170 rte_bbdev_start(uint16_t dev_id);
171 
172 /**
173  * Stop a device.
174  * The device can be reconfigured, and restarted after being stopped.
175  *
176  * @param dev_id
177  *   The identifier of the device.
178  *
179  * @return
180  *   - 0 on success
181  */
182 int
183 rte_bbdev_stop(uint16_t dev_id);
184 
185 /**
186  * Close a device.
187  * The device cannot be restarted without reconfiguration!
188  *
189  * @param dev_id
190  *   The identifier of the device.
191  *
192  * @return
193  *   - 0 on success
194  */
195 int
196 rte_bbdev_close(uint16_t dev_id);
197 
198 /**
199  * Start a specified queue on a device.
200  * This is only needed if the queue has been stopped, or if the deferred_start
201  * flag has been set when configuring the queue.
202  *
203  * @param dev_id
204  *   The identifier of the device.
205  * @param queue_id
206  *   The index of the queue.
207  *
208  * @return
209  *   - 0 on success
210  *   - negative value on failure - as returned from PMD driver
211  */
212 int
213 rte_bbdev_queue_start(uint16_t dev_id, uint16_t queue_id);
214 
215 /**
 * Stop a specified queue on a device, to allow reconfiguration.
217  *
218  * @param dev_id
219  *   The identifier of the device.
220  * @param queue_id
221  *   The index of the queue.
222  *
223  * @return
224  *   - 0 on success
225  *   - negative value on failure - as returned from PMD driver
226  */
227 int
228 rte_bbdev_queue_stop(uint16_t dev_id, uint16_t queue_id);
229 
/** Device statistics. The same structure is also used for per-queue
 *  statistics — see rte_bbdev_queue_data::queue_stats.
 */
struct rte_bbdev_stats {
	uint64_t enqueued_count;  /**< Count of all operations enqueued */
	uint64_t dequeued_count;  /**< Count of all operations dequeued */
	/** Total error count on operations enqueued */
	uint64_t enqueue_err_count;
	/** Total error count on operations dequeued */
	uint64_t dequeue_err_count;
	/** CPU cycles consumed by the (HW/SW) accelerator device to offload
	 *  the enqueue request to its internal queues.
	 *  - For a HW device this is the cycles consumed in MMIO write
	 *  - For a SW (vdev) device, this is the processing time of the
	 *    bbdev operation
	 */
	uint64_t acc_offload_cycles;
};
246 
247 /**
248  * Retrieve the general I/O statistics of a device.
249  *
250  * @param dev_id
251  *   The identifier of the device.
252  * @param stats
253  *   Pointer to structure to where statistics will be copied. On error, this
254  *   location may or may not have been modified.
255  *
256  * @return
257  *   - 0 on success
258  *   - EINVAL if invalid parameter pointer is provided
259  */
260 int
261 rte_bbdev_stats_get(uint16_t dev_id, struct rte_bbdev_stats *stats);
262 
263 /**
264  * Reset the statistics of a device.
265  *
266  * @param dev_id
267  *   The identifier of the device.
268  * @return
269  *   - 0 on success
270  */
271 int
272 rte_bbdev_stats_reset(uint16_t dev_id);
273 
/** Device information supplied by the device's driver */
struct rte_bbdev_driver_info {
	/** Driver name */
	const char *driver_name;

	/** Maximum number of queues supported by the device */
	unsigned int max_num_queues;
	/** Queue size limit (queue size must also be power of 2) */
	uint32_t queue_size_lim;
	/** Set if device off-loads operation to hardware */
	bool hardware_accelerated;
	/** Max value supported by queue priority for DL (downlink) queues */
	uint8_t max_dl_queue_priority;
	/** Max value supported by queue priority for UL (uplink) queues */
	uint8_t max_ul_queue_priority;
	/** Set if device supports per-queue interrupts */
	bool queue_intr_supported;
	/** Minimum alignment of buffers, in bytes */
	uint16_t min_alignment;
	/** HARQ memory available in kB */
	uint32_t harq_buffer_size;
	/** Byte endianness (RTE_BIG_ENDIAN/RTE_LITTLE_ENDIAN) supported
	 *  for input/output data
	 */
	uint8_t data_endianness;
	/** Default queue configuration used if none is supplied */
	struct rte_bbdev_queue_conf default_queue_conf;
	/** Device operation capabilities — an array whose last entry is
	 *  marked with RTE_BBDEV_END_OF_CAPABILITIES_LIST()
	 */
	const struct rte_bbdev_op_cap *capabilities;
	/** Device cpu_flag requirements */
	const enum rte_cpu_flag_t *cpu_flag_reqs;
};
306 
/** Macro used at the end of a bbdev PMD capabilities list to mark its
 *  terminating entry.
 */
#define RTE_BBDEV_END_OF_CAPABILITIES_LIST() \
	{ RTE_BBDEV_OP_NONE }
310 
/**
 * Device information structure used by an application to discover a device's
 * capabilities and current configuration
 */
struct rte_bbdev_info {
	int socket_id;  /**< NUMA socket that device is on */
	const char *dev_name;  /**< Unique device name */
	const struct rte_device *device; /**< Device Information */
	uint16_t num_queues;  /**< Number of queues currently configured */
	bool started;  /**< Set if device is currently started */
	struct rte_bbdev_driver_info drv;  /**< Info from device driver */
};
323 
324 /**
325  * Retrieve information about a device.
326  *
327  * @param dev_id
328  *   The identifier of the device.
329  * @param dev_info
330  *   Pointer to structure to where information will be copied. On error, this
331  *   location may or may not have been modified.
332  *
333  * @return
334  *   - 0 on success
335  *   - EINVAL if invalid parameter pointer is provided
336  */
337 int
338 rte_bbdev_info_get(uint16_t dev_id, struct rte_bbdev_info *dev_info);
339 
/** Queue information, retrieved with rte_bbdev_queue_info_get() */
struct rte_bbdev_queue_info {
	/** Current device configuration */
	struct rte_bbdev_queue_conf conf;
	/** Set if queue is currently started */
	bool started;
};
347 
348 /**
349  * Retrieve information about a specific queue on a device.
350  *
351  * @param dev_id
352  *   The identifier of the device.
353  * @param queue_id
354  *   The index of the queue.
355  * @param queue_info
356  *   Pointer to structure to where information will be copied. On error, this
357  *   location may or may not have been modified.
358  *
359  * @return
360  *   - 0 on success
361  *   - EINVAL if invalid parameter pointer is provided
362  */
363 int
364 rte_bbdev_queue_info_get(uint16_t dev_id, uint16_t queue_id,
365 		struct rte_bbdev_queue_info *queue_info);
366 
/** @internal The data structure associated with each queue of a device. */
struct rte_bbdev_queue_data {
	void *queue_private;  /**< Driver-specific per-queue data */
	struct rte_bbdev_queue_conf conf;  /**< Current configuration */
	struct rte_bbdev_stats queue_stats;  /**< Queue statistics */
	bool started;  /**< Queue state (set once the queue is started) */
};
374 
375 /** @internal Enqueue encode operations for processing on queue of a device. */
376 typedef uint16_t (*rte_bbdev_enqueue_enc_ops_t)(
377 		struct rte_bbdev_queue_data *q_data,
378 		struct rte_bbdev_enc_op **ops,
379 		uint16_t num);
380 
381 /** @internal Enqueue decode operations for processing on queue of a device. */
382 typedef uint16_t (*rte_bbdev_enqueue_dec_ops_t)(
383 		struct rte_bbdev_queue_data *q_data,
384 		struct rte_bbdev_dec_op **ops,
385 		uint16_t num);
386 
387 /** @internal Dequeue encode operations from a queue of a device. */
388 typedef uint16_t (*rte_bbdev_dequeue_enc_ops_t)(
389 		struct rte_bbdev_queue_data *q_data,
390 		struct rte_bbdev_enc_op **ops, uint16_t num);
391 
392 /** @internal Dequeue decode operations from a queue of a device. */
393 typedef uint16_t (*rte_bbdev_dequeue_dec_ops_t)(
394 		struct rte_bbdev_queue_data *q_data,
395 		struct rte_bbdev_dec_op **ops, uint16_t num);
396 
397 #define RTE_BBDEV_NAME_MAX_LEN  64  /**< Max length of device name */
398 
/**
 * @internal The data associated with a device, with no function pointers.
 * This structure is safe to place in shared memory to be common among
 * different processes in a multi-process configuration. Drivers can access
 * these fields, but should never write to them!
 */
struct rte_bbdev_data {
	char name[RTE_BBDEV_NAME_MAX_LEN]; /**< Unique identifier name */
	void *dev_private;  /**< Driver-specific private data */
	uint16_t num_queues;  /**< Number of currently configured queues */
	struct rte_bbdev_queue_data *queues;  /**< Queue structures */
	uint16_t dev_id;  /**< Device ID */
	int socket_id;  /**< NUMA socket that device is on */
	bool started;  /**< Device run-time state */
	uint16_t process_cnt;  /**< Counter of processes using the device */
};
415 
416 /* Forward declarations */
417 struct rte_bbdev_ops;
418 struct rte_bbdev_callback;
419 struct rte_intr_handle;
420 
421 /** Structure to keep track of registered callbacks */
422 RTE_TAILQ_HEAD(rte_bbdev_cb_list, rte_bbdev_callback);
423 
/**
 * @internal The data structure associated with a device. Drivers can access
 * these fields, but should only write to the *_ops fields.
 */
struct __rte_cache_aligned rte_bbdev {
	/** Enqueue encode function */
	rte_bbdev_enqueue_enc_ops_t enqueue_enc_ops;
	/** Enqueue decode function */
	rte_bbdev_enqueue_dec_ops_t enqueue_dec_ops;
	/** Dequeue encode function */
	rte_bbdev_dequeue_enc_ops_t dequeue_enc_ops;
	/** Dequeue decode function */
	rte_bbdev_dequeue_dec_ops_t dequeue_dec_ops;
	/** Enqueue LDPC encode function */
	rte_bbdev_enqueue_enc_ops_t enqueue_ldpc_enc_ops;
	/** Enqueue LDPC decode function */
	rte_bbdev_enqueue_dec_ops_t enqueue_ldpc_dec_ops;
	/** Dequeue LDPC encode function */
	rte_bbdev_dequeue_enc_ops_t dequeue_ldpc_enc_ops;
	/** Dequeue LDPC decode function */
	rte_bbdev_dequeue_dec_ops_t dequeue_ldpc_dec_ops;
	const struct rte_bbdev_ops *dev_ops;  /**< Functions exported by PMD */
	struct rte_bbdev_data *data;  /**< Pointer to device data */
	enum rte_bbdev_state state;  /**< If device is currently used or not */
	struct rte_device *device; /**< Backing device */
	/** User application callback for interrupts if present */
	struct rte_bbdev_cb_list list_cbs;
	struct rte_intr_handle *intr_handle; /**< Device interrupt handle */
};
453 
454 /** @internal array of all devices */
455 extern struct rte_bbdev rte_bbdev_devices[];
456 
457 /**
458  * Enqueue a burst of processed encode operations to a queue of the device.
459  * This functions only enqueues as many operations as currently possible and
460  * does not block until @p num_ops entries in the queue are available.
461  * This function does not provide any error notification to avoid the
462  * corresponding overhead.
463  *
464  * @param dev_id
465  *   The identifier of the device.
466  * @param queue_id
467  *   The index of the queue.
468  * @param ops
469  *   Pointer array containing operations to be enqueued Must have at least
470  *   @p num_ops entries
471  * @param num_ops
472  *   The maximum number of operations to enqueue.
473  *
474  * @return
475  *   The number of operations actually enqueued (this is the number of processed
476  *   entries in the @p ops array).
477  */
478 static inline uint16_t
479 rte_bbdev_enqueue_enc_ops(uint16_t dev_id, uint16_t queue_id,
480 		struct rte_bbdev_enc_op **ops, uint16_t num_ops)
481 {
482 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
483 	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
484 	return dev->enqueue_enc_ops(q_data, ops, num_ops);
485 }
486 
487 /**
488  * Enqueue a burst of processed decode operations to a queue of the device.
489  * This functions only enqueues as many operations as currently possible and
490  * does not block until @p num_ops entries in the queue are available.
491  * This function does not provide any error notification to avoid the
492  * corresponding overhead.
493  *
494  * @param dev_id
495  *   The identifier of the device.
496  * @param queue_id
497  *   The index of the queue.
498  * @param ops
499  *   Pointer array containing operations to be enqueued Must have at least
500  *   @p num_ops entries
501  * @param num_ops
502  *   The maximum number of operations to enqueue.
503  *
504  * @return
505  *   The number of operations actually enqueued (this is the number of processed
506  *   entries in the @p ops array).
507  */
508 static inline uint16_t
509 rte_bbdev_enqueue_dec_ops(uint16_t dev_id, uint16_t queue_id,
510 		struct rte_bbdev_dec_op **ops, uint16_t num_ops)
511 {
512 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
513 	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
514 	return dev->enqueue_dec_ops(q_data, ops, num_ops);
515 }
516 
517 /**
518  * Enqueue a burst of processed encode operations to a queue of the device.
519  * This functions only enqueues as many operations as currently possible and
520  * does not block until @p num_ops entries in the queue are available.
521  * This function does not provide any error notification to avoid the
522  * corresponding overhead.
523  *
524  * @param dev_id
525  *   The identifier of the device.
526  * @param queue_id
527  *   The index of the queue.
528  * @param ops
529  *   Pointer array containing operations to be enqueued Must have at least
530  *   @p num_ops entries
531  * @param num_ops
532  *   The maximum number of operations to enqueue.
533  *
534  * @return
535  *   The number of operations actually enqueued (this is the number of processed
536  *   entries in the @p ops array).
537  */
538 static inline uint16_t
539 rte_bbdev_enqueue_ldpc_enc_ops(uint16_t dev_id, uint16_t queue_id,
540 		struct rte_bbdev_enc_op **ops, uint16_t num_ops)
541 {
542 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
543 	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
544 	return dev->enqueue_ldpc_enc_ops(q_data, ops, num_ops);
545 }
546 
547 /**
548  * Enqueue a burst of processed decode operations to a queue of the device.
549  * This functions only enqueues as many operations as currently possible and
550  * does not block until @p num_ops entries in the queue are available.
551  * This function does not provide any error notification to avoid the
552  * corresponding overhead.
553  *
554  * @param dev_id
555  *   The identifier of the device.
556  * @param queue_id
557  *   The index of the queue.
558  * @param ops
559  *   Pointer array containing operations to be enqueued Must have at least
560  *   @p num_ops entries
561  * @param num_ops
562  *   The maximum number of operations to enqueue.
563  *
564  * @return
565  *   The number of operations actually enqueued (this is the number of processed
566  *   entries in the @p ops array).
567  */
568 static inline uint16_t
569 rte_bbdev_enqueue_ldpc_dec_ops(uint16_t dev_id, uint16_t queue_id,
570 		struct rte_bbdev_dec_op **ops, uint16_t num_ops)
571 {
572 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
573 	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
574 	return dev->enqueue_ldpc_dec_ops(q_data, ops, num_ops);
575 }
576 
577 
578 /**
579  * Dequeue a burst of processed encode operations from a queue of the device.
580  * This functions returns only the current contents of the queue, and does not
581  * block until @ num_ops is available.
582  * This function does not provide any error notification to avoid the
583  * corresponding overhead.
584  *
585  * @param dev_id
586  *   The identifier of the device.
587  * @param queue_id
588  *   The index of the queue.
589  * @param ops
590  *   Pointer array where operations will be dequeued to. Must have at least
591  *   @p num_ops entries
592  *   ie. A pointer to a table of void * pointers (ops) that will be filled.
593  * @param num_ops
594  *   The maximum number of operations to dequeue.
595  *
596  * @return
597  *   The number of operations actually dequeued (this is the number of entries
598  *   copied into the @p ops array).
599  */
600 static inline uint16_t
601 rte_bbdev_dequeue_enc_ops(uint16_t dev_id, uint16_t queue_id,
602 		struct rte_bbdev_enc_op **ops, uint16_t num_ops)
603 {
604 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
605 	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
606 	return dev->dequeue_enc_ops(q_data, ops, num_ops);
607 }
608 
609 /**
610  * Dequeue a burst of processed decode operations from a queue of the device.
611  * This functions returns only the current contents of the queue, and does not
612  * block until @ num_ops is available.
613  * This function does not provide any error notification to avoid the
614  * corresponding overhead.
615  *
616  * @param dev_id
617  *   The identifier of the device.
618  * @param queue_id
619  *   The index of the queue.
620  * @param ops
621  *   Pointer array where operations will be dequeued to. Must have at least
622  *   @p num_ops entries
623  *   ie. A pointer to a table of void * pointers (ops) that will be filled.
624  * @param num_ops
625  *   The maximum number of operations to dequeue.
626  *
627  * @return
628  *   The number of operations actually dequeued (this is the number of entries
629  *   copied into the @p ops array).
630  */
631 
632 static inline uint16_t
633 rte_bbdev_dequeue_dec_ops(uint16_t dev_id, uint16_t queue_id,
634 		struct rte_bbdev_dec_op **ops, uint16_t num_ops)
635 {
636 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
637 	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
638 	return dev->dequeue_dec_ops(q_data, ops, num_ops);
639 }
640 
641 
642 /**
643  * Dequeue a burst of processed encode operations from a queue of the device.
644  * This functions returns only the current contents of the queue, and does not
645  * block until @ num_ops is available.
646  * This function does not provide any error notification to avoid the
647  * corresponding overhead.
648  *
649  * @param dev_id
650  *   The identifier of the device.
651  * @param queue_id
652  *   The index of the queue.
653  * @param ops
654  *   Pointer array where operations will be dequeued to. Must have at least
655  *   @p num_ops entries
656  * @param num_ops
657  *   The maximum number of operations to dequeue.
658  *
659  * @return
660  *   The number of operations actually dequeued (this is the number of entries
661  *   copied into the @p ops array).
662  */
663 static inline uint16_t
664 rte_bbdev_dequeue_ldpc_enc_ops(uint16_t dev_id, uint16_t queue_id,
665 		struct rte_bbdev_enc_op **ops, uint16_t num_ops)
666 {
667 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
668 	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
669 	return dev->dequeue_ldpc_enc_ops(q_data, ops, num_ops);
670 }
671 
672 /**
673  * Dequeue a burst of processed decode operations from a queue of the device.
674  * This functions returns only the current contents of the queue, and does not
675  * block until @ num_ops is available.
676  * This function does not provide any error notification to avoid the
677  * corresponding overhead.
678  *
679  * @param dev_id
680  *   The identifier of the device.
681  * @param queue_id
682  *   The index of the queue.
683  * @param ops
684  *   Pointer array where operations will be dequeued to. Must have at least
685  *   @p num_ops entries
686  * @param num_ops
687  *   The maximum number of operations to dequeue.
688  *
689  * @return
690  *   The number of operations actually dequeued (this is the number of entries
691  *   copied into the @p ops array).
692  */
693 static inline uint16_t
694 rte_bbdev_dequeue_ldpc_dec_ops(uint16_t dev_id, uint16_t queue_id,
695 		struct rte_bbdev_dec_op **ops, uint16_t num_ops)
696 {
697 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
698 	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
699 	return dev->dequeue_ldpc_dec_ops(q_data, ops, num_ops);
700 }
701 
/** Definitions of device event types */
enum rte_bbdev_event_type {
	RTE_BBDEV_EVENT_UNKNOWN,  /**< Unknown event type */
	RTE_BBDEV_EVENT_ERROR,  /**< Error interrupt event */
	RTE_BBDEV_EVENT_DEQUEUE,  /**< Dequeue event */
	RTE_BBDEV_EVENT_MAX  /**< Max value of this enum */
};
709 
710 /**
711  * Typedef for application callback function registered by application
712  * software for notification of device events
713  *
714  * @param dev_id
715  *   Device identifier
716  * @param event
717  *   Device event to register for notification of.
718  * @param cb_arg
719  *   User specified parameter to be passed to user's callback function.
720  * @param ret_param
721  *   To pass data back to user application.
722  */
723 typedef void (*rte_bbdev_cb_fn)(uint16_t dev_id,
724 		enum rte_bbdev_event_type event, void *cb_arg,
725 		void *ret_param);
726 
727 /**
728  * Register a callback function for specific device id. Multiple callbacks can
729  * be added and will be called in the order they are added when an event is
730  * triggered. Callbacks are called in a separate thread created by the DPDK EAL.
731  *
732  * @param dev_id
733  *   Device id.
734  * @param event
735  *   The event that the callback will be registered for.
736  * @param cb_fn
737  *   User supplied callback function to be called.
738  * @param cb_arg
739  *   Pointer to parameter that will be passed to the callback.
740  *
741  * @return
742  *   Zero on success, negative value on failure.
743  */
744 int
745 rte_bbdev_callback_register(uint16_t dev_id, enum rte_bbdev_event_type event,
746 		rte_bbdev_cb_fn cb_fn, void *cb_arg);
747 
748 /**
749  * Unregister a callback function for specific device id.
750  *
751  * @param dev_id
752  *   The device identifier.
753  * @param event
754  *   The event that the callback will be unregistered for.
755  * @param cb_fn
756  *   User supplied callback function to be unregistered.
757  * @param cb_arg
758  *   Pointer to the parameter supplied when registering the callback.
759  *   (void *)-1 means to remove all registered callbacks with the specified
760  *   function address.
761  *
762  * @return
763  *   - 0 on success
764  *   - EINVAL if invalid parameter pointer is provided
765  *   - EAGAIN if the provided callback pointer does not exist
766  */
767 int
768 rte_bbdev_callback_unregister(uint16_t dev_id, enum rte_bbdev_event_type event,
769 		rte_bbdev_cb_fn cb_fn, void *cb_arg);
770 
771 /**
772  * Enable a one-shot interrupt on the next operation enqueued to a particular
773  * queue. The interrupt will be triggered when the operation is ready to be
774  * dequeued. To handle the interrupt, an epoll file descriptor must be
775  * registered using rte_bbdev_queue_intr_ctl(), and then an application
776  * thread/lcore can wait for the interrupt using rte_epoll_wait().
777  *
778  * @param dev_id
779  *   The device identifier.
780  * @param queue_id
781  *   The index of the queue.
782  *
783  * @return
784  *   - 0 on success
785  *   - negative value on failure - as returned from PMD driver
786  */
787 int
788 rte_bbdev_queue_intr_enable(uint16_t dev_id, uint16_t queue_id);
789 
790 /**
791  * Disable a one-shot interrupt on the next operation enqueued to a particular
792  * queue (if it has been enabled).
793  *
794  * @param dev_id
795  *   The device identifier.
796  * @param queue_id
797  *   The index of the queue.
798  *
799  * @return
800  *   - 0 on success
801  *   - negative value on failure - as returned from PMD driver
802  */
803 int
804 rte_bbdev_queue_intr_disable(uint16_t dev_id, uint16_t queue_id);
805 
806 /**
807  * Control interface for per-queue interrupts.
808  *
809  * @param dev_id
810  *   The device identifier.
811  * @param queue_id
812  *   The index of the queue.
813  * @param epfd
814  *   Epoll file descriptor that will be associated with the interrupt source.
815  *   If the special value RTE_EPOLL_PER_THREAD is provided, a per thread epoll
816  *   file descriptor created by the EAL is used (RTE_EPOLL_PER_THREAD can also
817  *   be used when calling rte_epoll_wait()).
818  * @param op
 *   The operation to be performed for the vector: RTE_INTR_EVENT_ADD or
820  *   RTE_INTR_EVENT_DEL.
821  * @param data
822  *   User context, that will be returned in the epdata.data field of the
823  *   rte_epoll_event structure filled in by rte_epoll_wait().
824  *
825  * @return
826  *   - 0 on success
827  *   - ENOTSUP if interrupts are not supported by the identified device
828  *   - negative value on failure - as returned from PMD driver
829  */
830 int
831 rte_bbdev_queue_intr_ctl(uint16_t dev_id, uint16_t queue_id, int epfd, int op,
832 		void *data);
833 
834 #ifdef __cplusplus
835 }
836 #endif
837 
838 #endif /* _RTE_BBDEV_H_ */
839