/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation
 */

#ifndef _RTE_COMPRESSDEV_H_
#define _RTE_COMPRESSDEV_H_

/**
 * @file rte_compressdev.h
 *
 * RTE Compression Device APIs.
 *
 * Defines comp device APIs for the provisioning of compression operations.
 */

#include "rte_comp.h"

#ifdef __cplusplus
extern "C" {
#endif

/**
 * Parameter log base 2 range description.
 * Final value will be 2^value.
 */
struct rte_param_log2_range {
	uint8_t min;	/**< Minimum log2 value */
	uint8_t max;	/**< Maximum log2 value */
	uint8_t increment;
	/**< If a range of sizes is supported,
	 * this parameter is used to indicate
	 * increments in base 2 log byte value
	 * that are supported between the minimum and maximum.
	 */
};

/** Structure used to capture a capability of a comp device */
struct rte_compressdev_capabilities {
	enum rte_comp_algorithm algo;
	/**< Compression algorithm */
	uint64_t comp_feature_flags;
	/**< Bitmask of flags for compression service features */
	struct rte_param_log2_range window_size;
	/**< Window size range in base two log byte values */
};

/** Macro used at end of comp PMD list */
#define RTE_COMP_END_OF_CAPABILITIES_LIST() \
	{ RTE_COMP_ALGO_UNSPECIFIED }

/**
 * Query a device's capabilities for a particular compression algorithm.
 *
 * @param dev_id
 *   Compress device identifier
 * @param algo
 *   Compression algorithm
 * @return
 *   - Pointer to the capability entry for *algo* on success.
 *   - NULL if the device does not support *algo*.
 */
const struct rte_compressdev_capabilities *
rte_compressdev_capability_get(uint8_t dev_id,
			enum rte_comp_algorithm algo);
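
/*
 * Usage sketch (illustrative only, not part of the API definition):
 * check whether a device supports DEFLATE and report the advertised
 * window size range. Assumes *dev_id* identifies an existing device
 * and that <stdio.h> is included.
 *
 *	const struct rte_compressdev_capabilities *cap;
 *
 *	cap = rte_compressdev_capability_get(dev_id, RTE_COMP_ALGO_DEFLATE);
 *	if (cap != NULL)
 *		printf("DEFLATE window size: 2^%u to 2^%u bytes\n",
 *			(unsigned int)cap->window_size.min,
 *			(unsigned int)cap->window_size.max);
 *	else
 *		printf("DEFLATE not supported on device %u\n",
 *			(unsigned int)dev_id);
 */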

/**
 * compression device supported feature flags
 *
 * @note New feature flags should be added to the end of the list
 *
 * Keep these flags synchronised with rte_compressdev_get_feature_name()
 */
#define	RTE_COMPDEV_FF_HW_ACCELERATED		(1ULL << 0)
/**< Operations are off-loaded to an external hardware accelerator */
#define	RTE_COMPDEV_FF_CPU_SSE			(1ULL << 1)
/**< Utilises CPU SIMD SSE instructions */
#define	RTE_COMPDEV_FF_CPU_AVX			(1ULL << 2)
/**< Utilises CPU SIMD AVX instructions */
#define	RTE_COMPDEV_FF_CPU_AVX2			(1ULL << 3)
/**< Utilises CPU SIMD AVX2 instructions */
#define	RTE_COMPDEV_FF_CPU_AVX512		(1ULL << 4)
/**< Utilises CPU SIMD AVX512 instructions */
#define	RTE_COMPDEV_FF_CPU_NEON			(1ULL << 5)
/**< Utilises CPU NEON instructions */
#define RTE_COMPDEV_FF_OP_DONE_IN_DEQUEUE	(1ULL << 6)
/**< A PMD should set this if the bulk of the
 * processing is done during the dequeue. It should leave it
 * cleared if the processing is done during the enqueue (default).
 * Applications can use this as a hint for tuning.
 */

/**
 * Get the name of a compress device feature flag.
 *
 * @param flag
 *   The mask describing the flag
 *
 * @return
 *   The name of this flag, or NULL if it's not a valid feature flag.
 */
const char *
rte_compressdev_get_feature_name(uint64_t flag);
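
/*
 * Usage sketch (illustrative only): walk every bit position and print
 * the name of each feature flag advertised by a device. Assumes
 * *dev_id* is valid and <stdio.h> is included;
 * rte_compressdev_info_get() is declared further down in this file.
 *
 *	struct rte_compressdev_info info;
 *	uint64_t flag;
 *
 *	rte_compressdev_info_get(dev_id, &info);
 *	for (flag = 1; flag != 0; flag <<= 1) {
 *		const char *name = rte_compressdev_get_feature_name(flag);
 *
 *		if ((info.feature_flags & flag) && name != NULL)
 *			printf("  %s\n", name);
 *	}
 */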

/** comp device information */
struct rte_compressdev_info {
	const char *driver_name;		/**< Driver name. */
	uint64_t feature_flags;			/**< Feature flags */
	const struct rte_compressdev_capabilities *capabilities;
	/**< Array of the device's supported capabilities */
	uint16_t max_nb_queue_pairs;
	/**< Maximum number of queue pairs supported by the device.
	 * (If 0, there is no limit on the maximum number of queue pairs)
	 */
};

/** comp device statistics */
struct rte_compressdev_stats {
	uint64_t enqueued_count;
	/**< Count of all operations enqueued */
	uint64_t dequeued_count;
	/**< Count of all operations dequeued */

	uint64_t enqueue_err_count;
	/**< Total error count on operations enqueued */
	uint64_t dequeue_err_count;
	/**< Total error count on operations dequeued */
};

/**
 * Get the device identifier for the named compress device.
 *
 * @param name
 *   Device name to select the device structure
 * @return
 *   - Returns compress device identifier on success.
 *   - Returns -1 on failure to find named compress device.
 */
int
rte_compressdev_get_dev_id(const char *name);

/**
 * Get the compress device name given a device identifier.
 *
 * @param dev_id
 *   Compress device identifier
 * @return
 *   - Returns compress device name.
 *   - Returns NULL if compress device is not present.
 */
const char *
rte_compressdev_name_get(uint8_t dev_id);

/**
 * Get the total number of compress devices that have been successfully
 * initialised.
 *
 * @return
 *   - The total number of usable compress devices.
 */
uint8_t
rte_compressdev_count(void);

/**
 * Get number and identifiers of attached comp devices that
 * use the same compress driver.
 *
 * @param driver_name
 *   Driver name
 * @param devices
 *   Output device identifiers
 * @param nb_devices
 *   Maximum number of device identifiers that can be written to *devices*
 *
 * @return
 *   Returns number of attached compress devices.
 */
uint8_t
rte_compressdev_devices_get(const char *driver_name, uint8_t *devices,
		uint8_t nb_devices);
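
/*
 * Usage sketch (illustrative only): list the devices created by one
 * driver. The driver name "compress_qat" and the array size of 16 are
 * placeholder choices. Assumes <stdio.h> is included.
 *
 *	uint8_t devs[16];
 *	uint8_t nb, i;
 *
 *	nb = rte_compressdev_devices_get("compress_qat", devs, 16);
 *	for (i = 0; i < nb; i++)
 *		printf("device %u: %s\n", (unsigned int)devs[i],
 *			rte_compressdev_name_get(devs[i]));
 */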

/**
 * Return the NUMA socket to which a device is connected.
 *
 * @param dev_id
 *   Compress device identifier
 * @return
 *   The NUMA socket id to which the device is connected or
 *   a default of zero if the socket could not be determined.
 *   -1 is returned if the dev_id value is out of range.
 */
int
rte_compressdev_socket_id(uint8_t dev_id);

/** Compress device configuration structure */
struct rte_compressdev_config {
	int socket_id;
	/**< Socket on which to allocate resources */
	uint16_t nb_queue_pairs;
	/**< Total number of queue pairs to configure on a device */
	uint16_t max_nb_priv_xforms;
	/**< Max number of private_xforms which will be created on the device */
	uint16_t max_nb_streams;
	/**< Max number of streams which will be created on the device */
};

/**
 * Configure a device.
 *
 * This function must be invoked before any other function in the
 * API. This function can also be re-invoked when a device is in the
 * stopped state.
 *
 * @param dev_id
 *   Compress device identifier
 * @param config
 *   The compress device configuration
 * @return
 *   - 0: Success, device configured.
 *   - <0: Error code returned by the driver configuration function.
 */
int
rte_compressdev_configure(uint8_t dev_id,
			struct rte_compressdev_config *config);

/**
 * Start a device.
 *
 * The device start step is called after configuring the device and setting up
 * its queue pairs.
 * On success, data-path functions exported by the API (enqueue/dequeue, etc)
 * can be invoked.
 *
 * @param dev_id
 *   Compress device identifier
 * @return
 *   - 0: Success, device started.
 *   - <0: Error code of the driver device start function.
 */
int
rte_compressdev_start(uint8_t dev_id);

/**
 * Stop a device. The device can be restarted with a call to
 * rte_compressdev_start().
 *
 * @param dev_id
 *   Compress device identifier
 */
void
rte_compressdev_stop(uint8_t dev_id);

/**
 * Close a device.
 * The memory allocated in the device gets freed.
 * After calling this function, the device must be
 * configured again before it can be used.
 *
 * @param dev_id
 *   Compress device identifier
 *
 * @return
 *  - 0 on successfully closing device
 *  - <0 on failure to close device
 */
int
rte_compressdev_close(uint8_t dev_id);

/**
 * Allocate and set up a receive queue pair for a device.
 * This should only be called when the device is stopped.
 *
 * @param dev_id
 *   Compress device identifier
 * @param queue_pair_id
 *   The index of the queue pair to set up. The
 *   value must be in the range [0, nb_queue_pairs - 1]
 *   previously supplied to rte_compressdev_configure()
 * @param max_inflight_ops
 *   Max number of ops which the qp will have to
 *   accommodate simultaneously
 * @param socket_id
 *   The *socket_id* argument is the socket identifier
 *   in case of NUMA. The value can be *SOCKET_ID_ANY*
 *   if there is no NUMA constraint for the DMA memory
 *   allocated for the receive queue pair
 * @return
 *   - 0: Success, queue pair correctly set up.
 *   - <0: Queue pair configuration failed
 */
int
rte_compressdev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		uint32_t max_inflight_ops, int socket_id);
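
/*
 * Usage sketch (illustrative only): minimal bring-up of a device with a
 * single queue pair. The sizing values (1 queue pair, 1 private_xform,
 * 0 streams, 512 in-flight ops) are arbitrary illustration choices.
 *
 *	struct rte_compressdev_config cfg = {
 *		.socket_id = rte_compressdev_socket_id(dev_id),
 *		.nb_queue_pairs = 1,
 *		.max_nb_priv_xforms = 1,
 *		.max_nb_streams = 0,
 *	};
 *
 *	if (rte_compressdev_configure(dev_id, &cfg) < 0)
 *		return -1;
 *	if (rte_compressdev_queue_pair_setup(dev_id, 0, 512,
 *			cfg.socket_id) < 0)
 *		return -1;
 *	if (rte_compressdev_start(dev_id) < 0)
 *		return -1;
 */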

/**
 * Get the number of queue pairs on a specific comp device
 *
 * @param dev_id
 *   Compress device identifier
 * @return
 *   - The number of configured queue pairs.
 */
uint16_t
rte_compressdev_queue_pair_count(uint8_t dev_id);

/**
 * Retrieve the general I/O statistics of a device.
 *
 * @param dev_id
 *   The identifier of the device
 * @param stats
 *   A pointer to a structure of type
 *   *rte_compressdev_stats* to be filled with the
 *   values of device counters
 * @return
 *   - Zero if successful.
 *   - Non-zero otherwise.
 */
int
rte_compressdev_stats_get(uint8_t dev_id, struct rte_compressdev_stats *stats);
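
/*
 * Usage sketch (illustrative only): dump the device counters and then
 * clear them. Assumes <inttypes.h> and <stdio.h> are included.
 *
 *	struct rte_compressdev_stats stats;
 *
 *	if (rte_compressdev_stats_get(dev_id, &stats) == 0)
 *		printf("enq %" PRIu64 " deq %" PRIu64
 *			" enq_err %" PRIu64 " deq_err %" PRIu64 "\n",
 *			stats.enqueued_count, stats.dequeued_count,
 *			stats.enqueue_err_count, stats.dequeue_err_count);
 *	rte_compressdev_stats_reset(dev_id);
 */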

/**
 * Reset the general I/O statistics of a device.
 *
 * @param dev_id
 *   The identifier of the device.
 */
void
rte_compressdev_stats_reset(uint8_t dev_id);

/**
 * Retrieve the contextual information of a device.
 *
 * @param dev_id
 *   Compress device identifier
 * @param dev_info
 *   A pointer to a structure of type *rte_compressdev_info*
 *   to be filled with the contextual information of the device
 *
 * @note The capabilities field of dev_info is set to point to the first
 * element of an array of struct rte_compressdev_capabilities.
 * The element after the last valid element has its algo field set to
 * RTE_COMP_ALGO_UNSPECIFIED.
 */
void
rte_compressdev_info_get(uint8_t dev_id, struct rte_compressdev_info *dev_info);
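
/*
 * Usage sketch (illustrative only): retrieve the device info and walk
 * the capability array until the RTE_COMP_ALGO_UNSPECIFIED terminator.
 * Assumes <inttypes.h> and <stdio.h> are included.
 *
 *	struct rte_compressdev_info info;
 *	const struct rte_compressdev_capabilities *cap;
 *
 *	rte_compressdev_info_get(dev_id, &info);
 *	printf("driver %s, max queue pairs %u\n",
 *		info.driver_name, (unsigned int)info.max_nb_queue_pairs);
 *	for (cap = info.capabilities;
 *			cap->algo != RTE_COMP_ALGO_UNSPECIFIED; cap++)
 *		printf("  algo %d, feature flags 0x%" PRIx64 "\n",
 *			(int)cap->algo, cap->comp_feature_flags);
 */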

/**
 * Dequeue a burst of processed compression operations from a queue on the comp
 * device. The dequeued operations are stored in *rte_comp_op* structures
 * whose pointers are supplied in the *ops* array.
 *
 * The rte_compressdev_dequeue_burst() function returns the number of ops
 * actually dequeued, which is the number of *rte_comp_op* data structures
 * effectively supplied into the *ops* array.
 *
 * A return value equal to *nb_ops* indicates that the queue contained
 * at least *nb_ops* operations, and this is likely to signify that other
 * processed operations remain in the device's output queue. Applications
 * implementing a "retrieve as many processed operations as possible" policy
 * can check this specific case and keep invoking the
 * rte_compressdev_dequeue_burst() function until a value less than
 * *nb_ops* is returned.
 *
 * The rte_compressdev_dequeue_burst() function does not provide any error
 * notification to avoid the corresponding overhead.
 *
 * @note: operation ordering is not maintained within the queue pair.
 *
 * @note: In case op status = OUT_OF_SPACE_TERMINATED, op.consumed=0 and the
 * op must be resubmitted with the same input data and a larger output buffer.
 * op.produced is usually 0, but in decompression cases a PMD may return > 0
 * and the application may find it useful to inspect that data.
 * This status is only returned on STATELESS ops.
 *
 * @note: In case op status = OUT_OF_SPACE_RECOVERABLE, op.produced can be used
 * and the next op in the stream should continue on from op.consumed+1 with a
 * fresh output buffer.
 * Consumed=0, produced=0 is an unusual but allowed case. There may be useful
 * state/history stored in the PMD, even though no output was produced yet.
 *
 * @param dev_id
 *   Compress device identifier
 * @param qp_id
 *   The index of the queue pair from which to retrieve
 *   processed operations. The value must be in the range
 *   [0, nb_queue_pairs - 1] previously supplied to
 *   rte_compressdev_configure()
 * @param ops
 *   The address of an array of pointers to
 *   *rte_comp_op* structures that must be
 *   large enough to store *nb_ops* pointers in it
 * @param nb_ops
 *   The maximum number of operations to dequeue
 * @return
 *   - The number of operations actually dequeued, which is the number
 *   of pointers to *rte_comp_op* structures effectively supplied to the
 *   *ops* array.
 */
uint16_t
rte_compressdev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
		struct rte_comp_op **ops, uint16_t nb_ops);
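
/*
 * Usage sketch (illustrative only): drain a queue pair following the
 * "retrieve as many processed operations as possible" policy described
 * above. BURST_SIZE is an application-chosen constant and handle_error()
 * is a hypothetical application-defined helper.
 *
 *	struct rte_comp_op *deq_ops[BURST_SIZE];
 *	uint16_t nb, i;
 *
 *	do {
 *		nb = rte_compressdev_dequeue_burst(dev_id, qp_id,
 *				deq_ops, BURST_SIZE);
 *		for (i = 0; i < nb; i++) {
 *			if (deq_ops[i]->status != RTE_COMP_OP_STATUS_SUCCESS)
 *				handle_error(deq_ops[i]);
 *			rte_comp_op_free(deq_ops[i]);
 *		}
 *	} while (nb == BURST_SIZE);
 */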

/**
 * Enqueue a burst of operations for processing on a compression device.
 *
 * The rte_compressdev_enqueue_burst() function is invoked to place
 * comp operations on the queue *qp_id* of the device designated by
 * its *dev_id*.
 *
 * The *nb_ops* parameter is the number of operations to process which are
 * supplied in the *ops* array of *rte_comp_op* structures.
 *
 * The rte_compressdev_enqueue_burst() function returns the number of
 * operations it actually enqueued for processing. A return value equal to
 * *nb_ops* means that all operations have been enqueued.
 *
 * @note All compression operations are Out-of-place (OOP) operations,
 * as the size of the output data differs from the size of the input data.
 *
 * @note The rte_comp_op contains both input and output parameters and is the
 * vehicle for the application to pass data into and out of the PMD. While an
 * op is inflight, i.e. once it has been enqueued, the private_xform or stream
 * attached to it and any mbufs or memory referenced by it should not be altered
 * or freed by the application. The PMD may use or change some of this data at
 * any time until it has been returned in a dequeue operation.
 *
 * @note The flush flag only applies to operations which return SUCCESS.
 * In OUT_OF_SPACE cases, whether STATEFUL or STATELESS, data in the dest buffer
 * is as if the flush flag was FLUSH_NONE.
 * @note The flush flag only applies in the compression direction. It has no
 * meaning for decompression.
 * @note: operation ordering is not maintained within the queue pair.
 *
 * @param dev_id
 *   Compress device identifier
 * @param qp_id
 *   The index of the queue pair on which operations
 *   are to be enqueued for processing. The value
 *   must be in the range [0, nb_queue_pairs - 1]
 *   previously supplied to *rte_compressdev_configure*
 * @param ops
 *   The address of an array of *nb_ops* pointers
 *   to *rte_comp_op* structures which contain
 *   the operations to be processed
 * @param nb_ops
 *   The number of operations to process
 * @return
 *   The number of operations actually enqueued on the device. The return
 *   value can be less than the value of the *nb_ops* parameter when the
 *   comp device's queue is full or if invalid parameters are specified in
 *   a *rte_comp_op*.
 */
uint16_t
rte_compressdev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
		struct rte_comp_op **ops, uint16_t nb_ops);
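
/*
 * Usage sketch (illustrative only): enqueue a prepared batch of
 * operations and retry the tail that did not fit on the queue. The
 * *ops* array is assumed to hold *nb_to_enq* fully initialised
 * *rte_comp_op* structures (source/destination mbufs, private_xform or
 * stream, flush flag). In a real application the retry loop would
 * normally be interleaved with rte_compressdev_dequeue_burst() so that
 * queue space is freed up.
 *
 *	uint16_t sent = 0;
 *
 *	while (sent < nb_to_enq)
 *		sent += rte_compressdev_enqueue_burst(dev_id, qp_id,
 *				&ops[sent], nb_to_enq - sent);
 */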

/**
 * This should allocate a stream from the device's mempool and initialise it.
 * The application should call this API when setting up for the stateful
 * processing of a set of data on a device. The API can be called multiple
 * times to set up a stream for each data set. The handle returned is only for
 * use with ops of op_type STATEFUL and must be passed to the PMD
 * with every op in the data stream.
 *
 * @param dev_id
 *   Compress device identifier
 * @param xform
 *   xform data
 * @param stream
 *   Pointer to where the PMD's private stream handle should be stored
 *
 * @return
 *  - 0 if successful and valid stream handle
 *  - <0 in error cases
 *  - Returns -EINVAL if input parameters are invalid.
 *  - Returns -ENOTSUP if comp device does not support STATEFUL operations.
 *  - Returns -ENOTSUP if comp device does not support the comp transform.
 *  - Returns -ENOMEM if the private stream could not be allocated.
 */
int
rte_compressdev_stream_create(uint8_t dev_id,
		const struct rte_comp_xform *xform,
		void **stream);
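
/*
 * Usage sketch (illustrative only, field values are examples): create a
 * stream for stateful DEFLATE decompression. The xform layout and
 * constants come from rte_comp.h.
 *
 *	struct rte_comp_xform xform = {
 *		.type = RTE_COMP_DECOMPRESS,
 *		.decompress = {
 *			.algo = RTE_COMP_ALGO_DEFLATE,
 *			.chksum = RTE_COMP_CHECKSUM_NONE,
 *			.window_size = 15,
 *			.hash_algo = RTE_COMP_HASH_ALGO_NONE,
 *		},
 *	};
 *	void *stream = NULL;
 *
 *	if (rte_compressdev_stream_create(dev_id, &xform, &stream) < 0)
 *		return -1;
 *	// attach *stream* to every STATEFUL op belonging to this data stream
 */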

/**
 * This should clear the stream and return it to the device's mempool.
 *
 * @param dev_id
 *   Compress device identifier
 *
 * @param stream
 *   PMD's private stream data
 *
 * @return
 *  - 0 if successful
 *  - <0 in error cases
 *  - Returns -EINVAL if input parameters are invalid.
 *  - Returns -ENOTSUP if comp device does not support STATEFUL operations.
 *  - Returns -EBUSY if the stream cannot be freed as there are in-flight
 *    operations.
 */
int
rte_compressdev_stream_free(uint8_t dev_id, void *stream);

/**
 * This should allocate a private_xform from the device's mempool and
 * initialise it. The application should call this API when setting up for
 * stateless processing on a device. If it returns non-shareable, then the
 * application cannot share this handle with multiple in-flight ops and
 * should call this API again to get a separate handle for every in-flight op.
 * The handle returned is only valid for use with ops of op_type STATELESS.
 *
 * @param dev_id
 *   Compress device identifier
 * @param xform
 *   xform data
 * @param private_xform
 *   Pointer to where the PMD's private_xform handle should be stored
 *
 * @return
 *  - if successful returns 0
 *    and valid private_xform handle
 *  - <0 in error cases
 *  - Returns -EINVAL if input parameters are invalid.
 *  - Returns -ENOTSUP if comp device does not support the comp transform.
 *  - Returns -ENOMEM if the private_xform could not be allocated.
 */
int
rte_compressdev_private_xform_create(uint8_t dev_id,
		const struct rte_comp_xform *xform,
		void **private_xform);
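
/*
 * Usage sketch (illustrative only, field values are examples): create a
 * private_xform for stateless DEFLATE compression and keep the handle
 * for attaching to STATELESS ops. The xform layout and constants come
 * from rte_comp.h.
 *
 *	struct rte_comp_xform xform = {
 *		.type = RTE_COMP_COMPRESS,
 *		.compress = {
 *			.algo = RTE_COMP_ALGO_DEFLATE,
 *			.deflate.huffman = RTE_COMP_HUFFMAN_DEFAULT,
 *			.level = RTE_COMP_LEVEL_PMD_DEFAULT,
 *			.chksum = RTE_COMP_CHECKSUM_NONE,
 *			.window_size = 15,
 *			.hash_algo = RTE_COMP_HASH_ALGO_NONE,
 *		},
 *	};
 *	void *priv_xform = NULL;
 *
 *	if (rte_compressdev_private_xform_create(dev_id, &xform,
 *			&priv_xform) < 0)
 *		return -1;
 */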

/**
 * This should clear the private_xform and return it to the device's mempool.
 * It is the application's responsibility to ensure that private_xform data
 * is not cleared while there are still in-flight operations using it.
 *
 * @param dev_id
 *   Compress device identifier
 *
 * @param private_xform
 *   PMD's private_xform data
 *
 * @return
 *  - 0 if successful
 *  - <0 in error cases
 *  - Returns -EINVAL if input parameters are invalid.
 */
int
rte_compressdev_private_xform_free(uint8_t dev_id, void *private_xform);

#ifdef __cplusplus
}
#endif

#endif /* _RTE_COMPRESSDEV_H_ */