xref: /dpdk/lib/compressdev/rte_compressdev.h (revision db8aee153e43375538667c6f861e981a47574476)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017-2018 Intel Corporation
3  */
4 
5 #ifndef _RTE_COMPRESSDEV_H_
6 #define _RTE_COMPRESSDEV_H_
7 
8 /**
9  * @file rte_compressdev.h
10  *
11  * RTE Compression Device APIs.
12  *
13  * Defines comp device APIs for the provisioning of compression operations.
14  */
15 
16 #ifdef __cplusplus
17 extern "C" {
18 #endif
19 
20 
21 #include "rte_comp.h"
22 
/**
 * Parameter log base 2 range description.
 * Final value will be 2^value.
 */
struct rte_param_log2_range {
	uint8_t min;	/**< Minimum log2 value */
	uint8_t max;	/**< Maximum log2 value */
	uint8_t increment;
	/**< If a range of sizes are supported,
	 * this parameter is used to indicate
	 * increments in base 2 log byte value
	 * that are supported between the minimum and maximum
	 */
};
37 
38 /** Structure used to capture a capability of a comp device */
39 struct rte_compressdev_capabilities {
40 	enum rte_comp_algorithm algo;
41 	/* Compression algorithm */
42 	uint64_t comp_feature_flags;
43 	/**< Bitmask of flags for compression service features */
44 	struct rte_param_log2_range window_size;
45 	/**< Window size range in base two log byte values */
46 };
47 
/** Macro used at end of comp PMD list (sentinel entry: algo is
 * RTE_COMP_ALGO_UNSPECIFIED, remaining fields zero-initialized).
 */
#define RTE_COMP_END_OF_CAPABILITIES_LIST() \
	{ RTE_COMP_ALGO_UNSPECIFIED }
51 
/**
 * Query the capabilities of a comp device for a specific algorithm.
 *
 * @param dev_id
 *   Compress device identifier
 * @param algo
 *   Algorithm for which the capability is queried
 * @return
 *   - Pointer to the device's capability entry for *algo*.
 *   - NULL if the device does not support the algorithm.
 */
const struct rte_compressdev_capabilities *
rte_compressdev_capability_get(uint8_t dev_id,
			enum rte_comp_algorithm algo);
55 
/**
 * compression device supported feature flags
 *
 * @note New features flags should be added to the end of the list
 *
 * Keep these flags synchronised with rte_compressdev_get_feature_name()
 */
#define	RTE_COMPDEV_FF_HW_ACCELERATED		(1ULL << 0)
/**< Operations are off-loaded to an external hardware accelerator */
#define	RTE_COMPDEV_FF_CPU_SSE			(1ULL << 1)
/**< Utilises CPU SIMD SSE instructions */
#define	RTE_COMPDEV_FF_CPU_AVX			(1ULL << 2)
/**< Utilises CPU SIMD AVX instructions */
#define	RTE_COMPDEV_FF_CPU_AVX2			(1ULL << 3)
/**< Utilises CPU SIMD AVX2 instructions */
#define	RTE_COMPDEV_FF_CPU_AVX512		(1ULL << 4)
/**< Utilises CPU SIMD AVX512 instructions */
#define	RTE_COMPDEV_FF_CPU_NEON			(1ULL << 5)
/**< Utilises CPU NEON instructions */
#define RTE_COMPDEV_FF_OP_DONE_IN_DEQUEUE	(1ULL << 6)
/**< A PMD should set this if the bulk of the
 * processing is done during the dequeue. It should leave it
 * cleared if the processing is done during the enqueue (default).
 * Applications can use this as a hint for tuning.
 */
81 
/**
 * Get the name of a compress device feature flag.
 *
 * @param flag
 *   The mask describing the flag
 *
 * @return
 *   The name of this flag, or NULL if it's not a valid feature flag.
 */
const char *
rte_compressdev_get_feature_name(uint64_t flag);
93 
/** comp device information */
struct rte_compressdev_info {
	const char *driver_name;		/**< Driver name. */
	uint64_t feature_flags;			/**< Feature flags */
	const struct rte_compressdev_capabilities *capabilities;
	/**< Array of devices supported capabilities */
	uint16_t max_nb_queue_pairs;
	/**< Maximum number of queues pairs supported by device.
	 * (If 0, there is no limit in maximum number of queue pairs)
	 */
};
105 
/** comp device statistics */
struct rte_compressdev_stats {
	uint64_t enqueued_count;
	/**< Count of all operations enqueued */
	uint64_t dequeued_count;
	/**< Count of all operations dequeued */

	uint64_t enqueue_err_count;
	/**< Total error count on operations enqueued */
	uint64_t dequeue_err_count;
	/**< Total error count on operations dequeued */
};
118 
119 
/**
 * Get the device identifier for the named compress device.
 *
 * @param name
 *   Device name to select the device structure
 * @return
 *   - Returns compress device identifier on success.
 *   - Return -1 on failure to find named compress device.
 */
int
rte_compressdev_get_dev_id(const char *name);
131 
/**
 * Get the compress device name given a device identifier.
 *
 * @param dev_id
 *   Compress device identifier
 * @return
 *   - Returns compress device name.
 *   - Returns NULL if compress device is not present.
 */
const char *
rte_compressdev_name_get(uint8_t dev_id);
143 
/**
 * Get the total number of compress devices that have been successfully
 * initialised.
 *
 * @return
 *   - The total number of usable compress devices.
 */
uint8_t
rte_compressdev_count(void);
153 
/**
 * Get number and identifiers of attached comp devices that
 * use the same compress driver.
 *
 * @param driver_name
 *   Driver name
 * @param devices
 *   Output devices identifiers
 * @param nb_devices
 *   Maximal number of devices
 *
 * @return
 *   Returns number of attached compress devices.
 */
uint8_t
rte_compressdev_devices_get(const char *driver_name, uint8_t *devices,
		uint8_t nb_devices);
171 
/**
 * Return the NUMA socket to which a device is connected.
 *
 * @param dev_id
 *   Compress device identifier
 * @return
 *   The NUMA socket id to which the device is connected or
 *   a default of zero if the socket could not be determined.
 *   -1 if the dev_id value is out of range.
 */
int
rte_compressdev_socket_id(uint8_t dev_id);
184 
/** Compress device configuration structure */
struct rte_compressdev_config {
	int socket_id;
	/**< Socket on which to allocate resources */
	uint16_t nb_queue_pairs;
	/**< Total number of queue pairs to configure on a device */
	uint16_t max_nb_priv_xforms;
	/**< Max number of private_xforms which will be created on the device */
	uint16_t max_nb_streams;
	/**< Max number of streams which will be created on the device */
};
196 
/**
 * Configure a device.
 *
 * This function must be invoked first before any other function in the
 * API. This function can also be re-invoked when a device is in the
 * stopped state.
 *
 * @param dev_id
 *   Compress device identifier
 * @param config
 *   The compress device configuration
 * @return
 *   - 0: Success, device configured.
 *   - <0: Error code returned by the driver configuration function.
 */
int
rte_compressdev_configure(uint8_t dev_id,
			struct rte_compressdev_config *config);
215 
/**
 * Start a device.
 *
 * The device start step is called after configuring the device and setting up
 * its queue pairs.
 * On success, data-path functions exported by the API (enqueue/dequeue, etc)
 * can be invoked.
 *
 * @param dev_id
 *   Compress device identifier
 * @return
 *   - 0: Success, device started.
 *   - <0: Error code of the driver device start function.
 */
int
rte_compressdev_start(uint8_t dev_id);
232 
/**
 * Stop a device. The device can be restarted with a call to
 * rte_compressdev_start()
 *
 * @param dev_id
 *   Compress device identifier
 */
void
rte_compressdev_stop(uint8_t dev_id);
242 
/**
 * Close a device.
 * The memory allocated in the device gets freed.
 * After calling this function, in order to use
 * the device again, it is required to
 * configure the device again.
 *
 * @param dev_id
 *   Compress device identifier
 *
 * @return
 *  - 0 on successfully closing device
 *  - <0 on failure to close device
 */
int
rte_compressdev_close(uint8_t dev_id);
259 
/**
 * Allocate and set up a receive queue pair for a device.
 * This should only be called when the device is stopped.
 *
 * @param dev_id
 *   Compress device identifier
 * @param queue_pair_id
 *   The index of the queue pairs to set up. The
 *   value must be in the range [0, nb_queue_pair - 1]
 *   previously supplied to rte_compressdev_configure()
 * @param max_inflight_ops
 *   Max number of ops which the qp will have to
 *   accommodate simultaneously
 * @param socket_id
 *   The *socket_id* argument is the socket identifier
 *   in case of NUMA. The value can be *SOCKET_ID_ANY*
 *   if there is no NUMA constraint for the DMA memory
 *   allocated for the receive queue pair
 * @return
 *   - 0: Success, queue pair correctly set up.
 *   - <0: Queue pair configuration failed
 */
int
rte_compressdev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		uint32_t max_inflight_ops, int socket_id);
286 
/**
 * Get the number of queue pairs on a specific comp device
 *
 * @param dev_id
 *   Compress device identifier
 * @return
 *   - The number of configured queue pairs.
 */
uint16_t
rte_compressdev_queue_pair_count(uint8_t dev_id);
297 
298 
/**
 * Retrieve the general I/O statistics of a device.
 *
 * @param dev_id
 *   The identifier of the device
 * @param stats
 *   A pointer to a structure of type
 *   *rte_compressdev_stats* to be filled with the
 *   values of device counters
 * @return
 *   - Zero if successful.
 *   - Non-zero otherwise.
 */
int
rte_compressdev_stats_get(uint8_t dev_id, struct rte_compressdev_stats *stats);
314 
/**
 * Reset the general I/O statistics of a device.
 *
 * @param dev_id
 *   The identifier of the device.
 */
void
rte_compressdev_stats_reset(uint8_t dev_id);
323 
/**
 * Retrieve the contextual information of a device.
 *
 * @param dev_id
 *   Compress device identifier
 * @param dev_info
 *   A pointer to a structure of type *rte_compressdev_info*
 *   to be filled with the contextual information of the device
 *
 * @note The capabilities field of dev_info is set to point to the first
 * element of an array of struct rte_compressdev_capabilities.
 * The element after the last valid element has its algo field set to
 * RTE_COMP_ALGO_UNSPECIFIED.
 */
void
rte_compressdev_info_get(uint8_t dev_id, struct rte_compressdev_info *dev_info);
340 
/**
 *
 * Dequeue a burst of processed compression operations from a queue on the comp
 * device. The dequeued operations are stored in *rte_comp_op* structures
 * whose pointers are supplied in the *ops* array.
 *
 * The rte_compressdev_dequeue_burst() function returns the number of ops
 * actually dequeued, which is the number of *rte_comp_op* data structures
 * effectively supplied into the *ops* array.
 *
 * A return value equal to *nb_ops* indicates that the queue contained
 * at least *nb_ops* operations, and this is likely to signify that other
 * processed operations remain in the device's output queue. Applications
 * implementing a "retrieve as many processed operations as possible" policy
 * can check this specific case and keep invoking the
 * rte_compressdev_dequeue_burst() function until a value less than
 * *nb_ops* is returned.
 *
 * The rte_compressdev_dequeue_burst() function does not provide any error
 * notification to avoid the corresponding overhead.
 *
 * @note: operation ordering is not maintained within the queue pair.
 *
 * @note: In case op status = OUT_OF_SPACE_TERMINATED, op.consumed=0 and the
 * op must be resubmitted with the same input data and a larger output buffer.
 * op.produced is usually 0, but in decompression cases a PMD may return > 0
 * and the application may find it useful to inspect that data.
 * This status is only returned on STATELESS ops.
 *
 * @note: In case op status = OUT_OF_SPACE_RECOVERABLE, op.produced can be used
 * and next op in stream should continue on from op.consumed+1 with a fresh
 * output buffer.
 * Consumed=0, produced=0 is an unusual but allowed case. There may be useful
 * state/history stored in the PMD, even though no output was produced yet.
 *
 *
 * @param dev_id
 *   Compress device identifier
 * @param qp_id
 *   The index of the queue pair from which to retrieve
 *   processed operations. The value must be in the range
 *   [0, nb_queue_pair - 1] previously supplied to
 *   rte_compressdev_configure()
 * @param ops
 *   The address of an array of pointers to
 *   *rte_comp_op* structures that must be
 *   large enough to store *nb_ops* pointers in it
 * @param nb_ops
 *   The maximum number of operations to dequeue
 * @return
 *   - The number of operations actually dequeued, which is the number
 *   of pointers to *rte_comp_op* structures effectively supplied to the
 *   *ops* array.
 */
uint16_t
rte_compressdev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
		struct rte_comp_op **ops, uint16_t nb_ops);
398 
/**
 * Enqueue a burst of operations for processing on a compression device.
 *
 * The rte_compressdev_enqueue_burst() function is invoked to place
 * comp operations on the queue *qp_id* of the device designated by
 * its *dev_id*.
 *
 * The *nb_ops* parameter is the number of operations to process which are
 * supplied in the *ops* array of *rte_comp_op* structures.
 *
 * The rte_compressdev_enqueue_burst() function returns the number of
 * operations it actually enqueued for processing. A return value equal to
 * *nb_ops* means that all packets have been enqueued.
 *
 * @note All compression operations are Out-of-place (OOP) operations,
 * as the size of the output data is different to the size of the input data.
 *
 * @note The rte_comp_op contains both input and output parameters and is the
 * vehicle for the application to pass data into and out of the PMD. While an
 * op is inflight, i.e. once it has been enqueued, the private_xform or stream
 * attached to it and any mbufs or memory referenced by it should not be altered
 * or freed by the application. The PMD may use or change some of this data at
 * any time until it has been returned in a dequeue operation.
 *
 * @note The flush flag only applies to operations which return SUCCESS.
 * In OUT_OF_SPACE cases whether STATEFUL or STATELESS, data in dest buffer
 * is as if flush flag was FLUSH_NONE.
 * @note flush flag only applies in compression direction. It has no meaning
 * for decompression.
 * @note: operation ordering is not maintained within the queue pair.
 *
 * @param dev_id
 *   Compress device identifier
 * @param qp_id
 *   The index of the queue pair on which operations
 *   are to be enqueued for processing. The value
 *   must be in the range [0, nb_queue_pairs - 1]
 *   previously supplied to *rte_compressdev_configure*
 * @param ops
 *   The address of an array of *nb_ops* pointers
 *   to *rte_comp_op* structures which contain
 *   the operations to be processed
 * @param nb_ops
 *   The number of operations to process
 * @return
 *   The number of operations actually enqueued on the device. The return
 *   value can be less than the value of the *nb_ops* parameter when the
 *   comp device's queue is full or if invalid parameters are specified in
 *   a *rte_comp_op*.
 */
uint16_t
rte_compressdev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
		struct rte_comp_op **ops, uint16_t nb_ops);
452 
/**
 * This should alloc a stream from the device's mempool and initialise it.
 * The application should call this API when setting up for the stateful
 * processing of a set of data on a device. The API can be called multiple
 * times to set up a stream for each data set. The handle returned is only for
 * use with ops of op_type STATEFUL and must be passed to the PMD
 * with every op in the data stream
 *
 * @param dev_id
 *   Compress device identifier
 * @param xform
 *   xform data
 * @param stream
 *   Pointer to where PMD's private stream handle should be stored
 *
 * @return
 *  - 0 if successful and valid stream handle
 *  - <0 in error cases
 *  - Returns -EINVAL if input parameters are invalid.
 *  - Returns -ENOTSUP if comp device does not support STATEFUL operations.
 *  - Returns -ENOTSUP if comp device does not support the comp transform.
 *  - Returns -ENOMEM if the private stream could not be allocated.
 */
int
rte_compressdev_stream_create(uint8_t dev_id,
		const struct rte_comp_xform *xform,
		void **stream);
480 
/**
 * This should clear the stream and return it to the device's mempool.
 *
 * @param dev_id
 *   Compress device identifier
 *
 * @param stream
 *   PMD's private stream data
 *
 * @return
 *  - 0 if successful
 *  - <0 in error cases
 *  - Returns -EINVAL if input parameters are invalid.
 *  - Returns -ENOTSUP if comp device does not support STATEFUL operations.
 *  - Returns -EBUSY if can't free stream as there are inflight operations
 */
int
rte_compressdev_stream_free(uint8_t dev_id, void *stream);
499 
/**
 * This should alloc a private_xform from the device's mempool and initialise
 * it. The application should call this API when setting up for stateless
 * processing on a device. If it returns non-shareable, then the appl cannot
 * share this handle with multiple in-flight ops and should call this API again
 * to get a separate handle for every in-flight op.
 * The handle returned is only valid for use with ops of op_type STATELESS.
 *
 * @param dev_id
 *   Compress device identifier
 * @param xform
 *   xform data
 * @param private_xform
 *   Pointer to where PMD's private_xform handle should be stored
 *
 * @return
 *  - if successful returns 0
 *    and valid private_xform handle
 *  - <0 in error cases
 *  - Returns -EINVAL if input parameters are invalid.
 *  - Returns -ENOTSUP if comp device does not support the comp transform.
 *  - Returns -ENOMEM if the private_xform could not be allocated.
 */
int
rte_compressdev_private_xform_create(uint8_t dev_id,
		const struct rte_comp_xform *xform,
		void **private_xform);
527 
/**
 * This should clear the private_xform and return it to the device's mempool.
 * It is the application's responsibility to ensure that private_xform data
 * is not cleared while there are still in-flight operations using it.
 *
 * @param dev_id
 *   Compress device identifier
 *
 * @param private_xform
 *   PMD's private_xform data
 *
 * @return
 *  - 0 if successful
 *  - <0 in error cases
 *  - Returns -EINVAL if input parameters are invalid.
 */
int
rte_compressdev_private_xform_free(uint8_t dev_id, void *private_xform);
546 
547 #ifdef __cplusplus
548 }
549 #endif
550 
551 #endif /* _RTE_COMPRESSDEV_H_ */
552