xref: /dpdk/lib/compressdev/rte_comp.h (revision 09442498ef736d0a96632cf8b8c15d8ca78a6468)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017-2018 Intel Corporation
3  */
4 
5 #ifndef _RTE_COMP_H_
6 #define _RTE_COMP_H_
7 
8 /**
9  * @file rte_comp.h
10  *
11  * RTE definitions for Data Compression Service
12  *
13  */
14 
15 #ifdef __cplusplus
16 extern "C" {
17 #endif
18 
19 #include <rte_compat.h>
20 #include <rte_mbuf.h>
21 
/**
 * Compression service feature flags
 *
 * @note New feature flags should be added to the end of the list
 *
 * Keep these flags synchronised with rte_comp_get_feature_name()
 */
#define RTE_COMP_FF_STATEFUL_COMPRESSION	(1ULL << 0)
/**< Stateful compression is supported */
#define RTE_COMP_FF_STATEFUL_DECOMPRESSION	(1ULL << 1)
/**< Stateful decompression is supported */
#define RTE_COMP_FF_OOP_SGL_IN_SGL_OUT		(1ULL << 2)
/**< Out-of-place Scatter-gather (SGL) buffers,
 * with multiple segments, are supported in input and output
 */
#define RTE_COMP_FF_OOP_SGL_IN_LB_OUT		(1ULL << 3)
/**< Out-of-place Scatter-gather (SGL) buffers are supported
 * in input, combined with linear buffers (LB), with a
 * single segment, in output
 */
#define RTE_COMP_FF_OOP_LB_IN_SGL_OUT		(1ULL << 4)
/**< Out-of-place Scatter-gather (SGL) buffers are supported
 * in output, combined with linear buffers (LB) in input
 */
#define RTE_COMP_FF_ADLER32_CHECKSUM		(1ULL << 5)
/**< Adler-32 Checksum is supported */
#define RTE_COMP_FF_CRC32_CHECKSUM		(1ULL << 6)
/**< CRC32 Checksum is supported */
#define RTE_COMP_FF_CRC32_ADLER32_CHECKSUM	(1ULL << 7)
/**< Adler-32/CRC32 Checksum is supported */
#define RTE_COMP_FF_MULTI_PKT_CHECKSUM		(1ULL << 8)
/**< Generation of checksum across multiple stateless packets is supported */
#define RTE_COMP_FF_SHA1_HASH			(1ULL << 9)
/**< SHA1 Hash is supported */
#define RTE_COMP_FF_SHA2_SHA256_HASH		(1ULL << 10)
/**< SHA256 Hash of SHA2 family is supported */
#define RTE_COMP_FF_NONCOMPRESSED_BLOCKS	(1ULL << 11)
/**< Creation of non-compressed blocks using RTE_COMP_LEVEL_NONE is supported */
#define RTE_COMP_FF_SHAREABLE_PRIV_XFORM	(1ULL << 12)
/**< Private xforms created by the PMD can be shared
 * across multiple stateless operations. If not set, then app needs
 * to create as many priv_xforms as it expects to have stateless
 * operations in-flight.
 */
#define RTE_COMP_FF_HUFFMAN_FIXED		(1ULL << 13)
/**< Fixed huffman encoding is supported */
#define RTE_COMP_FF_HUFFMAN_DYNAMIC		(1ULL << 14)
/**< Dynamic huffman encoding is supported */
70 
/** Status of comp operation */
enum rte_comp_op_status {
	RTE_COMP_OP_STATUS_SUCCESS = 0,
	/**< Operation completed successfully */
	RTE_COMP_OP_STATUS_NOT_PROCESSED,
	/**< Operation has not yet been processed by the device */
	RTE_COMP_OP_STATUS_INVALID_ARGS,
	/**< Operation failed due to invalid arguments in request */
	RTE_COMP_OP_STATUS_ERROR,
	/**< Error while handling the operation (generic, PMD-specific cause
	 * may be available in rte_comp_op.debug_status)
	 */
	RTE_COMP_OP_STATUS_INVALID_STATE,
	/**< Operation is invoked in invalid state */
	RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED,
	/**< Output buffer ran out of space before operation completed.
	 * Error case. Application must resubmit all data with a larger
	 * output buffer.
	 */
	RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE,
	/**< Output buffer ran out of space before operation completed, but this
	 * is not an error case. Output data up to op.produced can be used and
	 * next op in the stream should continue on from op.consumed+1.
	 */
};
94 
/** Compression Algorithms */
enum rte_comp_algorithm {
	RTE_COMP_ALGO_UNSPECIFIED = 0,
	/**< No compression algorithm specified */
	RTE_COMP_ALGO_NULL,
	/**< No compression.
	 * Pass-through, data is copied unchanged from source buffer to
	 * destination buffer.
	 */
	RTE_COMP_ALGO_DEFLATE,
	/**< DEFLATE compression algorithm
	 * https://tools.ietf.org/html/rfc1951
	 */
	RTE_COMP_ALGO_LZS,
	/**< LZS compression algorithm
	 * https://tools.ietf.org/html/rfc2395
	 */
};
113 
/** Compression Hash Algorithms */
enum rte_comp_hash_algorithm {
	RTE_COMP_HASH_ALGO_NONE = 0,
	/**< No hash */
	RTE_COMP_HASH_ALGO_SHA1,
	/**< SHA1 hash algorithm */
	RTE_COMP_HASH_ALGO_SHA2_256,
	/**< SHA256 hash algorithm of SHA2 family */
};
123 
/** Compression Level.
 * The number is interpreted by each PMD differently. However, lower numbers
 * give fastest compression, at the expense of compression ratio while
 * higher numbers may give better compression ratios but are likely slower.
 */
#define	RTE_COMP_LEVEL_PMD_DEFAULT	(-1)
/**< Use PMD Default */
#define	RTE_COMP_LEVEL_NONE		(0)
/**< Output uncompressed blocks if supported by the specified algorithm */
#define RTE_COMP_LEVEL_MIN		(1)
/**< Use minimum compression level supported by the PMD */
#define RTE_COMP_LEVEL_MAX		(9)
/**< Use maximum compression level supported by the PMD */
137 
/** Compression checksum types */
enum rte_comp_checksum_type {
	RTE_COMP_CHECKSUM_NONE,
	/**< No checksum generated */
	RTE_COMP_CHECKSUM_CRC32,
	/**< Generates a CRC32 checksum, as used by gzip */
	RTE_COMP_CHECKSUM_ADLER32,
	/**< Generates an Adler-32 checksum, as used by zlib */
	RTE_COMP_CHECKSUM_CRC32_ADLER32,
	/**< Generates both Adler-32 and CRC32 checksums, concatenated.
	 * CRC32 is in the lower 32bits, Adler-32 in the upper 32 bits.
	 */
};
151 
152 
/** Compression Huffman Type - used by DEFLATE algorithm */
enum rte_comp_huffman {
	RTE_COMP_HUFFMAN_DEFAULT,
	/**< PMD may choose which Huffman codes to use */
	RTE_COMP_HUFFMAN_FIXED,
	/**< Use Fixed Huffman codes */
	RTE_COMP_HUFFMAN_DYNAMIC,
	/**< Use Dynamic Huffman codes */
};
162 
/** Compression flush flags */
enum rte_comp_flush_flag {
	RTE_COMP_FLUSH_NONE,
	/**< Data is not flushed. Output may remain in the compressor and be
	 * processed during a following op. It may not be possible to decompress
	 * output until a later op with some other flush flag has been sent.
	 */
	RTE_COMP_FLUSH_SYNC,
	/**< All data should be flushed to output buffer. Output data can be
	 * decompressed. However state and history is not cleared, so future
	 * operations may use history from this operation.
	 */
	RTE_COMP_FLUSH_FULL,
	/**< All data should be flushed to output buffer. Output data can be
	 * decompressed. State and history data is cleared, so future
	 * ops will be independent of ops processed before this.
	 */
	RTE_COMP_FLUSH_FINAL
	/**< Same as RTE_COMP_FLUSH_FULL but if op.algo is RTE_COMP_ALGO_DEFLATE
	 * then bfinal bit is set in the last block.
	 */
};
185 
/** Compression transform types */
enum rte_comp_xform_type {
	RTE_COMP_COMPRESS,
	/**< Compression service - compress */
	RTE_COMP_DECOMPRESS,
	/**< Compression service - decompress */
};
193 
/** Compression operation type */
enum rte_comp_op_type {
	RTE_COMP_OP_STATELESS,
	/**< All data to be processed is submitted in the op, no state or
	 * history from previous ops is used and none will be stored for future
	 * ops. Flush flag must be set to either FLUSH_FULL or FLUSH_FINAL.
	 */
	RTE_COMP_OP_STATEFUL
	/**< There may be more data to be processed after this op, it's part of
	 * a stream of data. State and history from previous ops can be used
	 * and resulting state and history can be stored for future ops,
	 * depending on flush flag.
	 */
};
208 
209 
/** Parameters specific to the deflate algorithm */
struct rte_comp_deflate_params {
	enum rte_comp_huffman huffman;
	/**< Compression huffman encoding type */
};
215 
/** Setup Data for compression */
struct rte_comp_compress_xform {
	enum rte_comp_algorithm algo;
	/**< Algorithm to use for compress operation */
	union {
		struct rte_comp_deflate_params deflate;
		/**< Parameters specific to the deflate algorithm */
	}; /**< Algorithm specific parameters */
	int level;
	/**< Compression level, e.g. RTE_COMP_LEVEL_PMD_DEFAULT or a value
	 * in [RTE_COMP_LEVEL_NONE .. RTE_COMP_LEVEL_MAX]
	 */
	uint8_t window_size;
	/**< Base two log value of sliding window to be used. If window size
	 * can't be supported by the PMD then it may fall back to a smaller
	 * size. This is likely to result in a worse compression ratio.
	 */
	enum rte_comp_checksum_type chksum;
	/**< Type of checksum to generate on the uncompressed data */
	enum rte_comp_hash_algorithm hash_algo;
	/**< Hash algorithm to be used with compress operation. Hash is always
	 * done on plaintext.
	 */
};
238 
/**
 * Setup Data for decompression.
 */
struct rte_comp_decompress_xform {
	enum rte_comp_algorithm algo;
	/**< Algorithm to use for decompression */
	enum rte_comp_checksum_type chksum;
	/**< Type of checksum to generate on the decompressed data */
	uint8_t window_size;
	/**< Base two log value of sliding window which was used to generate
	 * compressed data. If window size can't be supported by the PMD then
	 * setup of stream or private_xform should fail.
	 */
	enum rte_comp_hash_algorithm hash_algo;
	/**< Hash algorithm to be used with decompress operation. Hash is always
	 * done on plaintext.
	 */
};
257 
/**
 * Compression transform structure.
 *
 * This is used to specify the compression transforms required.
 * Each transform structure can hold a single transform, the type field is
 * used to specify which transform is contained within the union.
 */
struct rte_comp_xform {
	enum rte_comp_xform_type type;
	/**< xform type */
	union {
		struct rte_comp_compress_xform compress;
		/**< xform for compress operation */
		struct rte_comp_decompress_xform decompress;
		/**< xform for decompress operation */
	};
};
275 
/**
 * Compression Operation.
 *
 * This structure contains data relating to performing a compression
 * operation on the referenced mbuf data buffers.
 *
 * Comp operations are enqueued and dequeued in comp PMDs using the
 * rte_compressdev_enqueue_burst() / rte_compressdev_dequeue_burst() APIs
 */
struct rte_comp_op {
	enum rte_comp_op_type op_type;
	union {
		void *private_xform;
		/**< Stateless private PMD data derived from an rte_comp_xform.
		 * A handle returned by rte_compressdev_private_xform_create()
		 * must be attached to operations of op_type
		 * RTE_COMP_OP_STATELESS.
		 */
		void *stream;
		/**< Private PMD data derived initially from an rte_comp_xform,
		 * which holds state and history data and evolves as operations
		 * are processed. rte_compressdev_stream_create() must be called
		 * on a device for all STATEFUL data streams and the resulting
		 * stream attached to the one or more operations associated
		 * with the data stream.
		 * All operations in a stream must be sent to the same device.
		 */
	};

	struct rte_mempool *mempool;
	/**< Pool from which operation is allocated */
	rte_iova_t iova_addr;
	/**< IOVA address of this operation */
	struct rte_mbuf *m_src;
	/**< source mbuf
	 * The total size of the input buffer(s) can be retrieved using
	 * rte_pktmbuf_pkt_len(m_src). The max data size which can fit in a
	 * single mbuf is limited by the uint16_t rte_mbuf.data_len to 64k-1.
	 * If the input data is bigger than this it can be passed to the PMD in
	 * a chain of mbufs if the PMD's capabilities indicate it supports this.
	 */
	struct rte_mbuf *m_dst;
	/**< destination mbuf
	 * The total size of the output buffer(s) can be retrieved using
	 * rte_pktmbuf_pkt_len(m_dst). The max data size which can fit in a
	 * single mbuf is limited by the uint16_t rte_mbuf.data_len to 64k-1.
	 * If the output data is expected to be bigger than this a chain of
	 * mbufs can be passed to the PMD if the PMD's capabilities indicate
	 * it supports this.
	 *
	 * @note, if incompressible data is passed to an engine for compression
	 * using RTE_COMP_ALGO_DEFLATE, it's possible for the output data
	 * to be larger than the uncompressed data, due to the inclusion
	 * of the DEFLATE header blocks. The size of m_dst should accommodate
	 * this, else OUT_OF_SPACE errors can be expected in this case.
	 */

	struct {
		uint32_t offset;
		/**< Starting point for compression or decompression,
		 * specified as number of bytes from start of packet in
		 * source buffer.
		 * This offset starts from the first segment
		 * of the buffer, in case the m_src is a chain of mbufs.
		 * Starting point for checksum generation in compress direction.
		 */
		uint32_t length;
		/**< The length, in bytes, of the data in source buffer
		 * to be compressed or decompressed.
		 * Also the length of the data over which the checksum
		 * should be generated in compress direction
		 */
	} src;
	struct {
		uint32_t offset;
		/**< Starting point for writing output data, specified as
		 * number of bytes from start of packet in dest
		 * buffer.
		 * This offset starts from the first segment
		 * of the buffer, in case the m_dst is a chain of mbufs.
		 * Starting point for checksum generation in
		 * decompress direction.
		 */
	} dst;
	struct {
		uint8_t *digest;
		/**< Output buffer to store hash output, if enabled in xform.
		 * Buffer would contain valid value only after an op with
		 * flush flag = RTE_COMP_FLUSH_FULL/FLUSH_FINAL is processed
		 * successfully.
		 *
		 * Length of buffer should be contiguous and large enough to
		 * accommodate digest produced by specific hash algo.
		 */
		rte_iova_t iova_addr;
		/**< IO address of the buffer */
	} hash;
	enum rte_comp_flush_flag flush_flag;
	/**< Defines flush characteristics for the output data.
	 * Only applicable in compress direction
	 */
	uint64_t input_chksum;
	/**< An input checksum can be provided to generate a
	 * cumulative checksum across sequential blocks in a STATELESS stream.
	 * Checksum type is as specified in xform chksum_type
	 */
	uint64_t output_chksum;
	/**< If a checksum is generated it will be written in here.
	 * Checksum type is as specified in xform chksum_type.
	 */
	uint32_t consumed;
	/**< The number of bytes from the source buffer
	 * which were compressed/decompressed.
	 */
	uint32_t produced;
	/**< The number of bytes written to the destination buffer
	 * which were compressed/decompressed.
	 */
	uint64_t debug_status;
	/**<
	 * Status of the operation is returned in the status param.
	 * This field allows the PMD to pass back extra
	 * pmd-specific debug information. Value is not defined on the API.
	 */
	uint8_t status;
	/**<
	 * Operation status - use values from enum rte_comp_op_status.
	 * This is reset to
	 * RTE_COMP_OP_STATUS_NOT_PROCESSED on allocation from mempool and
	 * will be set to RTE_COMP_OP_STATUS_SUCCESS after operation
	 * is successfully processed by a PMD
	 */
} __rte_cache_aligned;
408 
/**
 * Creates an operation pool
 *
 * @param name
 *   Compress pool name
 * @param nb_elts
 *   Number of elements in pool
 * @param cache_size
 *   Number of elements to cache on lcore, see
 *   *rte_mempool_create* for further details about cache size
 * @param user_size
 *   Size of private data to allocate for user with each operation
 * @param socket_id
 *   Socket identifier to allocate memory on
 * @return
 *  - On success pointer to mempool
 *  - On failure NULL
 */
__rte_experimental
struct rte_mempool *
rte_comp_op_pool_create(const char *name,
		unsigned int nb_elts, unsigned int cache_size,
		uint16_t user_size, int socket_id);
432 
/**
 * Allocate an operation from a mempool with default parameters set
 *
 * @param mempool
 *   Compress operation mempool
 *
 * @return
 * - On success returns a valid rte_comp_op structure
 * - On failure returns NULL
 */
__rte_experimental
struct rte_comp_op *
rte_comp_op_alloc(struct rte_mempool *mempool);
446 
/**
 * Bulk allocate operations from a mempool with default parameters set
 *
 * @param mempool
 *   Compress operation mempool
 * @param ops
 *   Array to place allocated operations
 * @param nb_ops
 *   Number of operations to allocate
 * @return
 *   - nb_ops: Success, the nb_ops requested was allocated
 *   - 0: Not enough entries in the mempool; no ops are retrieved.
 */
__rte_experimental
int
rte_comp_op_bulk_alloc(struct rte_mempool *mempool,
		struct rte_comp_op **ops, uint16_t nb_ops);
464 
/**
 * Free operation structure
 * If the operation has been allocated from a rte_mempool, then the operation
 * will be returned to the mempool.
 *
 * @param op
 *   Compress operation pointer allocated from rte_comp_op_alloc()
 *   If op is NULL, no operation is performed.
 */
__rte_experimental
void
rte_comp_op_free(struct rte_comp_op *op);
477 
/**
 * Bulk free operation structures
 * If operations have been allocated from an rte_mempool, then the operations
 * will be returned to the mempool.
 * The array entry will be cleared.
 *
 * @param ops
 *   Array of Compress operations
 * @param nb_ops
 *   Number of operations to free
 */
__rte_experimental
void
rte_comp_op_bulk_free(struct rte_comp_op **ops, uint16_t nb_ops);
492 
/**
 * Get the name of a compress service feature flag
 *
 * @param flag
 *   The mask describing the flag (one of the RTE_COMP_FF_* flags)
 *
 * @return
 *   The name of this flag, or NULL if it's not a valid feature flag.
 */
__rte_experimental
const char *
rte_comp_get_feature_name(uint64_t flag);
505 
506 #ifdef __cplusplus
507 }
508 #endif
509 
510 #endif /* _RTE_COMP_H_ */
511