/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation
 */

#ifndef _RTE_COMP_H_
#define _RTE_COMP_H_

/**
 * @file rte_comp.h
 *
 * RTE definitions for Data Compression Service
 */

#ifdef __cplusplus
extern "C" {
#endif

#include <rte_mbuf.h>

/**
 * Compression service feature flags
 *
 * @note New feature flags should be added to the end of the list
 *
 * Keep these flags synchronised with rte_comp_get_feature_name()
 */
#define RTE_COMP_FF_STATEFUL_COMPRESSION (1ULL << 0)
/**< Stateful compression is supported */
#define RTE_COMP_FF_STATEFUL_DECOMPRESSION (1ULL << 1)
/**< Stateful decompression is supported */
#define RTE_COMP_FF_OOP_SGL_IN_SGL_OUT (1ULL << 2)
/**< Out-of-place scatter-gather (SGL) buffers,
 * with multiple segments, are supported in input and output
 */
#define RTE_COMP_FF_OOP_SGL_IN_LB_OUT (1ULL << 3)
/**< Out-of-place scatter-gather (SGL) buffers are supported
 * in input, combined with linear buffers (LB), with a
 * single segment, in output
 */
#define RTE_COMP_FF_OOP_LB_IN_SGL_OUT (1ULL << 4)
/**< Out-of-place scatter-gather (SGL) buffers are supported
 * in output, combined with linear buffers (LB) in input
 */
#define RTE_COMP_FF_ADLER32_CHECKSUM (1ULL << 5)
/**< Adler-32 checksum is supported */
#define RTE_COMP_FF_CRC32_CHECKSUM (1ULL << 6)
/**< CRC32 checksum is supported */
#define RTE_COMP_FF_CRC32_ADLER32_CHECKSUM (1ULL << 7)
/**< Combined Adler-32/CRC32 checksum is supported */
#define RTE_COMP_FF_MULTI_PKT_CHECKSUM (1ULL << 8)
/**< Generation of a checksum across multiple stateless packets is supported */
#define RTE_COMP_FF_SHA1_HASH (1ULL << 9)
/**< SHA1 hash is supported */
#define RTE_COMP_FF_SHA2_SHA256_HASH (1ULL << 10)
/**< SHA256 hash of the SHA2 family is supported */
#define RTE_COMP_FF_NONCOMPRESSED_BLOCKS (1ULL << 11)
/**< Creation of non-compressed blocks using RTE_COMP_LEVEL_NONE is supported */
#define RTE_COMP_FF_SHAREABLE_PRIV_XFORM (1ULL << 12)
/**< Private xforms created by the PMD can be shared
 * across multiple stateless operations. If not set, then the application
 * needs to create as many priv_xforms as it expects to have stateless
 * operations in flight.
 */
#define RTE_COMP_FF_HUFFMAN_FIXED (1ULL << 13)
/**< Fixed Huffman encoding is supported */
#define RTE_COMP_FF_HUFFMAN_DYNAMIC (1ULL << 14)
/**< Dynamic Huffman encoding is supported */

/** Status of comp operation */
enum rte_comp_op_status {
	RTE_COMP_OP_STATUS_SUCCESS = 0,
	/**< Operation completed successfully */
	RTE_COMP_OP_STATUS_NOT_PROCESSED,
	/**< Operation has not yet been processed by the device */
	RTE_COMP_OP_STATUS_INVALID_ARGS,
	/**< Operation failed due to invalid arguments in request */
	RTE_COMP_OP_STATUS_ERROR,
	/**< Error encountered while handling the operation */
	RTE_COMP_OP_STATUS_INVALID_STATE,
	/**< Operation is invoked in an invalid state */
	RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED,
	/**< Output buffer ran out of space before operation completed.
	 * Error case. Application must resubmit all data with a larger
	 * output buffer.
	 */
	RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE,
	/**< Output buffer ran out of space before operation completed, but
	 * this is not an error case. Output data up to op.produced can be
	 * used and the next op in the stream should continue from the
	 * (op.consumed+1)-th byte, i.e. at source offset op.consumed.
	 */
};
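
/**
 * Example: handling an RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE result.
 * This is a minimal illustrative sketch, not part of the API: it assumes a
 * dequeued stateful op with a contiguous m_dst, consume_output() is a
 * hypothetical application helper, and error handling is omitted.
 *
 * @code
 * if (op->status == RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE) {
 *	// op->produced bytes of output, starting at dst.offset, are valid
 *	consume_output(rte_pktmbuf_mtod_offset(op->m_dst, uint8_t *,
 *			op->dst.offset), op->produced);
 *	// continue the stream from source offset op->consumed
 *	op->src.offset += op->consumed;
 *	op->src.length -= op->consumed;
 *	// reset or replace m_dst, then resubmit the op to the same device
 * }
 * @endcode
 */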

/** Compression Algorithms */
enum rte_comp_algorithm {
	RTE_COMP_ALGO_UNSPECIFIED = 0,
	/**< No compression algorithm specified */
	RTE_COMP_ALGO_NULL,
	/**< No compression.
	 * Pass-through, data is copied unchanged from source buffer to
	 * destination buffer.
	 */
	RTE_COMP_ALGO_DEFLATE,
	/**< DEFLATE compression algorithm
	 * https://tools.ietf.org/html/rfc1951
	 */
	RTE_COMP_ALGO_LZS,
	/**< LZS compression algorithm
	 * https://tools.ietf.org/html/rfc2395
	 */
	RTE_COMP_ALGO_LIST_END
};

/** Compression Hash Algorithms */
enum rte_comp_hash_algorithm {
	RTE_COMP_HASH_ALGO_NONE = 0,
	/**< No hash */
	RTE_COMP_HASH_ALGO_SHA1,
	/**< SHA1 hash algorithm */
	RTE_COMP_HASH_ALGO_SHA2_256,
	/**< SHA256 hash algorithm of the SHA2 family */
	RTE_COMP_HASH_ALGO_LIST_END
};

/** Compression Level.
 * The number is interpreted by each PMD differently. However, lower numbers
 * give the fastest compression, at the expense of compression ratio, while
 * higher numbers may give better compression ratios but are likely slower.
 */
#define RTE_COMP_LEVEL_PMD_DEFAULT (-1)
/**< Use PMD default */
#define RTE_COMP_LEVEL_NONE (0)
/**< Output uncompressed blocks if supported by the specified algorithm */
#define RTE_COMP_LEVEL_MIN (1)
/**< Use minimum compression level supported by the PMD */
#define RTE_COMP_LEVEL_MAX (9)
/**< Use maximum compression level supported by the PMD */

/** Compression checksum types */
enum rte_comp_checksum_type {
	RTE_COMP_CHECKSUM_NONE,
	/**< No checksum generated */
	RTE_COMP_CHECKSUM_CRC32,
	/**< Generates a CRC32 checksum, as used by gzip */
	RTE_COMP_CHECKSUM_ADLER32,
	/**< Generates an Adler-32 checksum, as used by zlib */
	RTE_COMP_CHECKSUM_CRC32_ADLER32,
	/**< Generates both Adler-32 and CRC32 checksums, concatenated.
	 * CRC32 is in the lower 32 bits, Adler-32 in the upper 32 bits.
	 */
};

/** Compression Huffman type - used by the DEFLATE algorithm */
enum rte_comp_huffman {
	RTE_COMP_HUFFMAN_DEFAULT,
	/**< PMD may choose which Huffman codes to use */
	RTE_COMP_HUFFMAN_FIXED,
	/**< Use fixed Huffman codes */
	RTE_COMP_HUFFMAN_DYNAMIC,
	/**< Use dynamic Huffman codes */
};

/** Compression flush flags */
enum rte_comp_flush_flag {
	RTE_COMP_FLUSH_NONE,
	/**< Data is not flushed. Output may remain in the compressor and be
	 * processed during a following op. It may not be possible to
	 * decompress the output until a later op with some other flush flag
	 * has been sent.
	 */
	RTE_COMP_FLUSH_SYNC,
	/**< All data should be flushed to the output buffer. Output data can
	 * be decompressed. However, state and history are not cleared, so
	 * future operations may use history from this operation.
	 */
	RTE_COMP_FLUSH_FULL,
	/**< All data should be flushed to the output buffer. Output data can
	 * be decompressed. State and history data are cleared, so future
	 * ops will be independent of ops processed before this.
	 */
	RTE_COMP_FLUSH_FINAL
	/**< Same as RTE_COMP_FLUSH_FULL but if op.algo is
	 * RTE_COMP_ALGO_DEFLATE then the BFINAL bit is set in the last block.
	 */
};
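
/**
 * Example: choosing a flush flag. In a stateful stream, intermediate ops
 * typically carry RTE_COMP_FLUSH_NONE and the final op RTE_COMP_FLUSH_FINAL;
 * stateless ops must use RTE_COMP_FLUSH_FULL or RTE_COMP_FLUSH_FINAL.
 * A minimal illustrative sketch, assuming op points at an allocated
 * operation and last marks the final chunk of the stream (see
 * enum rte_comp_op_type and struct rte_comp_op below):
 *
 * @code
 * op->op_type = RTE_COMP_OP_STATEFUL;
 * op->flush_flag = last ? RTE_COMP_FLUSH_FINAL : RTE_COMP_FLUSH_NONE;
 * @endcode
 */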

/** Compression transform types */
enum rte_comp_xform_type {
	RTE_COMP_COMPRESS,
	/**< Compression service - compress */
	RTE_COMP_DECOMPRESS,
	/**< Compression service - decompress */
};

/** Compression operation type */
enum rte_comp_op_type {
	RTE_COMP_OP_STATELESS,
	/**< All data to be processed is submitted in the op, no state or
	 * history from previous ops is used and none will be stored for
	 * future ops. Flush flag must be set to either FLUSH_FULL or
	 * FLUSH_FINAL.
	 */
	RTE_COMP_OP_STATEFUL
	/**< There may be more data to be processed after this op, it is part
	 * of a stream of data. State and history from previous ops can be
	 * used and the resulting state and history can be stored for future
	 * ops, depending on the flush flag.
	 */
};

/** Parameters specific to the DEFLATE algorithm */
struct rte_comp_deflate_params {
	enum rte_comp_huffman huffman;
	/**< Compression Huffman encoding type */
};

/** Setup data for compression */
struct rte_comp_compress_xform {
	enum rte_comp_algorithm algo;
	/**< Algorithm to use for compress operation */
	union {
		struct rte_comp_deflate_params deflate;
		/**< Parameters specific to the DEFLATE algorithm */
	}; /**< Algorithm specific parameters */
	int level;
	/**< Compression level */
	uint8_t window_size;
	/**< Base-two log value of the sliding window to be used. If the
	 * window size can't be supported by the PMD then it may fall back to
	 * a smaller size. This is likely to result in a worse compression
	 * ratio.
	 */
	enum rte_comp_checksum_type chksum;
	/**< Type of checksum to generate on the uncompressed data */
	enum rte_comp_hash_algorithm hash_algo;
	/**< Hash algorithm to be used with the compress operation. Hash is
	 * always done on the plaintext.
	 */
};

/**
 * Setup data for decompression.
 */
struct rte_comp_decompress_xform {
	enum rte_comp_algorithm algo;
	/**< Algorithm to use for decompression */
	enum rte_comp_checksum_type chksum;
	/**< Type of checksum to generate on the decompressed data */
	uint8_t window_size;
	/**< Base-two log value of the sliding window which was used to
	 * generate the compressed data. If the window size can't be supported
	 * by the PMD then setup of the stream or private_xform should fail.
	 */
	enum rte_comp_hash_algorithm hash_algo;
	/**< Hash algorithm to be used with the decompress operation. Hash is
	 * always done on the plaintext.
	 */
};

/**
 * Compression transform structure.
 *
 * This is used to specify the compression transforms required.
 * Each transform structure can hold a single transform; the type field is
 * used to specify which transform is contained within the union.
 */
struct rte_comp_xform {
	enum rte_comp_xform_type type;
	/**< xform type */
	union {
		struct rte_comp_compress_xform compress;
		/**< xform for compress operation */
		struct rte_comp_decompress_xform decompress;
		/**< xform for decompress operation */
	};
};
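
/**
 * Example: initializing a DEFLATE compress xform. A minimal illustrative
 * sketch, assuming a device dev_id whose capabilities include dynamic
 * Huffman encoding; the created private_xform handle is what a stateless op
 * carries (see struct rte_comp_op below).
 * rte_compressdev_private_xform_create() is declared in rte_compressdev.h.
 *
 * @code
 * struct rte_comp_xform xform = {
 *	.type = RTE_COMP_COMPRESS,
 *	.compress = {
 *		.algo = RTE_COMP_ALGO_DEFLATE,
 *		.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC,
 *		.level = RTE_COMP_LEVEL_PMD_DEFAULT,
 *		.window_size = 15,	// log2 of a 32 KB sliding window
 *		.chksum = RTE_COMP_CHECKSUM_NONE,
 *		.hash_algo = RTE_COMP_HASH_ALGO_NONE,
 *	},
 * };
 * void *priv_xform = NULL;
 * int ret = rte_compressdev_private_xform_create(dev_id, &xform, &priv_xform);
 * @endcode
 */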

/**
 * Compression Operation.
 *
 * This structure contains data relating to performing a compression
 * operation on the referenced mbuf data buffers.
 *
 * Comp operations are enqueued and dequeued in comp PMDs using the
 * rte_compressdev_enqueue_burst() / rte_compressdev_dequeue_burst() APIs.
 */
struct rte_comp_op {
	enum rte_comp_op_type op_type;
	union {
		void *private_xform;
		/**< Stateless private PMD data derived from an rte_comp_xform.
		 * A handle returned by rte_compressdev_private_xform_create()
		 * must be attached to operations of op_type
		 * RTE_COMP_OP_STATELESS.
		 */
		void *stream;
		/**< Private PMD data derived initially from an rte_comp_xform,
		 * which holds state and history data and evolves as operations
		 * are processed. rte_compressdev_stream_create() must be
		 * called on a device for all STATEFUL data streams and the
		 * resulting stream attached to the one or more operations
		 * associated with the data stream.
		 * All operations in a stream must be sent to the same device.
		 */
	};

	struct rte_mempool *mempool;
	/**< Pool from which operation is allocated */
	rte_iova_t iova_addr;
	/**< IOVA address of this operation */
	struct rte_mbuf *m_src;
	/**< Source mbuf.
	 * The total size of the input buffer(s) can be retrieved using
	 * rte_pktmbuf_pkt_len(m_src). The max data size which can fit in a
	 * single mbuf is limited by the uint16_t rte_mbuf.data_len to 64k-1.
	 * If the input data is bigger than this it can be passed to the PMD
	 * in a chain of mbufs if the PMD's capabilities indicate it supports
	 * this.
	 */
	struct rte_mbuf *m_dst;
	/**< Destination mbuf.
	 * The total size of the output buffer(s) can be retrieved using
	 * rte_pktmbuf_pkt_len(m_dst). The max data size which can fit in a
	 * single mbuf is limited by the uint16_t rte_mbuf.data_len to 64k-1.
	 * If the output data is expected to be bigger than this, a chain of
	 * mbufs can be passed to the PMD if the PMD's capabilities indicate
	 * it supports this.
	 *
	 * @note If incompressible data is passed to an engine for compression
	 * using RTE_COMP_ALGO_DEFLATE, it is possible for the output data
	 * to be larger than the uncompressed data, due to the inclusion
	 * of DEFLATE block headers. The size of m_dst should accommodate
	 * this, else OUT_OF_SPACE errors can be expected in this case.
	 */

	struct {
		uint32_t offset;
		/**< Starting point for compression or decompression,
		 * specified as number of bytes from start of packet in
		 * source buffer.
		 * This offset starts from the first segment
		 * of the buffer, in case the m_src is a chain of mbufs.
		 * Starting point for checksum generation in compress
		 * direction.
		 */
		uint32_t length;
		/**< The length, in bytes, of the data in the source buffer
		 * to be compressed or decompressed.
		 * Also the length of the data over which the checksum
		 * should be generated in compress direction.
		 */
	} src;
	struct {
		uint32_t offset;
		/**< Starting point for writing output data, specified as
		 * number of bytes from start of packet in dest
		 * buffer.
		 * This offset starts from the first segment
		 * of the buffer, in case the m_dst is a chain of mbufs.
		 * Starting point for checksum generation in
		 * decompress direction.
		 */
	} dst;
	struct {
		uint8_t *digest;
		/**< Output buffer to store the hash output, if enabled in the
		 * xform. The buffer contains a valid value only after an op
		 * with flush flag = RTE_COMP_FLUSH_FULL/FLUSH_FINAL is
		 * processed successfully.
		 *
		 * The buffer should be contiguous and large enough to
		 * accommodate the digest produced by the specific hash algo.
		 */
		rte_iova_t iova_addr;
		/**< IO address of the buffer */
	} hash;
	enum rte_comp_flush_flag flush_flag;
	/**< Defines flush characteristics for the output data.
	 * Only applicable in compress direction.
	 */
	uint64_t input_chksum;
	/**< An input checksum can be provided to generate a
	 * cumulative checksum across sequential blocks in a STATELESS stream.
	 * Checksum type is as specified in the xform chksum field.
	 */
	uint64_t output_chksum;
	/**< If a checksum is generated it will be written in here.
	 * Checksum type is as specified in the xform chksum field.
	 */
	uint32_t consumed;
	/**< The number of bytes from the source buffer
	 * which were compressed/decompressed.
	 */
	uint32_t produced;
	/**< The number of bytes written to the destination buffer
	 * which were compressed/decompressed.
	 */
	uint64_t debug_status;
	/**<
	 * Status of the operation is returned in the status field.
	 * This field allows the PMD to pass back extra
	 * PMD-specific debug information. Its value is not defined by the
	 * API.
	 */
	uint8_t status;
	/**<
	 * Operation status - use values from enum rte_comp_op_status.
	 * This is reset to
	 * RTE_COMP_OP_STATUS_NOT_PROCESSED on allocation from the mempool and
	 * will be set to RTE_COMP_OP_STATUS_SUCCESS after the operation
	 * is successfully processed by a PMD.
	 */
} __rte_cache_aligned;
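
/**
 * Example: preparing and submitting a stateless compress op. A minimal
 * illustrative sketch, assuming op_pool, priv_xform (as created above),
 * populated m_src/m_dst mbufs and a configured device dev_id with queue
 * pair 0; consume_output() is a hypothetical application helper and error
 * handling is omitted. rte_compressdev_enqueue_burst() and
 * rte_compressdev_dequeue_burst() are declared in rte_compressdev.h.
 *
 * @code
 * struct rte_comp_op *op = rte_comp_op_alloc(op_pool);
 *
 * op->op_type = RTE_COMP_OP_STATELESS;
 * op->private_xform = priv_xform;
 * op->m_src = m_src;
 * op->m_dst = m_dst;
 * op->src.offset = 0;
 * op->src.length = rte_pktmbuf_pkt_len(m_src);
 * op->dst.offset = 0;
 * op->flush_flag = RTE_COMP_FLUSH_FINAL;	// stateless: FULL or FINAL
 *
 * (void)rte_compressdev_enqueue_burst(dev_id, 0, &op, 1);
 * while (rte_compressdev_dequeue_burst(dev_id, 0, &op, 1) == 0)
 *	;	// busy-poll until the op completes
 * if (op->status == RTE_COMP_OP_STATUS_SUCCESS)
 *	consume_output(op->m_dst, op->produced);	// hypothetical helper
 * @endcode
 */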

/**
 * Creates an operation pool.
 *
 * @param name
 *   Compress pool name
 * @param nb_elts
 *   Number of elements in pool
 * @param cache_size
 *   Number of elements to cache on an lcore, see
 *   *rte_mempool_create* for further details about cache size
 * @param user_size
 *   Size of private data to allocate for user with each operation
 * @param socket_id
 *   Socket identifier to allocate memory on
 * @return
 *  - On success pointer to mempool
 *  - On failure NULL
 */
__rte_experimental
struct rte_mempool *
rte_comp_op_pool_create(const char *name,
		unsigned int nb_elts, unsigned int cache_size,
		uint16_t user_size, int socket_id);

/**
 * Allocate an operation from a mempool with default parameters set.
 *
 * @param mempool
 *   Compress operation mempool
 *
 * @return
 *  - On success returns a valid rte_comp_op structure
 *  - On failure returns NULL
 */
__rte_experimental
struct rte_comp_op *
rte_comp_op_alloc(struct rte_mempool *mempool);

/**
 * Bulk allocate operations from a mempool with default parameters set.
 *
 * @param mempool
 *   Compress operation mempool
 * @param ops
 *   Array in which to place allocated operations
 * @param nb_ops
 *   Number of operations to allocate
 * @return
 *   - nb_ops: Success, the requested nb_ops operations were allocated
 *   - 0: Not enough entries in the mempool; no ops are retrieved.
 */
__rte_experimental
int
rte_comp_op_bulk_alloc(struct rte_mempool *mempool,
		struct rte_comp_op **ops, uint16_t nb_ops);
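
/**
 * Example: creating an op pool and bulk-allocating operations. A minimal
 * illustrative sketch with hypothetical sizing values; rte_socket_id() is
 * declared in rte_lcore.h.
 *
 * @code
 * struct rte_mempool *op_pool = rte_comp_op_pool_create("comp_op_pool",
 *		4096, 128, 0, rte_socket_id());
 *
 * struct rte_comp_op *ops[32];
 * if (rte_comp_op_bulk_alloc(op_pool, ops, 32) == 32) {
 *	// ... fill in and enqueue the 32 ops ...
 *	rte_comp_op_bulk_free(ops, 32);
 * }
 * @endcode
 */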
474 */ 475 __rte_experimental 476 void 477 rte_comp_op_free(struct rte_comp_op *op); 478 479 /** 480 * Bulk free operation structures 481 * If operations have been allocated from an rte_mempool, then the operations 482 * will be returned to the mempool. 483 * The array entry will be cleared. 484 * 485 * @param ops 486 * Array of Compress operations 487 * @param nb_ops 488 * Number of operations to free 489 */ 490 __rte_experimental 491 void 492 rte_comp_op_bulk_free(struct rte_comp_op **ops, uint16_t nb_ops); 493 494 /** 495 * Get the name of a compress service feature flag 496 * 497 * @param flag 498 * The mask describing the flag 499 * 500 * @return 501 * The name of this flag, or NULL if it's not a valid feature flag. 502 */ 503 __rte_experimental 504 const char * 505 rte_comp_get_feature_name(uint64_t flag); 506 507 #ifdef __cplusplus 508 } 509 #endif 510 511 #endif /* _RTE_COMP_H_ */ 512