/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation
 */

#ifndef _RTE_COMP_H_
#define _RTE_COMP_H_

/**
 * @file rte_comp.h
 *
 * RTE definitions for Data Compression Service
 */

#ifdef __cplusplus
extern "C" {
#endif

#include <rte_compat.h>
#include <rte_mbuf.h>

/**
 * Compression service feature flags
 *
 * @note New feature flags should be added to the end of the list
 *
 * Keep these flags synchronised with rte_comp_get_feature_name()
 */
#define RTE_COMP_FF_STATEFUL_COMPRESSION	(1ULL << 0)
/**< Stateful compression is supported */
#define RTE_COMP_FF_STATEFUL_DECOMPRESSION	(1ULL << 1)
/**< Stateful decompression is supported */
#define RTE_COMP_FF_OOP_SGL_IN_SGL_OUT		(1ULL << 2)
/**< Out-of-place scatter-gather (SGL) buffers,
 * with multiple segments, are supported in input and output
 */
#define RTE_COMP_FF_OOP_SGL_IN_LB_OUT		(1ULL << 3)
/**< Out-of-place scatter-gather (SGL) buffers are supported
 * in input, combined with linear buffers (LB), with a
 * single segment, in output
 */
#define RTE_COMP_FF_OOP_LB_IN_SGL_OUT		(1ULL << 4)
/**< Out-of-place scatter-gather (SGL) buffers are supported
 * in output, combined with linear buffers (LB) in input
 */
#define RTE_COMP_FF_ADLER32_CHECKSUM		(1ULL << 5)
/**< Adler-32 checksum is supported */
#define RTE_COMP_FF_CRC32_CHECKSUM		(1ULL << 6)
/**< CRC32 checksum is supported */
#define RTE_COMP_FF_CRC32_ADLER32_CHECKSUM	(1ULL << 7)
/**< Combined CRC32/Adler-32 checksum is supported */
#define RTE_COMP_FF_MULTI_PKT_CHECKSUM		(1ULL << 8)
/**< Generation of a checksum across multiple stateless packets is supported */
#define RTE_COMP_FF_SHA1_HASH			(1ULL << 9)
/**< SHA1 hash is supported */
#define RTE_COMP_FF_SHA2_SHA256_HASH		(1ULL << 10)
/**< SHA256 hash of the SHA2 family is supported */
#define RTE_COMP_FF_NONCOMPRESSED_BLOCKS	(1ULL << 11)
/**< Creation of non-compressed blocks using RTE_COMP_LEVEL_NONE is supported */
#define RTE_COMP_FF_SHAREABLE_PRIV_XFORM	(1ULL << 12)
/**< Private xforms created by the PMD can be shared across multiple stateless
 * operations. If not set, then the application needs to create as many
 * priv_xforms as it expects to have stateless operations in-flight.
 */
#define RTE_COMP_FF_HUFFMAN_FIXED		(1ULL << 13)
/**< Fixed Huffman encoding is supported */
#define RTE_COMP_FF_HUFFMAN_DYNAMIC		(1ULL << 14)
/**< Dynamic Huffman encoding is supported */

/** Status of comp operation */
enum rte_comp_op_status {
	RTE_COMP_OP_STATUS_SUCCESS = 0,
	/**< Operation completed successfully */
	RTE_COMP_OP_STATUS_NOT_PROCESSED,
	/**< Operation has not yet been processed by the device */
	RTE_COMP_OP_STATUS_INVALID_ARGS,
	/**< Operation failed due to invalid arguments in the request */
	RTE_COMP_OP_STATUS_ERROR,
	/**< Error while handling the operation */
	RTE_COMP_OP_STATUS_INVALID_STATE,
	/**< Operation was invoked in an invalid state */
	RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED,
	/**< Output buffer ran out of space before the operation completed.
	 * Error case. The application must resubmit all data with a larger
	 * output buffer.
	 */
	RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE,
	/**< Output buffer ran out of space before the operation completed, but
	 * this is not an error case. Output data up to op.produced can be used
	 * and the next op in the stream should continue on from op.consumed+1.
	 */
};
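
/*
 * Example: a minimal sketch of dequeue-side status handling, illustrating
 * how the recoverable out-of-space status differs from the terminated one.
 * It assumes rte_compressdev.h is included for
 * rte_compressdev_dequeue_burst(), that dev_id and qp_id identify a
 * configured device queue pair, and that the app_* helpers are hypothetical
 * application callbacks, not part of this API. Error handling is elided.
 *
 *   struct rte_comp_op *op;
 *
 *   if (rte_compressdev_dequeue_burst(dev_id, qp_id, &op, 1) == 1) {
 *       switch (op->status) {
 *       case RTE_COMP_OP_STATUS_SUCCESS:
 *           app_consume_output(op);      // all op->produced bytes are valid
 *           break;
 *       case RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE:
 *           app_consume_output(op);      // output up to op->produced is valid
 *           app_resubmit_from(op->consumed); // continue with remaining input
 *           break;
 *       case RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED:
 *           app_resubmit_all(op);        // error: retry with a larger m_dst
 *           break;
 *       default:
 *           app_handle_error(op);
 *           break;
 *       }
 *   }
 */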

/** Compression Algorithms */
enum rte_comp_algorithm {
	RTE_COMP_ALGO_UNSPECIFIED = 0,
	/**< Compression algorithm not specified */
	RTE_COMP_ALGO_NULL,
	/**< No compression.
	 * Pass-through, data is copied unchanged from the source buffer to
	 * the destination buffer.
	 */
	RTE_COMP_ALGO_DEFLATE,
	/**< DEFLATE compression algorithm
	 * https://tools.ietf.org/html/rfc1951
	 */
	RTE_COMP_ALGO_LZS,
	/**< LZS compression algorithm
	 * https://tools.ietf.org/html/rfc2395
	 */
	RTE_COMP_ALGO_LIST_END
};

/** Compression Hash Algorithms */
enum rte_comp_hash_algorithm {
	RTE_COMP_HASH_ALGO_NONE = 0,
	/**< No hash */
	RTE_COMP_HASH_ALGO_SHA1,
	/**< SHA1 hash algorithm */
	RTE_COMP_HASH_ALGO_SHA2_256,
	/**< SHA256 hash algorithm of the SHA2 family */
	RTE_COMP_HASH_ALGO_LIST_END
};

/**
 * Compression Level.
 * The number is interpreted by each PMD differently. However, lower numbers
 * give the fastest compression at the expense of compression ratio, while
 * higher numbers may give better compression ratios but are likely slower.
 */
#define RTE_COMP_LEVEL_PMD_DEFAULT	(-1)
/**< Use PMD default */
#define RTE_COMP_LEVEL_NONE		(0)
/**< Output uncompressed blocks if supported by the specified algorithm */
#define RTE_COMP_LEVEL_MIN		(1)
/**< Use minimum compression level supported by the PMD */
#define RTE_COMP_LEVEL_MAX		(9)
/**< Use maximum compression level supported by the PMD */

/** Compression checksum types */
enum rte_comp_checksum_type {
	RTE_COMP_CHECKSUM_NONE,
	/**< No checksum generated */
	RTE_COMP_CHECKSUM_CRC32,
	/**< Generates a CRC32 checksum, as used by gzip */
	RTE_COMP_CHECKSUM_ADLER32,
	/**< Generates an Adler-32 checksum, as used by zlib */
	RTE_COMP_CHECKSUM_CRC32_ADLER32,
	/**< Generates both CRC32 and Adler-32 checksums, concatenated.
	 * CRC32 is in the lower 32 bits, Adler-32 in the upper 32 bits.
	 */
};

/** Compression Huffman type - used by the DEFLATE algorithm */
enum rte_comp_huffman {
	RTE_COMP_HUFFMAN_DEFAULT,
	/**< PMD may choose which Huffman codes to use */
	RTE_COMP_HUFFMAN_FIXED,
	/**< Use fixed Huffman codes */
	RTE_COMP_HUFFMAN_DYNAMIC,
	/**< Use dynamic Huffman codes */
};

/** Compression flush flags */
enum rte_comp_flush_flag {
	RTE_COMP_FLUSH_NONE,
	/**< Data is not flushed. Output may remain in the compressor and be
	 * processed during a following op. It may not be possible to
	 * decompress the output until a later op with some other flush flag
	 * has been sent.
	 */
	RTE_COMP_FLUSH_SYNC,
	/**< All data should be flushed to the output buffer. Output data can
	 * be decompressed. However, state and history are not cleared, so
	 * future operations may use history from this operation.
	 */
	RTE_COMP_FLUSH_FULL,
	/**< All data should be flushed to the output buffer. Output data can
	 * be decompressed. State and history data are cleared, so future
	 * ops will be independent of ops processed before this.
	 */
	RTE_COMP_FLUSH_FINAL
	/**< Same as RTE_COMP_FLUSH_FULL but, if op.algo is
	 * RTE_COMP_ALGO_DEFLATE, the BFINAL bit is set in the last block.
	 */
};
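
/*
 * Example: a minimal sketch of flush flag selection. A stateless op must
 * use RTE_COMP_FLUSH_FULL or RTE_COMP_FLUSH_FINAL, while in a stateful
 * stream the intermediate ops typically use RTE_COMP_FLUSH_NONE and only
 * the last op flushes. The is_last_chunk variable is an assumed
 * application-side flag, not part of this API.
 *
 *   op->op_type = RTE_COMP_OP_STATEFUL;
 *   op->flush_flag = is_last_chunk ? RTE_COMP_FLUSH_FINAL :
 *                                    RTE_COMP_FLUSH_NONE;
 */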
185 */ 186 }; 187 188 /** Compression transform types */ 189 enum rte_comp_xform_type { 190 RTE_COMP_COMPRESS, 191 /**< Compression service - compress */ 192 RTE_COMP_DECOMPRESS, 193 /**< Compression service - decompress */ 194 }; 195 196 /** Compression operation type */ 197 enum rte_comp_op_type { 198 RTE_COMP_OP_STATELESS, 199 /**< All data to be processed is submitted in the op, no state or 200 * history from previous ops is used and none will be stored for future 201 * ops. Flush flag must be set to either FLUSH_FULL or FLUSH_FINAL. 202 */ 203 RTE_COMP_OP_STATEFUL 204 /**< There may be more data to be processed after this op, it's part of 205 * a stream of data. State and history from previous ops can be used 206 * and resulting state and history can be stored for future ops, 207 * depending on flush flag. 208 */ 209 }; 210 211 212 /** Parameters specific to the deflate algorithm */ 213 struct rte_comp_deflate_params { 214 enum rte_comp_huffman huffman; 215 /**< Compression huffman encoding type */ 216 }; 217 218 /** Setup Data for compression */ 219 struct rte_comp_compress_xform { 220 enum rte_comp_algorithm algo; 221 /**< Algorithm to use for compress operation */ 222 union { 223 struct rte_comp_deflate_params deflate; 224 /**< Parameters specific to the deflate algorithm */ 225 }; /**< Algorithm specific parameters */ 226 int level; 227 /**< Compression level */ 228 uint8_t window_size; 229 /**< Base two log value of sliding window to be used. If window size 230 * can't be supported by the PMD then it may fall back to a smaller 231 * size. This is likely to result in a worse compression ratio. 232 */ 233 enum rte_comp_checksum_type chksum; 234 /**< Type of checksum to generate on the uncompressed data */ 235 enum rte_comp_hash_algorithm hash_algo; 236 /**< Hash algorithm to be used with compress operation. Hash is always 237 * done on plaintext. 238 */ 239 }; 240 241 /** 242 * Setup Data for decompression. 243 */ 244 struct rte_comp_decompress_xform { 245 enum rte_comp_algorithm algo; 246 /**< Algorithm to use for decompression */ 247 enum rte_comp_checksum_type chksum; 248 /**< Type of checksum to generate on the decompressed data */ 249 uint8_t window_size; 250 /**< Base two log value of sliding window which was used to generate 251 * compressed data. If window size can't be supported by the PMD then 252 * setup of stream or private_xform should fail. 253 */ 254 enum rte_comp_hash_algorithm hash_algo; 255 /**< Hash algorithm to be used with decompress operation. Hash is always 256 * done on plaintext. 257 */ 258 }; 259 260 /** 261 * Compression transform structure. 262 * 263 * This is used to specify the compression transforms required. 264 * Each transform structure can hold a single transform, the type field is 265 * used to specify which transform is contained within the union. 266 */ 267 struct rte_comp_xform { 268 enum rte_comp_xform_type type; 269 /**< xform type */ 270 union { 271 struct rte_comp_compress_xform compress; 272 /**< xform for compress operation */ 273 struct rte_comp_decompress_xform decompress; 274 /**< decompress xform */ 275 }; 276 }; 277 278 /** 279 * Compression Operation. 280 * 281 * This structure contains data relating to performing a compression 282 * operation on the referenced mbuf data buffers. 
283 * 284 * Comp operations are enqueued and dequeued in comp PMDs using the 285 * rte_compressdev_enqueue_burst() / rte_compressdev_dequeue_burst() APIs 286 */ 287 struct rte_comp_op { 288 enum rte_comp_op_type op_type; 289 union { 290 void *private_xform; 291 /**< Stateless private PMD data derived from an rte_comp_xform. 292 * A handle returned by rte_compressdev_private_xform_create() 293 * must be attached to operations of op_type RTE_COMP_STATELESS. 294 */ 295 void *stream; 296 /**< Private PMD data derived initially from an rte_comp_xform, 297 * which holds state and history data and evolves as operations 298 * are processed. rte_compressdev_stream_create() must be called 299 * on a device for all STATEFUL data streams and the resulting 300 * stream attached to the one or more operations associated 301 * with the data stream. 302 * All operations in a stream must be sent to the same device. 303 */ 304 }; 305 306 struct rte_mempool *mempool; 307 /**< Pool from which operation is allocated */ 308 rte_iova_t iova_addr; 309 /**< IOVA address of this operation */ 310 struct rte_mbuf *m_src; 311 /**< source mbuf 312 * The total size of the input buffer(s) can be retrieved using 313 * rte_pktmbuf_pkt_len(m_src). The max data size which can fit in a 314 * single mbuf is limited by the uint16_t rte_mbuf.data_len to 64k-1. 315 * If the input data is bigger than this it can be passed to the PMD in 316 * a chain of mbufs if the PMD's capabilities indicate it supports this. 317 */ 318 struct rte_mbuf *m_dst; 319 /**< destination mbuf 320 * The total size of the output buffer(s) can be retrieved using 321 * rte_pktmbuf_pkt_len(m_dst). The max data size which can fit in a 322 * single mbuf is limited by the uint16_t rte_mbuf.data_len to 64k-1. 323 * If the output data is expected to be bigger than this a chain of 324 * mbufs can be passed to the PMD if the PMD's capabilities indicate 325 * it supports this. 326 * 327 * @note, if incompressible data is passed to an engine for compression 328 * using RTE_COMP_ALGO_DEFLATE, it's possible for the output data 329 * to be larger than the uncompressed data, due to the inclusion 330 * of the DEFLATE header blocks. The size of m_dst should accommodate 331 * this, else OUT_OF_SPACE errors can be expected in this case. 332 */ 333 334 struct { 335 uint32_t offset; 336 /**< Starting point for compression or decompression, 337 * specified as number of bytes from start of packet in 338 * source buffer. 339 * This offset starts from the first segment 340 * of the buffer, in case the m_src is a chain of mbufs. 341 * Starting point for checksum generation in compress direction. 342 */ 343 uint32_t length; 344 /**< The length, in bytes, of the data in source buffer 345 * to be compressed or decompressed. 346 * Also the length of the data over which the checksum 347 * should be generated in compress direction 348 */ 349 } src; 350 struct { 351 uint32_t offset; 352 /**< Starting point for writing output data, specified as 353 * number of bytes from start of packet in dest 354 * buffer. 355 * This offset starts from the first segment 356 * of the buffer, in case the m_dst is a chain of mbufs. 357 * Starting point for checksum generation in 358 * decompress direction. 359 */ 360 } dst; 361 struct { 362 uint8_t *digest; 363 /**< Output buffer to store hash output, if enabled in xform. 364 * Buffer would contain valid value only after an op with 365 * flush flag = RTE_COMP_FLUSH_FULL/FLUSH_FINAL is processed 366 * successfully. 
367 * 368 * Length of buffer should be contiguous and large enough to 369 * accommodate digest produced by specific hash algo. 370 */ 371 rte_iova_t iova_addr; 372 /**< IO address of the buffer */ 373 } hash; 374 enum rte_comp_flush_flag flush_flag; 375 /**< Defines flush characteristics for the output data. 376 * Only applicable in compress direction 377 */ 378 uint64_t input_chksum; 379 /**< An input checksum can be provided to generate a 380 * cumulative checksum across sequential blocks in a STATELESS stream. 381 * Checksum type is as specified in xform chksum_type 382 */ 383 uint64_t output_chksum; 384 /**< If a checksum is generated it will be written in here. 385 * Checksum type is as specified in xform chksum_type. 386 */ 387 uint32_t consumed; 388 /**< The number of bytes from the source buffer 389 * which were compressed/decompressed. 390 */ 391 uint32_t produced; 392 /**< The number of bytes written to the destination buffer 393 * which were compressed/decompressed. 394 */ 395 uint64_t debug_status; 396 /**< 397 * Status of the operation is returned in the status param. 398 * This field allows the PMD to pass back extra 399 * pmd-specific debug information. Value is not defined on the API. 400 */ 401 uint8_t status; 402 /**< 403 * Operation status - use values from enum rte_comp_status. 404 * This is reset to 405 * RTE_COMP_OP_STATUS_NOT_PROCESSED on allocation from mempool and 406 * will be set to RTE_COMP_OP_STATUS_SUCCESS after operation 407 * is successfully processed by a PMD 408 */ 409 } __rte_cache_aligned; 410 411 /** 412 * Creates an operation pool 413 * 414 * @param name 415 * Compress pool name 416 * @param nb_elts 417 * Number of elements in pool 418 * @param cache_size 419 * Number of elements to cache on lcore, see 420 * *rte_mempool_create* for further details about cache size 421 * @param user_size 422 * Size of private data to allocate for user with each operation 423 * @param socket_id 424 * Socket to identifier allocate memory on 425 * @return 426 * - On success pointer to mempool 427 * - On failure NULL 428 */ 429 __rte_experimental 430 struct rte_mempool * 431 rte_comp_op_pool_create(const char *name, 432 unsigned int nb_elts, unsigned int cache_size, 433 uint16_t user_size, int socket_id); 434 435 /** 436 * Allocate an operation from a mempool with default parameters set 437 * 438 * @param mempool 439 * Compress operation mempool 440 * 441 * @return 442 * - On success returns a valid rte_comp_op structure 443 * - On failure returns NULL 444 */ 445 __rte_experimental 446 struct rte_comp_op * 447 rte_comp_op_alloc(struct rte_mempool *mempool); 448 449 /** 450 * Bulk allocate operations from a mempool with default parameters set 451 * 452 * @param mempool 453 * Compress operation mempool 454 * @param ops 455 * Array to place allocated operations 456 * @param nb_ops 457 * Number of operations to allocate 458 * @return 459 * - nb_ops: Success, the nb_ops requested was allocated 460 * - 0: Not enough entries in the mempool; no ops are retrieved. 461 */ 462 __rte_experimental 463 int 464 rte_comp_op_bulk_alloc(struct rte_mempool *mempool, 465 struct rte_comp_op **ops, uint16_t nb_ops); 466 467 /** 468 * Free operation structure 469 * If operation has been allocate from a rte_mempool, then the operation will 470 * be returned to the mempool. 471 * 472 * @param op 473 * Compress operation pointer allocated from rte_comp_op_alloc() 474 * If op is NULL, no operation is performed. 
475 */ 476 __rte_experimental 477 void 478 rte_comp_op_free(struct rte_comp_op *op); 479 480 /** 481 * Bulk free operation structures 482 * If operations have been allocated from an rte_mempool, then the operations 483 * will be returned to the mempool. 484 * The array entry will be cleared. 485 * 486 * @param ops 487 * Array of Compress operations 488 * @param nb_ops 489 * Number of operations to free 490 */ 491 __rte_experimental 492 void 493 rte_comp_op_bulk_free(struct rte_comp_op **ops, uint16_t nb_ops); 494 495 /** 496 * Get the name of a compress service feature flag 497 * 498 * @param flag 499 * The mask describing the flag 500 * 501 * @return 502 * The name of this flag, or NULL if it's not a valid feature flag. 503 */ 504 __rte_experimental 505 const char * 506 rte_comp_get_feature_name(uint64_t flag); 507 508 #ifdef __cplusplus 509 } 510 #endif 511 512 #endif /* _RTE_COMP_H_ */ 513