/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation
 */

#ifndef _RTE_COMPRESSDEV_H_
#define _RTE_COMPRESSDEV_H_

/**
 * @file rte_compressdev.h
 *
 * RTE Compression Device APIs.
 *
 * @warning
 * @b EXPERIMENTAL:
 * All functions in this file may be changed or removed without prior notice.
 *
 * Defines comp device APIs for the provisioning of compression operations.
 */

#ifdef __cplusplus
extern "C" {
#endif


#include <rte_compat.h>
#include "rte_comp.h"

/**
 * Parameter log base 2 range description.
 * Final value will be 2^value.
 */
struct rte_param_log2_range {
	uint8_t min; /**< Minimum log2 value */
	uint8_t max; /**< Maximum log2 value */
	uint8_t increment;
	/**< If a range of sizes are supported,
	 * this parameter is used to indicate
	 * increments in base 2 log byte value
	 * that are supported between the minimum and maximum
	 */
};

/** Structure used to capture a capability of a comp device */
struct rte_compressdev_capabilities {
	enum rte_comp_algorithm algo;
	/**< Compression algorithm */
	uint64_t comp_feature_flags;
	/**< Bitmask of flags for compression service features */
	struct rte_param_log2_range window_size;
	/**< Window size range in base two log byte values */
};

/** Macro used at end of comp PMD list */
#define RTE_COMP_END_OF_CAPABILITIES_LIST() \
	{ RTE_COMP_ALGO_UNSPECIFIED }

/**
 * Query a device's capability entry for a specific compression algorithm.
 *
 * @param dev_id
 *   Compress device identifier
 * @param algo
 *   Compression algorithm to look up in the device's capability array
 * @return
 *   - Pointer to the capability entry describing *algo* if supported.
 *   - NULL if the device does not support *algo*.
 */
__rte_experimental
const struct rte_compressdev_capabilities *
rte_compressdev_capability_get(uint8_t dev_id,
		enum rte_comp_algorithm algo);

/**
 * Compression device supported feature flags
 *
 * @note New features flags should be added to the end of the list
 *
 * Keep these flags synchronised with rte_compressdev_get_feature_name()
 */
#define RTE_COMPDEV_FF_HW_ACCELERATED		(1ULL << 0)
/**< Operations are off-loaded to an external hardware accelerator */
#define RTE_COMPDEV_FF_CPU_SSE			(1ULL << 1)
/**< Utilises CPU SIMD SSE instructions */
#define RTE_COMPDEV_FF_CPU_AVX			(1ULL << 2)
/**< Utilises CPU SIMD AVX instructions */
#define RTE_COMPDEV_FF_CPU_AVX2			(1ULL << 3)
/**< Utilises CPU SIMD AVX2 instructions */
#define RTE_COMPDEV_FF_CPU_AVX512		(1ULL << 4)
/**< Utilises CPU SIMD AVX512 instructions */
#define RTE_COMPDEV_FF_CPU_NEON			(1ULL << 5)
/**< Utilises CPU NEON instructions */
#define RTE_COMPDEV_FF_OP_DONE_IN_DEQUEUE	(1ULL << 6)
/**< A PMD should set this if the bulk of the
 * processing is done during the dequeue. It should leave it
 * cleared if the processing is done during the enqueue (default).
 * Applications can use this as a hint for tuning.
 */

/**
 * Get the name of a compress device feature flag.
 *
 * @param flag
 *   The mask describing the flag
 *
 * @return
 *   The name of this flag, or NULL if it's not a valid feature flag.
 */
__rte_experimental
const char *
rte_compressdev_get_feature_name(uint64_t flag);

/** comp device information */
struct rte_compressdev_info {
	const char *driver_name;	/**< Driver name. */
	uint64_t feature_flags;		/**< Feature flags */
	const struct rte_compressdev_capabilities *capabilities;
	/**< Array of devices supported capabilities */
	uint16_t max_nb_queue_pairs;
	/**< Maximum number of queue pairs supported by device.
	 * (If 0, there is no limit in maximum number of queue pairs)
	 */
};

/** comp device statistics */
struct rte_compressdev_stats {
	uint64_t enqueued_count;
	/**< Count of all operations enqueued */
	uint64_t dequeued_count;
	/**< Count of all operations dequeued */

	uint64_t enqueue_err_count;
	/**< Total error count on operations enqueued */
	uint64_t dequeue_err_count;
	/**< Total error count on operations dequeued */
};


/**
 * Get the device identifier for the named compress device.
 *
 * @param name
 *   Device name to select the device structure
 * @return
 *   - Returns compress device identifier on success.
 *   - Return -1 on failure to find named compress device.
 */
__rte_experimental
int
rte_compressdev_get_dev_id(const char *name);

/**
 * Get the compress device name given a device identifier.
 *
 * @param dev_id
 *   Compress device identifier
 * @return
 *   - Returns compress device name.
 *   - Returns NULL if compress device is not present.
 */
__rte_experimental
const char *
rte_compressdev_name_get(uint8_t dev_id);

/**
 * Get the total number of compress devices that have been successfully
 * initialised.
 *
 * @return
 *   - The total number of usable compress devices.
 */
__rte_experimental
uint8_t
rte_compressdev_count(void);

/**
 * Get number and identifiers of attached comp devices that
 * use the same compress driver.
 *
 * @param driver_name
 *   Driver name
 * @param devices
 *   Output devices identifiers
 * @param nb_devices
 *   Maximal number of devices
 *
 * @return
 *   Returns number of attached compress devices.
 */
__rte_experimental
uint8_t
rte_compressdev_devices_get(const char *driver_name, uint8_t *devices,
		uint8_t nb_devices);

/**
 * Return the NUMA socket to which a device is connected.
 *
 * @param dev_id
 *   Compress device identifier
 * @return
 *   The NUMA socket id to which the device is connected or
 *   a default of zero if the socket could not be determined.
 *   -1 is returned if the dev_id value is out of range.
 */
__rte_experimental
int
rte_compressdev_socket_id(uint8_t dev_id);

/** Compress device configuration structure */
struct rte_compressdev_config {
	int socket_id;
	/**< Socket on which to allocate resources */
	uint16_t nb_queue_pairs;
	/**< Total number of queue pairs to configure on a device */
	uint16_t max_nb_priv_xforms;
	/**< Max number of private_xforms which will be created on the device */
	uint16_t max_nb_streams;
	/**< Max number of streams which will be created on the device */
};

/**
 * Configure a device.
 *
 * This function must be invoked first before any other function in the
 * API. This function can also be re-invoked when a device is in the
 * stopped state.
 *
 * @param dev_id
 *   Compress device identifier
 * @param config
 *   The compress device configuration
 * @return
 *   - 0: Success, device configured.
 *   - <0: Error code returned by the driver configuration function.
 */
__rte_experimental
int
rte_compressdev_configure(uint8_t dev_id,
		struct rte_compressdev_config *config);

/**
 * Start a device.
 *
 * The device start step is called after configuring the device and setting up
 * its queue pairs.
 * On success, data-path functions exported by the API (enqueue/dequeue, etc)
 * can be invoked.
 *
 * @param dev_id
 *   Compress device identifier
 * @return
 *   - 0: Success, device started.
 *   - <0: Error code of the driver device start function.
 */
__rte_experimental
int
rte_compressdev_start(uint8_t dev_id);

/**
 * Stop a device. The device can be restarted with a call to
 * rte_compressdev_start()
 *
 * @param dev_id
 *   Compress device identifier
 */
__rte_experimental
void
rte_compressdev_stop(uint8_t dev_id);

/**
 * Close a device.
 * The memory allocated in the device gets freed.
 * After calling this function, in order to use
 * the device again, it is required to
 * configure the device again.
 *
 * @param dev_id
 *   Compress device identifier
 *
 * @return
 *  - 0 on successfully closing device
 *  - <0 on failure to close device
 */
__rte_experimental
int
rte_compressdev_close(uint8_t dev_id);

/**
 * Allocate and set up a receive queue pair for a device.
 * This should only be called when the device is stopped.
 *
 *
 * @param dev_id
 *   Compress device identifier
 * @param queue_pair_id
 *   The index of the queue pairs to set up. The
 *   value must be in the range [0, nb_queue_pair - 1]
 *   previously supplied to rte_compressdev_configure()
 * @param max_inflight_ops
 *   Max number of ops which the qp will have to
 *   accommodate simultaneously
 * @param socket_id
 *   The *socket_id* argument is the socket identifier
 *   in case of NUMA. The value can be *SOCKET_ID_ANY*
 *   if there is no NUMA constraint for the DMA memory
 *   allocated for the receive queue pair
 * @return
 *   - 0: Success, queue pair correctly set up.
 *   - <0: Queue pair configuration failed
 */
__rte_experimental
int
rte_compressdev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		uint32_t max_inflight_ops, int socket_id);

/**
 * Get the number of queue pairs on a specific comp device
 *
 * @param dev_id
 *   Compress device identifier
 * @return
 *   - The number of configured queue pairs.
 */
__rte_experimental
uint16_t
rte_compressdev_queue_pair_count(uint8_t dev_id);


/**
 * Retrieve the general I/O statistics of a device.
 *
 * @param dev_id
 *   The identifier of the device
 * @param stats
 *   A pointer to a structure of type
 *   *rte_compressdev_stats* to be filled with the
 *   values of device counters
 * @return
 *   - Zero if successful.
 *   - Non-zero otherwise.
 */
__rte_experimental
int
rte_compressdev_stats_get(uint8_t dev_id, struct rte_compressdev_stats *stats);

/**
 * Reset the general I/O statistics of a device.
 *
 * @param dev_id
 *   The identifier of the device.
 */
__rte_experimental
void
rte_compressdev_stats_reset(uint8_t dev_id);

/**
 * Retrieve the contextual information of a device.
 *
 * @param dev_id
 *   Compress device identifier
 * @param dev_info
 *   A pointer to a structure of type *rte_compressdev_info*
 *   to be filled with the contextual information of the device
 *
 * @note The capabilities field of dev_info is set to point to the first
 * element of an array of struct rte_compressdev_capabilities.
 * The element after the last valid element has its algo field set to
 * RTE_COMP_ALGO_UNSPECIFIED.
 */
__rte_experimental
void
rte_compressdev_info_get(uint8_t dev_id, struct rte_compressdev_info *dev_info);

/**
 *
 * Dequeue a burst of processed compression operations from a queue on the comp
 * device. The dequeued operations are stored in *rte_comp_op* structures
 * whose pointers are supplied in the *ops* array.
 *
 * The rte_compressdev_dequeue_burst() function returns the number of ops
 * actually dequeued, which is the number of *rte_comp_op* data structures
 * effectively supplied into the *ops* array.
 *
 * A return value equal to *nb_ops* indicates that the queue contained
 * at least *nb_ops* operations, and this is likely to signify that other
 * processed operations remain in the device's output queue. Applications
 * implementing a "retrieve as many processed operations as possible" policy
 * can check this specific case and keep invoking the
 * rte_compressdev_dequeue_burst() function until a value less than
 * *nb_ops* is returned.
 *
 * The rte_compressdev_dequeue_burst() function does not provide any error
 * notification to avoid the corresponding overhead.
 *
 * @note: operation ordering is not maintained within the queue pair.
 *
 * @note: In case op status = OUT_OF_SPACE_TERMINATED, op.consumed=0 and the
 * op must be resubmitted with the same input data and a larger output buffer.
 * op.produced is usually 0, but in decompression cases a PMD may return > 0
 * and the application may find it useful to inspect that data.
 * This status is only returned on STATELESS ops.
 *
 * @note: In case op status = OUT_OF_SPACE_RECOVERABLE, op.produced can be used
 * and next op in stream should continue on from op.consumed+1 with a fresh
 * output buffer.
 * Consumed=0, produced=0 is an unusual but allowed case. There may be useful
 * state/history stored in the PMD, even though no output was produced yet.
 *
 *
 * @param dev_id
 *   Compress device identifier
 * @param qp_id
 *   The index of the queue pair from which to retrieve
 *   processed operations. The value must be in the range
 *   [0, nb_queue_pair - 1] previously supplied to
 *   rte_compressdev_configure()
 * @param ops
 *   The address of an array of pointers to
 *   *rte_comp_op* structures that must be
 *   large enough to store *nb_ops* pointers in it
 * @param nb_ops
 *   The maximum number of operations to dequeue
 * @return
 *   - The number of operations actually dequeued, which is the number
 *   of pointers to *rte_comp_op* structures effectively supplied to the
 *   *ops* array.
 */
__rte_experimental
uint16_t
rte_compressdev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
		struct rte_comp_op **ops, uint16_t nb_ops);

/**
 * Enqueue a burst of operations for processing on a compression device.
 *
 * The rte_compressdev_enqueue_burst() function is invoked to place
 * comp operations on the queue *qp_id* of the device designated by
 * its *dev_id*.
 *
 * The *nb_ops* parameter is the number of operations to process which are
 * supplied in the *ops* array of *rte_comp_op* structures.
 *
 * The rte_compressdev_enqueue_burst() function returns the number of
 * operations it actually enqueued for processing. A return value equal to
 * *nb_ops* means that all packets have been enqueued.
 *
 * @note All compression operations are Out-of-place (OOP) operations,
 * as the size of the output data is different to the size of the input data.
 *
 * @note The rte_comp_op contains both input and output parameters and is the
 * vehicle for the application to pass data into and out of the PMD. While an
 * op is inflight, i.e. once it has been enqueued, the private_xform or stream
 * attached to it and any mbufs or memory referenced by it should not be altered
 * or freed by the application. The PMD may use or change some of this data at
 * any time until it has been returned in a dequeue operation.
 *
 * @note The flush flag only applies to operations which return SUCCESS.
 * In OUT_OF_SPACE cases whether STATEFUL or STATELESS, data in dest buffer
 * is as if flush flag was FLUSH_NONE.
 * @note flush flag only applies in compression direction. It has no meaning
 * for decompression.
 * @note: operation ordering is not maintained within the queue pair.
 *
 * @param dev_id
 *   Compress device identifier
 * @param qp_id
 *   The index of the queue pair on which operations
 *   are to be enqueued for processing. The value
 *   must be in the range [0, nb_queue_pairs - 1]
 *   previously supplied to *rte_compressdev_configure*
 * @param ops
 *   The address of an array of *nb_ops* pointers
 *   to *rte_comp_op* structures which contain
 *   the operations to be processed
 * @param nb_ops
 *   The number of operations to process
 * @return
 *   The number of operations actually enqueued on the device. The return
 *   value can be less than the value of the *nb_ops* parameter when the
 *   comp device's queue is full or if invalid parameters are specified in
 *   a *rte_comp_op*.
 */
__rte_experimental
uint16_t
rte_compressdev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
		struct rte_comp_op **ops, uint16_t nb_ops);

/**
 * This should alloc a stream from the device's mempool and initialise it.
 * The application should call this API when setting up for the stateful
 * processing of a set of data on a device. The API can be called multiple
 * times to set up a stream for each data set. The handle returned is only for
 * use with ops of op_type STATEFUL and must be passed to the PMD
 * with every op in the data stream
 *
 * @param dev_id
 *   Compress device identifier
 * @param xform
 *   xform data
 * @param stream
 *   Pointer to where PMD's private stream handle should be stored
 *
 * @return
 *  - 0 if successful and valid stream handle
 *  - <0 in error cases
 *  - Returns -EINVAL if input parameters are invalid.
 *  - Returns -ENOTSUP if comp device does not support STATEFUL operations.
 *  - Returns -ENOTSUP if comp device does not support the comp transform.
 *  - Returns -ENOMEM if the private stream could not be allocated.
 */
__rte_experimental
int
rte_compressdev_stream_create(uint8_t dev_id,
		const struct rte_comp_xform *xform,
		void **stream);

/**
 * This should clear the stream and return it to the device's mempool.
 *
 * @param dev_id
 *   Compress device identifier
 *
 * @param stream
 *   PMD's private stream data
 *
 * @return
 *  - 0 if successful
 *  - <0 in error cases
 *  - Returns -EINVAL if input parameters are invalid.
 *  - Returns -ENOTSUP if comp device does not support STATEFUL operations.
 *  - Returns -EBUSY if can't free stream as there are inflight operations
 */
__rte_experimental
int
rte_compressdev_stream_free(uint8_t dev_id, void *stream);

/**
 * This should alloc a private_xform from the device's mempool and initialise
 * it. The application should call this API when setting up for stateless
 * processing on a device. If it returns non-shareable, then the appl cannot
 * share this handle with multiple in-flight ops and should call this API again
 * to get a separate handle for every in-flight op.
 * The handle returned is only valid for use with ops of op_type STATELESS.
 *
 * @param dev_id
 *   Compress device identifier
 * @param xform
 *   xform data
 * @param private_xform
 *   Pointer to where PMD's private_xform handle should be stored
 *
 * @return
 *  - if successful returns 0
 *    and valid private_xform handle
 *  - <0 in error cases
 *  - Returns -EINVAL if input parameters are invalid.
 *  - Returns -ENOTSUP if comp device does not support the comp transform.
 *  - Returns -ENOMEM if the private_xform could not be allocated.
 */
__rte_experimental
int
rte_compressdev_private_xform_create(uint8_t dev_id,
		const struct rte_comp_xform *xform,
		void **private_xform);

/**
 * This should clear the private_xform and return it to the device's mempool.
 * It is the application's responsibility to ensure that private_xform data
 * is not cleared while there are still in-flight operations using it.
 *
 * @param dev_id
 *   Compress device identifier
 *
 * @param private_xform
 *   PMD's private_xform data
 *
 * @return
 *  - 0 if successful
 *  - <0 in error cases
 *  - Returns -EINVAL if input parameters are invalid.
 */
__rte_experimental
int
rte_compressdev_private_xform_free(uint8_t dev_id, void *private_xform);

#ifdef __cplusplus
}
#endif

#endif /* _RTE_COMPRESSDEV_H_ */