/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation
 */

#ifndef _RTE_COMPRESSDEV_H_
#define _RTE_COMPRESSDEV_H_

/**
 * @file rte_compressdev.h
 *
 * RTE Compression Device APIs.
 *
 * @warning
 * @b EXPERIMENTAL:
 * All functions in this file may be changed or removed without prior notice.
 *
 * Defines comp device APIs for the provisioning of compression operations.
 */

#ifdef __cplusplus
extern "C" {
#endif

#include "rte_comp.h"

/**
 * Parameter log base 2 range description.
 * Final value will be 2^value.
 */
struct rte_param_log2_range {
	uint8_t min; /**< Minimum log2 value */
	uint8_t max; /**< Maximum log2 value */
	uint8_t increment;
	/**< If a range of sizes is supported,
	 * this parameter is used to indicate
	 * increments in base 2 log byte value
	 * that are supported between the minimum and maximum
	 */
};

/** Structure used to capture a capability of a comp device */
struct rte_compressdev_capabilities {
	enum rte_comp_algorithm algo;
	/**< Compression algorithm */
	uint64_t comp_feature_flags;
	/**< Bitmask of flags for compression service features */
	struct rte_param_log2_range window_size;
	/**< Window size range in base two log byte values */
};

/** Macro used at end of comp PMD list */
#define RTE_COMP_END_OF_CAPABILITIES_LIST() \
	{ RTE_COMP_ALGO_UNSPECIFIED }

/**
 * Query the capabilities of a comp device for a specific compression
 * algorithm.
 *
 * @param dev_id
 *   Compress device identifier
 * @param algo
 *   Compression algorithm
 * @return
 *   - Pointer to the capability structure describing support for *algo*.
 *   - NULL if the device does not support that algorithm.
 */
__rte_experimental
const struct rte_compressdev_capabilities *
rte_compressdev_capability_get(uint8_t dev_id,
		enum rte_comp_algorithm algo);
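
/*
 * Illustrative usage sketch (not part of the API documentation proper; the
 * device id and the DEFLATE algorithm are assumptions made for the example):
 * query a device's DEFLATE capability and derive the smallest and largest
 * supported window sizes from the base-2 log range described above.
 * Intermediate window sizes, if any, step by window_size.increment in the
 * log2 value.
 *
 *	const struct rte_compressdev_capabilities *cap;
 *	uint32_t min_window, max_window;
 *
 *	cap = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
 *	if (cap != NULL) {
 *		min_window = 1u << cap->window_size.min;
 *		max_window = 1u << cap->window_size.max;
 *	}
 */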

/**
 * Compression device supported feature flags
 *
 * @note New feature flags should be added to the end of the list
 *
 * Keep these flags synchronised with rte_compressdev_get_feature_name()
 */
#define RTE_COMPDEV_FF_HW_ACCELERATED		(1ULL << 0)
/**< Operations are off-loaded to an external hardware accelerator */
#define RTE_COMPDEV_FF_CPU_SSE			(1ULL << 1)
/**< Utilises CPU SIMD SSE instructions */
#define RTE_COMPDEV_FF_CPU_AVX			(1ULL << 2)
/**< Utilises CPU SIMD AVX instructions */
#define RTE_COMPDEV_FF_CPU_AVX2			(1ULL << 3)
/**< Utilises CPU SIMD AVX2 instructions */
#define RTE_COMPDEV_FF_CPU_AVX512		(1ULL << 4)
/**< Utilises CPU SIMD AVX512 instructions */
#define RTE_COMPDEV_FF_CPU_NEON			(1ULL << 5)
/**< Utilises CPU NEON instructions */
#define RTE_COMPDEV_FF_OP_DONE_IN_DEQUEUE	(1ULL << 6)
/**< A PMD should set this if the bulk of the
 * processing is done during the dequeue. It should leave it
 * cleared if the processing is done during the enqueue (default).
 * Applications can use this as a hint for tuning.
 */

/**
 * Get the name of a compress device feature flag.
 *
 * @param flag
 *   The mask describing the flag
 *
 * @return
 *   The name of this flag, or NULL if it's not a valid feature flag.
 */
__rte_experimental
const char *
rte_compressdev_get_feature_name(uint64_t flag);

/** comp device information */
struct rte_compressdev_info {
	const char *driver_name;	/**< Driver name. */
	uint64_t feature_flags;		/**< Feature flags */
	const struct rte_compressdev_capabilities *capabilities;
	/**< Array of the device's supported capabilities */
	uint16_t max_nb_queue_pairs;
	/**< Maximum number of queue pairs supported by device.
	 * (If 0, there is no limit on the maximum number of queue pairs)
	 */
};

/** comp device statistics */
struct rte_compressdev_stats {
	uint64_t enqueued_count;
	/**< Count of all operations enqueued */
	uint64_t dequeued_count;
	/**< Count of all operations dequeued */

	uint64_t enqueue_err_count;
	/**< Total error count on operations enqueued */
	uint64_t dequeue_err_count;
	/**< Total error count on operations dequeued */
};

/**
 * Get the device identifier for the named compress device.
 *
 * @param name
 *   Device name to select the device structure
 * @return
 *   - Returns compress device identifier on success.
 *   - Returns -1 on failure to find named compress device.
 */
__rte_experimental
int
rte_compressdev_get_dev_id(const char *name);

/**
 * Get the compress device name given a device identifier.
 *
 * @param dev_id
 *   Compress device identifier
 * @return
 *   - Returns compress device name.
 *   - Returns NULL if compress device is not present.
 */
__rte_experimental
const char *
rte_compressdev_name_get(uint8_t dev_id);

/**
 * Get the total number of compress devices that have been successfully
 * initialised.
 *
 * @return
 *   - The total number of usable compress devices.
 */
__rte_experimental
uint8_t
rte_compressdev_count(void);

/**
 * Get the number and identifiers of attached comp devices that
 * use the same compress driver.
 *
 * @param driver_name
 *   Driver name
 * @param devices
 *   Output device identifiers
 * @param nb_devices
 *   Maximal number of devices
 *
 * @return
 *   Returns the number of attached compress devices.
 */
__rte_experimental
uint8_t
rte_compressdev_devices_get(const char *driver_name, uint8_t *devices,
		uint8_t nb_devices);

/**
 * Return the NUMA socket to which a device is connected.
 *
 * @param dev_id
 *   Compress device identifier
 * @return
 *   The NUMA socket id to which the device is connected or
 *   a default of zero if the socket could not be determined.
 *   -1 if the dev_id value is out of range.
 */
__rte_experimental
int
rte_compressdev_socket_id(uint8_t dev_id);
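
/*
 * Illustrative usage sketch (an assumption of the example is that device
 * identifiers of initialised devices are contiguous from 0): walk all
 * initialised compress devices and report each device's name and NUMA
 * socket using the lookup functions above.
 *
 *	uint8_t nb_devs = rte_compressdev_count();
 *	uint8_t dev_id;
 *
 *	for (dev_id = 0; dev_id < nb_devs; dev_id++)
 *		printf("dev %u: %s on socket %d\n", dev_id,
 *			rte_compressdev_name_get(dev_id),
 *			rte_compressdev_socket_id(dev_id));
 */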

/** Compress device configuration structure */
struct rte_compressdev_config {
	int socket_id;
	/**< Socket on which to allocate resources */
	uint16_t nb_queue_pairs;
	/**< Total number of queue pairs to configure on a device */
	uint16_t max_nb_priv_xforms;
	/**< Max number of private_xforms which will be created on the device */
	uint16_t max_nb_streams;
	/**< Max number of streams which will be created on the device */
};

/**
 * Configure a device.
 *
 * This function must be invoked first before any other function in the
 * API. This function can also be re-invoked when a device is in the
 * stopped state.
 *
 * @param dev_id
 *   Compress device identifier
 * @param config
 *   The compress device configuration
 * @return
 *   - 0: Success, device configured.
 *   - <0: Error code returned by the driver configuration function.
 */
__rte_experimental
int
rte_compressdev_configure(uint8_t dev_id,
		struct rte_compressdev_config *config);

/**
 * Start a device.
 *
 * The device start step is called after configuring the device and setting up
 * its queue pairs.
 * On success, data-path functions exported by the API (enqueue/dequeue, etc)
 * can be invoked.
 *
 * @param dev_id
 *   Compress device identifier
 * @return
 *   - 0: Success, device started.
 *   - <0: Error code of the driver device start function.
 */
__rte_experimental
int
rte_compressdev_start(uint8_t dev_id);

/**
 * Stop a device. The device can be restarted with a call to
 * rte_compressdev_start().
 *
 * @param dev_id
 *   Compress device identifier
 */
__rte_experimental
void
rte_compressdev_stop(uint8_t dev_id);

/**
 * Close a device.
 * The memory allocated in the device is freed.
 * After calling this function, the device must be reconfigured
 * before it can be used again.
 *
 * @param dev_id
 *   Compress device identifier
 *
 * @return
 *   - 0 on successfully closing the device
 *   - <0 on failure to close the device
 */
__rte_experimental
int
rte_compressdev_close(uint8_t dev_id);

/**
 * Allocate and set up a receive queue pair for a device.
 * This should only be called when the device is stopped.
 *
 * @param dev_id
 *   Compress device identifier
 * @param queue_pair_id
 *   The index of the queue pair to set up. The
 *   value must be in the range [0, nb_queue_pair - 1]
 *   previously supplied to rte_compressdev_configure()
 * @param max_inflight_ops
 *   Max number of ops which the qp will have to
 *   accommodate simultaneously
 * @param socket_id
 *   The *socket_id* argument is the socket identifier
 *   in case of NUMA. The value can be *SOCKET_ID_ANY*
 *   if there is no NUMA constraint for the DMA memory
 *   allocated for the receive queue pair
 * @return
 *   - 0: Success, queue pair correctly set up.
 *   - <0: Queue pair configuration failed.
 */
__rte_experimental
int
rte_compressdev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		uint32_t max_inflight_ops, int socket_id);

/**
 * Get the number of queue pairs on a specific comp device.
 *
 * @param dev_id
 *   Compress device identifier
 * @return
 *   - The number of configured queue pairs.
 */
__rte_experimental
uint16_t
rte_compressdev_queue_pair_count(uint8_t dev_id);

/**
 * Retrieve the general I/O statistics of a device.
 *
 * @param dev_id
 *   The identifier of the device
 * @param stats
 *   A pointer to a structure of type
 *   *rte_compressdev_stats* to be filled with the
 *   values of device counters
 * @return
 *   - Zero if successful.
 *   - Non-zero otherwise.
 */
__rte_experimental
int
rte_compressdev_stats_get(uint8_t dev_id, struct rte_compressdev_stats *stats);

/**
 * Reset the general I/O statistics of a device.
 *
 * @param dev_id
 *   The identifier of the device.
 */
__rte_experimental
void
rte_compressdev_stats_reset(uint8_t dev_id);

/**
 * Retrieve the contextual information of a device.
 *
 * @param dev_id
 *   Compress device identifier
 * @param dev_info
 *   A pointer to a structure of type *rte_compressdev_info*
 *   to be filled with the contextual information of the device
 *
 * @note The capabilities field of dev_info is set to point to the first
 * element of an array of struct rte_compressdev_capabilities.
 * The element after the last valid element has its algo field set to
 * RTE_COMP_ALGO_UNSPECIFIED.
 */
__rte_experimental
void
rte_compressdev_info_get(uint8_t dev_id, struct rte_compressdev_info *dev_info);
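
/*
 * Illustrative setup sketch (the single queue pair and the in-flight depth
 * of 64 are assumptions made for the example): configure a stopped device,
 * cap the requested queue-pair count at the device limit reported by
 * rte_compressdev_info_get(), set up the queue pairs and start the device.
 *
 *	static int
 *	setup_compressdev(uint8_t dev_id)
 *	{
 *		struct rte_compressdev_info info;
 *		struct rte_compressdev_config cfg;
 *		uint16_t qp;
 *
 *		rte_compressdev_info_get(dev_id, &info);
 *
 *		cfg.socket_id = rte_compressdev_socket_id(dev_id);
 *		cfg.nb_queue_pairs = 1;
 *		if (info.max_nb_queue_pairs != 0 &&
 *				cfg.nb_queue_pairs > info.max_nb_queue_pairs)
 *			cfg.nb_queue_pairs = info.max_nb_queue_pairs;
 *		cfg.max_nb_priv_xforms = 1;
 *		cfg.max_nb_streams = 0;
 *		if (rte_compressdev_configure(dev_id, &cfg) < 0)
 *			return -1;
 *
 *		for (qp = 0; qp < cfg.nb_queue_pairs; qp++)
 *			if (rte_compressdev_queue_pair_setup(dev_id, qp, 64,
 *					cfg.socket_id) < 0)
 *				return -1;
 *
 *		return rte_compressdev_start(dev_id);
 *	}
 */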

/**
 * Dequeue a burst of processed compression operations from a queue on the comp
 * device. The dequeued operations are stored in *rte_comp_op* structures
 * whose pointers are supplied in the *ops* array.
 *
 * The rte_compressdev_dequeue_burst() function returns the number of ops
 * actually dequeued, which is the number of *rte_comp_op* data structures
 * effectively supplied into the *ops* array.
 *
 * A return value equal to *nb_ops* indicates that the queue contained
 * at least *nb_ops* operations, and this is likely to signify that other
 * processed operations remain in the device's output queue. Applications
 * implementing a "retrieve as many processed operations as possible" policy
 * can check this specific case and keep invoking the
 * rte_compressdev_dequeue_burst() function until a value less than
 * *nb_ops* is returned.
 *
 * The rte_compressdev_dequeue_burst() function does not provide any error
 * notification to avoid the corresponding overhead.
 *
 * @note Operation ordering is not maintained within the queue pair.
 *
 * @note In case op status = OUT_OF_SPACE_TERMINATED, op.consumed=0 and the
 * op must be resubmitted with the same input data and a larger output buffer.
 * op.produced is usually 0, but in decompression cases a PMD may return > 0
 * and the application may find it useful to inspect that data.
 * This status is only returned on STATELESS ops.
 *
 * @note In case op status = OUT_OF_SPACE_RECOVERABLE, op.produced can be used
 * and the next op in the stream should continue on from op.consumed+1 with a
 * fresh output buffer.
 * Consumed=0, produced=0 is an unusual but allowed case. There may be useful
 * state/history stored in the PMD, even though no output was produced yet.
 *
 * @param dev_id
 *   Compress device identifier
 * @param qp_id
 *   The index of the queue pair from which to retrieve
 *   processed operations. The value must be in the range
 *   [0, nb_queue_pair - 1] previously supplied to
 *   rte_compressdev_configure()
 * @param ops
 *   The address of an array of pointers to
 *   *rte_comp_op* structures that must be
 *   large enough to store *nb_ops* pointers in it
 * @param nb_ops
 *   The maximum number of operations to dequeue
 * @return
 *   - The number of operations actually dequeued, which is the number
 *   of pointers to *rte_comp_op* structures effectively supplied to the
 *   *ops* array.
 */
__rte_experimental
uint16_t
rte_compressdev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
		struct rte_comp_op **ops, uint16_t nb_ops);
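
/*
 * Illustrative sketch of the "retrieve as many processed operations as
 * possible" policy described above (device id 0, queue pair 0 and a burst
 * size of 32 are assumptions made for the example): keep dequeuing while
 * full bursts are returned; completed operations would be handled inside
 * the loop before the pointers in deq_ops are reused.
 *
 *	struct rte_comp_op *deq_ops[32];
 *	uint16_t nb_deq;
 *
 *	do {
 *		nb_deq = rte_compressdev_dequeue_burst(0, 0, deq_ops, 32);
 *	} while (nb_deq == 32);
 */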

/**
 * Enqueue a burst of operations for processing on a compression device.
 *
 * The rte_compressdev_enqueue_burst() function is invoked to place
 * comp operations on the queue *qp_id* of the device designated by
 * its *dev_id*.
 *
 * The *nb_ops* parameter is the number of operations to process which are
 * supplied in the *ops* array of *rte_comp_op* structures.
 *
 * The rte_compressdev_enqueue_burst() function returns the number of
 * operations it actually enqueued for processing. A return value equal to
 * *nb_ops* means that all packets have been enqueued.
 *
 * @note All compression operations are Out-of-place (OOP) operations,
 * as the size of the output data differs from the size of the input data.
 *
 * @note The rte_comp_op contains both input and output parameters and is the
 * vehicle for the application to pass data into and out of the PMD. While an
 * op is in flight, i.e. once it has been enqueued, the private_xform or stream
 * attached to it and any mbufs or memory referenced by it should not be
 * altered or freed by the application. The PMD may use or change some of this
 * data at any time until it has been returned in a dequeue operation.
 *
 * @note The flush flag only applies to operations which return SUCCESS.
 * In OUT_OF_SPACE cases, whether STATEFUL or STATELESS, data in the dest
 * buffer is as if the flush flag was FLUSH_NONE.
 * @note The flush flag only applies in the compression direction. It has no
 * meaning for decompression.
 * @note Operation ordering is not maintained within the queue pair.
 *
 * @param dev_id
 *   Compress device identifier
 * @param qp_id
 *   The index of the queue pair on which operations
 *   are to be enqueued for processing. The value
 *   must be in the range [0, nb_queue_pairs - 1]
 *   previously supplied to *rte_compressdev_configure*
 * @param ops
 *   The address of an array of *nb_ops* pointers
 *   to *rte_comp_op* structures which contain
 *   the operations to be processed
 * @param nb_ops
 *   The number of operations to process
 * @return
 *   The number of operations actually enqueued on the device. The return
 *   value can be less than the value of the *nb_ops* parameter when the
 *   comp device's queue is full or if invalid parameters are specified in
 *   a *rte_comp_op*.
 */
__rte_experimental
uint16_t
rte_compressdev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
		struct rte_comp_op **ops, uint16_t nb_ops);

/**
 * This should allocate a stream from the device's mempool and initialise it.
 * The application should call this API when setting up for the stateful
 * processing of a set of data on a device. The API can be called multiple
 * times to set up a stream for each data set. The handle returned is only for
 * use with ops of op_type STATEFUL and must be passed to the PMD
 * with every op in the data stream.
 *
 * @param dev_id
 *   Compress device identifier
 * @param xform
 *   xform data
 * @param stream
 *   Pointer to where the PMD's private stream handle should be stored
 *
 * @return
 *   - 0 if successful and valid stream handle.
 *   - <0 in error cases.
 *   - Returns -EINVAL if input parameters are invalid.
 *   - Returns -ENOTSUP if comp device does not support STATEFUL operations.
 *   - Returns -ENOTSUP if comp device does not support the comp transform.
 *   - Returns -ENOMEM if the private stream could not be allocated.
 */
__rte_experimental
int
rte_compressdev_stream_create(uint8_t dev_id,
		const struct rte_comp_xform *xform,
		void **stream);

/**
 * This should clear the stream and return it to the device's mempool.
 *
 * @param dev_id
 *   Compress device identifier
 *
 * @param stream
 *   PMD's private stream data
 *
 * @return
 *   - 0 if successful.
 *   - <0 in error cases.
 *   - Returns -EINVAL if input parameters are invalid.
 *   - Returns -ENOTSUP if comp device does not support STATEFUL operations.
 *   - Returns -EBUSY if the stream cannot be freed because there are
 *     in-flight operations using it.
 */
__rte_experimental
int
rte_compressdev_stream_free(uint8_t dev_id, void *stream);

/**
 * This should allocate a private_xform from the device's mempool and
 * initialise it. The application should call this API when setting up for
 * stateless processing on a device. If it returns non-shareable, then the
 * application cannot share this handle with multiple in-flight ops and
 * should call this API again to get a separate handle for every in-flight op.
 * The handle returned is only valid for use with ops of op_type STATELESS.
 *
 * @param dev_id
 *   Compress device identifier
 * @param xform
 *   xform data
 * @param private_xform
 *   Pointer to where the PMD's private_xform handle should be stored
 *
 * @return
 *   - 0 if successful and valid private_xform handle.
 *   - <0 in error cases.
 *   - Returns -EINVAL if input parameters are invalid.
 *   - Returns -ENOTSUP if comp device does not support the comp transform.
 *   - Returns -ENOMEM if the private_xform could not be allocated.
 */
__rte_experimental
int
rte_compressdev_private_xform_create(uint8_t dev_id,
		const struct rte_comp_xform *xform,
		void **private_xform);
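
/*
 * Illustrative stateless usage sketch tying the calls above together. The
 * device/queue identifiers, the op mempool (op_pool) and the source and
 * destination mbufs are assumed to be set up elsewhere; the xform and op
 * field names used here are those of struct rte_comp_xform and struct
 * rte_comp_op from rte_comp.h. A DEFLATE private_xform is created, attached
 * to a STATELESS op and the op is enqueued for processing; the op is later
 * retrieved with rte_compressdev_dequeue_burst() and its status, consumed
 * and produced fields examined.
 *
 *	struct rte_comp_xform xform = {
 *		.type = RTE_COMP_COMPRESS,
 *		.compress = {
 *			.algo = RTE_COMP_ALGO_DEFLATE,
 *			.deflate.huffman = RTE_COMP_HUFFMAN_DEFAULT,
 *			.level = RTE_COMP_LEVEL_DEFAULT,
 *			.chksum = RTE_COMP_CHECKSUM_NONE,
 *		},
 *	};
 *	void *priv_xform = NULL;
 *	struct rte_comp_op *op;
 *	uint16_t nb_enq;
 *
 *	if (rte_compressdev_private_xform_create(dev_id, &xform,
 *			&priv_xform) < 0)
 *		return -1;
 *
 *	op = rte_comp_op_alloc(op_pool);
 *	op->op_type = RTE_COMP_OP_STATELESS;
 *	op->m_src = src_mbuf;
 *	op->m_dst = dst_mbuf;
 *	op->src.offset = 0;
 *	op->src.length = rte_pktmbuf_data_len(src_mbuf);
 *	op->dst.offset = 0;
 *	op->flush_flag = RTE_COMP_FLUSH_FINAL;
 *	op->private_xform = priv_xform;
 *
 *	nb_enq = rte_compressdev_enqueue_burst(dev_id, 0, &op, 1);
 */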

/**
 * This should clear the private_xform and return it to the device's mempool.
 * It is the application's responsibility to ensure that private_xform data
 * is not cleared while there are still in-flight operations using it.
 *
 * @param dev_id
 *   Compress device identifier
 *
 * @param private_xform
 *   PMD's private_xform data
 *
 * @return
 *   - 0 if successful.
 *   - <0 in error cases.
 *   - Returns -EINVAL if input parameters are invalid.
 */
__rte_experimental
int
rte_compressdev_private_xform_free(uint8_t dev_id, void *private_xform);

#ifdef __cplusplus
}
#endif

#endif /* _RTE_COMPRESSDEV_H_ */