1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2016-2020 Intel Corporation 3 */ 4 5 #ifndef _RTE_CRYPTO_SYM_H_ 6 #define _RTE_CRYPTO_SYM_H_ 7 8 /** 9 * @file rte_crypto_sym.h 10 * 11 * RTE Definitions for Symmetric Cryptography 12 * 13 * Defines symmetric cipher and authentication algorithms and modes, as well 14 * as supported symmetric crypto operation combinations. 15 */ 16 17 #ifdef __cplusplus 18 extern "C" { 19 #endif 20 21 #include <string.h> 22 23 #include <rte_mbuf.h> 24 #include <rte_memory.h> 25 #include <rte_mempool.h> 26 #include <rte_common.h> 27 28 /** 29 * Crypto IO Vector (in analogy with struct iovec) 30 * Supposed be used to pass input/output data buffers for crypto data-path 31 * functions. 32 */ 33 struct rte_crypto_vec { 34 /** virtual address of the data buffer */ 35 void *base; 36 /** IOVA of the data buffer */ 37 rte_iova_t iova; 38 /** length of the data buffer */ 39 uint32_t len; 40 /** total buffer length */ 41 uint32_t tot_len; 42 }; 43 44 /** 45 * Crypto scatter-gather list descriptor. Consists of a pointer to an array 46 * of Crypto IO vectors with its size. 47 */ 48 struct rte_crypto_sgl { 49 /** start of an array of vectors */ 50 struct rte_crypto_vec *vec; 51 /** size of an array of vectors */ 52 uint32_t num; 53 }; 54 55 /** 56 * Crypto virtual and IOVA address descriptor, used to describe cryptographic 57 * data buffer without the length information. The length information is 58 * normally predefined during session creation. 59 */ 60 struct rte_crypto_va_iova_ptr { 61 void *va; 62 rte_iova_t iova; 63 }; 64 65 /** 66 * Raw data operation descriptor. 67 * Supposed to be used with synchronous CPU crypto API call or asynchronous 68 * RAW data path API call. 69 */ 70 struct rte_crypto_sym_vec { 71 /** number of operations to perform */ 72 uint32_t num; 73 /** array of SGL vectors */ 74 struct rte_crypto_sgl *src_sgl; 75 /** array of SGL vectors for OOP, keep it NULL for inplace*/ 76 struct rte_crypto_sgl *dest_sgl; 77 /** array of pointers to cipher IV */ 78 struct rte_crypto_va_iova_ptr *iv; 79 /** array of pointers to digest */ 80 struct rte_crypto_va_iova_ptr *digest; 81 82 __extension__ 83 union { 84 /** array of pointers to auth IV, used for chain operation */ 85 struct rte_crypto_va_iova_ptr *auth_iv; 86 /** array of pointers to AAD, used for AEAD operation */ 87 struct rte_crypto_va_iova_ptr *aad; 88 }; 89 90 /** 91 * array of statuses for each operation: 92 * - 0 on success 93 * - errno on error 94 */ 95 int32_t *status; 96 }; 97 98 /** 99 * used for cpu_crypto_process_bulk() to specify head/tail offsets 100 * for auth/cipher processing. 101 */ 102 union rte_crypto_sym_ofs { 103 uint64_t raw; 104 struct { 105 struct { 106 uint16_t head; 107 uint16_t tail; 108 } auth, cipher; 109 } ofs; 110 }; 111 112 /** Symmetric Cipher Algorithms 113 * 114 * Note, to avoid ABI breakage across releases 115 * - LIST_END should not be added to this enum 116 * - the order of enums should not be changed 117 * - new algorithms should only be added to the end 118 */ 119 enum rte_crypto_cipher_algorithm { 120 RTE_CRYPTO_CIPHER_NULL = 1, 121 /**< NULL cipher algorithm. No mode applies to the NULL algorithm. */ 122 123 RTE_CRYPTO_CIPHER_3DES_CBC, 124 /**< Triple DES algorithm in CBC mode */ 125 RTE_CRYPTO_CIPHER_3DES_CTR, 126 /**< Triple DES algorithm in CTR mode */ 127 RTE_CRYPTO_CIPHER_3DES_ECB, 128 /**< Triple DES algorithm in ECB mode */ 129 130 RTE_CRYPTO_CIPHER_AES_CBC, 131 /**< AES algorithm in CBC mode */ 132 RTE_CRYPTO_CIPHER_AES_CTR, 133 /**< AES algorithm in Counter mode */ 134 RTE_CRYPTO_CIPHER_AES_ECB, 135 /**< AES algorithm in ECB mode */ 136 RTE_CRYPTO_CIPHER_AES_F8, 137 /**< AES algorithm in F8 mode */ 138 RTE_CRYPTO_CIPHER_AES_XTS, 139 /**< AES algorithm in XTS mode */ 140 141 RTE_CRYPTO_CIPHER_ARC4, 142 /**< (A)RC4 cipher algorithm */ 143 144 RTE_CRYPTO_CIPHER_KASUMI_F8, 145 /**< KASUMI algorithm in F8 mode */ 146 147 RTE_CRYPTO_CIPHER_SNOW3G_UEA2, 148 /**< SNOW 3G algorithm in UEA2 mode */ 149 150 RTE_CRYPTO_CIPHER_ZUC_EEA3, 151 /**< ZUC algorithm in EEA3 mode */ 152 153 RTE_CRYPTO_CIPHER_DES_CBC, 154 /**< DES algorithm in CBC mode */ 155 156 RTE_CRYPTO_CIPHER_AES_DOCSISBPI, 157 /**< AES algorithm using modes required by 158 * DOCSIS Baseline Privacy Plus Spec. 159 * Chained mbufs are not supported in this mode, i.e. rte_mbuf.next 160 * for m_src and m_dst in the rte_crypto_sym_op must be NULL. 161 */ 162 163 RTE_CRYPTO_CIPHER_DES_DOCSISBPI, 164 /**< DES algorithm using modes required by 165 * DOCSIS Baseline Privacy Plus Spec. 166 * Chained mbufs are not supported in this mode, i.e. rte_mbuf.next 167 * for m_src and m_dst in the rte_crypto_sym_op must be NULL. 168 */ 169 170 RTE_CRYPTO_CIPHER_SM4_ECB, 171 /**< ShangMi 4 (SM4) algorithm in ECB mode */ 172 RTE_CRYPTO_CIPHER_SM4_CBC, 173 /**< ShangMi 4 (SM4) algorithm in CBC mode */ 174 RTE_CRYPTO_CIPHER_SM4_CTR 175 /**< ShangMi 4 (SM4) algorithm in CTR mode */ 176 }; 177 178 /** Cipher algorithm name strings */ 179 extern const char * 180 rte_crypto_cipher_algorithm_strings[]; 181 182 /** Symmetric Cipher Direction */ 183 enum rte_crypto_cipher_operation { 184 RTE_CRYPTO_CIPHER_OP_ENCRYPT, 185 /**< Encrypt cipher operation */ 186 RTE_CRYPTO_CIPHER_OP_DECRYPT 187 /**< Decrypt cipher operation */ 188 }; 189 190 /** Cipher operation name strings */ 191 extern const char * 192 rte_crypto_cipher_operation_strings[]; 193 194 /** 195 * Symmetric Cipher Setup Data. 196 * 197 * This structure contains data relating to Cipher (Encryption and Decryption) 198 * use to create a session. 199 */ 200 struct rte_crypto_cipher_xform { 201 enum rte_crypto_cipher_operation op; 202 /**< This parameter determines if the cipher operation is an encrypt or 203 * a decrypt operation. For the RC4 algorithm and the F8/CTR modes, 204 * only encrypt operations are valid. 205 */ 206 enum rte_crypto_cipher_algorithm algo; 207 /**< Cipher algorithm */ 208 209 struct { 210 const uint8_t *data; /**< pointer to key data */ 211 uint16_t length; /**< key length in bytes */ 212 } key; 213 /**< Cipher key 214 * 215 * In case the PMD supports RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY, the 216 * original key data provided may be wrapped(encrypted) using key wrap 217 * algorithm such as AES key wrap (rfc3394) and hence length of the key 218 * may increase beyond the PMD advertised supported key size. 219 * PMD shall validate the key length and report EMSGSIZE error while 220 * configuring the session and application can skip checking the 221 * capability key length in such cases. 222 * 223 * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.data will 224 * point to a concatenation of the AES encryption key followed by a 225 * keymask. As per RFC3711, the keymask should be padded with trailing 226 * bytes to match the length of the encryption key used. 227 * 228 * Cipher key length is in bytes. For AES it can be 128 bits (16 bytes), 229 * 192 bits (24 bytes) or 256 bits (32 bytes). 230 * 231 * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.length 232 * should be set to the combined length of the encryption key and the 233 * keymask. Since the keymask and the encryption key are the same size, 234 * key.length should be set to 2 x the AES encryption key length. 235 * 236 * For the AES-XTS mode of operation: 237 * - Two keys must be provided and key.length refers to total length of 238 * the two keys. 239 * - key.data must point to the two keys concatenated together 240 * (key1 || key2). 241 * - Each key can be either 128 bits (16 bytes) or 256 bits (32 bytes). 242 * - Both keys must have the same size. 243 **/ 244 struct { 245 uint16_t offset; 246 /**< Starting point for Initialisation Vector or Counter, 247 * specified as number of bytes from start of crypto 248 * operation (rte_crypto_op). 249 * 250 * - For block ciphers in CBC or F8 mode, or for KASUMI 251 * in F8 mode, or for SNOW 3G in UEA2 mode, this is the 252 * Initialisation Vector (IV) value. 253 * 254 * - For block ciphers in CTR mode, this is the counter. 255 * 256 * - For CCM mode, the first byte is reserved, and the 257 * nonce should be written starting at &iv[1] (to allow 258 * space for the implementation to write in the flags 259 * in the first byte). Note that a full 16 bytes should 260 * be allocated, even though the length field will 261 * have a value less than this. Note that the PMDs may 262 * modify the memory reserved (the first byte and the 263 * final padding) 264 * 265 * - For AES-XTS, this is the 128bit tweak, i, from 266 * IEEE Std 1619-2007. 267 * 268 * For optimum performance, the data pointed to SHOULD 269 * be 8-byte aligned. 270 */ 271 uint16_t length; 272 /**< Length of valid IV data. 273 * 274 * - For block ciphers in CBC or F8 mode, or for KASUMI 275 * in F8 mode, or for SNOW 3G in UEA2 mode, this is the 276 * length of the IV (which must be the same as the 277 * block length of the cipher). 278 * 279 * - For block ciphers in CTR mode, this is the length 280 * of the counter (which must be the same as the block 281 * length of the cipher). 282 * 283 * - For CCM mode, this is the length of the nonce, 284 * which can be in the range 7 to 13 inclusive. 285 */ 286 } iv; /**< Initialisation vector parameters */ 287 288 uint32_t dataunit_len; 289 /**< When RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS is enabled, 290 * this is the data-unit length of the algorithm, 291 * otherwise or when the value is 0, use the operation length. 292 * The value should be in the range defined by the dataunit_set field 293 * in the cipher capability. 294 * 295 * - For AES-XTS it is the size of data-unit, from IEEE Std 1619-2007. 296 * For-each data-unit in the operation, the tweak (IV) value is 297 * assigned consecutively starting from the operation assigned IV. 298 */ 299 }; 300 301 /** Symmetric Authentication / Hash Algorithms 302 * 303 * Note, to avoid ABI breakage across releases 304 * - LIST_END should not be added to this enum 305 * - the order of enums should not be changed 306 * - new algorithms should only be added to the end 307 */ 308 enum rte_crypto_auth_algorithm { 309 RTE_CRYPTO_AUTH_NULL = 1, 310 /**< NULL hash algorithm. */ 311 312 RTE_CRYPTO_AUTH_AES_CBC_MAC, 313 /**< AES-CBC-MAC algorithm. Only 128-bit keys are supported. */ 314 RTE_CRYPTO_AUTH_AES_CMAC, 315 /**< AES CMAC algorithm. */ 316 RTE_CRYPTO_AUTH_AES_GMAC, 317 /**< AES GMAC algorithm. */ 318 RTE_CRYPTO_AUTH_AES_XCBC_MAC, 319 /**< AES XCBC algorithm. */ 320 321 RTE_CRYPTO_AUTH_KASUMI_F9, 322 /**< KASUMI algorithm in F9 mode. */ 323 324 RTE_CRYPTO_AUTH_MD5, 325 /**< MD5 algorithm */ 326 RTE_CRYPTO_AUTH_MD5_HMAC, 327 /**< HMAC using MD5 algorithm */ 328 329 RTE_CRYPTO_AUTH_SHA1, 330 /**< 160 bit SHA algorithm. */ 331 RTE_CRYPTO_AUTH_SHA1_HMAC, 332 /**< HMAC using 160 bit SHA algorithm. 333 * HMAC-SHA-1-96 can be generated by setting 334 * digest_length to 12 bytes in auth/aead xforms. 335 */ 336 RTE_CRYPTO_AUTH_SHA224, 337 /**< 224 bit SHA algorithm. */ 338 RTE_CRYPTO_AUTH_SHA224_HMAC, 339 /**< HMAC using 224 bit SHA algorithm. */ 340 RTE_CRYPTO_AUTH_SHA256, 341 /**< 256 bit SHA algorithm. */ 342 RTE_CRYPTO_AUTH_SHA256_HMAC, 343 /**< HMAC using 256 bit SHA algorithm. */ 344 RTE_CRYPTO_AUTH_SHA384, 345 /**< 384 bit SHA algorithm. */ 346 RTE_CRYPTO_AUTH_SHA384_HMAC, 347 /**< HMAC using 384 bit SHA algorithm. */ 348 RTE_CRYPTO_AUTH_SHA512, 349 /**< 512 bit SHA algorithm. */ 350 RTE_CRYPTO_AUTH_SHA512_HMAC, 351 /**< HMAC using 512 bit SHA algorithm. */ 352 353 RTE_CRYPTO_AUTH_SNOW3G_UIA2, 354 /**< SNOW 3G algorithm in UIA2 mode. */ 355 356 RTE_CRYPTO_AUTH_ZUC_EIA3, 357 /**< ZUC algorithm in EIA3 mode */ 358 359 RTE_CRYPTO_AUTH_SHA3_224, 360 /**< 224 bit SHA3 algorithm. */ 361 RTE_CRYPTO_AUTH_SHA3_224_HMAC, 362 /**< HMAC using 224 bit SHA3 algorithm. */ 363 RTE_CRYPTO_AUTH_SHA3_256, 364 /**< 256 bit SHA3 algorithm. */ 365 RTE_CRYPTO_AUTH_SHA3_256_HMAC, 366 /**< HMAC using 256 bit SHA3 algorithm. */ 367 RTE_CRYPTO_AUTH_SHA3_384, 368 /**< 384 bit SHA3 algorithm. */ 369 RTE_CRYPTO_AUTH_SHA3_384_HMAC, 370 /**< HMAC using 384 bit SHA3 algorithm. */ 371 RTE_CRYPTO_AUTH_SHA3_512, 372 /**< 512 bit SHA3 algorithm. */ 373 RTE_CRYPTO_AUTH_SHA3_512_HMAC 374 /**< HMAC using 512 bit SHA3 algorithm. */ 375 }; 376 377 /** Authentication algorithm name strings */ 378 extern const char * 379 rte_crypto_auth_algorithm_strings[]; 380 381 /** Symmetric Authentication / Hash Operations */ 382 enum rte_crypto_auth_operation { 383 RTE_CRYPTO_AUTH_OP_VERIFY, /**< Verify authentication digest */ 384 RTE_CRYPTO_AUTH_OP_GENERATE /**< Generate authentication digest */ 385 }; 386 387 /** Authentication operation name strings */ 388 extern const char * 389 rte_crypto_auth_operation_strings[]; 390 391 /** 392 * Authentication / Hash transform data. 393 * 394 * This structure contains data relating to an authentication/hash crypto 395 * transforms. The fields op, algo and digest_length are common to all 396 * authentication transforms and MUST be set. 397 */ 398 struct rte_crypto_auth_xform { 399 enum rte_crypto_auth_operation op; 400 /**< Authentication operation type */ 401 enum rte_crypto_auth_algorithm algo; 402 /**< Authentication algorithm selection */ 403 404 struct { 405 const uint8_t *data; /**< pointer to key data */ 406 uint16_t length; /**< key length in bytes */ 407 } key; 408 /**< Authentication key data. 409 * The authentication key length MUST be less than or equal to the 410 * block size of the algorithm. It is the callers responsibility to 411 * ensure that the key length is compliant with the standard being used 412 * (for example RFC 2104, FIPS 198a). 413 */ 414 415 struct { 416 uint16_t offset; 417 /**< Starting point for Initialisation Vector or Counter, 418 * specified as number of bytes from start of crypto 419 * operation (rte_crypto_op). 420 * 421 * - For SNOW 3G in UIA2 mode, for ZUC in EIA3 mode 422 * this is the authentication Initialisation Vector 423 * (IV) value. For AES-GMAC IV description please refer 424 * to the field `length` in iv struct. 425 * 426 * - For KASUMI in F9 mode and other authentication 427 * algorithms, this field is not used. 428 * 429 * For optimum performance, the data pointed to SHOULD 430 * be 8-byte aligned. 431 */ 432 uint16_t length; 433 /**< Length of valid IV data. 434 * 435 * - For SNOW3G in UIA2 mode, for ZUC in EIA3 mode and 436 * for AES-GMAC, this is the length of the IV. 437 * 438 * - For KASUMI in F9 mode and other authentication 439 * algorithms, this field is not used. 440 * 441 * - For GMAC mode, this is either: 442 * 1) Number greater or equal to one, which means that IV 443 * is used and J0 will be computed internally, a minimum 444 * of 16 bytes must be allocated. 445 * 2) Zero, in which case data points to J0. In this case 446 * 16 bytes of J0 should be passed where J0 is defined 447 * by NIST SP800-38D. 448 * 449 */ 450 } iv; /**< Initialisation vector parameters */ 451 452 uint16_t digest_length; 453 /**< Length of the digest to be returned. If the verify option is set, 454 * this specifies the length of the digest to be compared for the 455 * session. 456 * 457 * It is the caller's responsibility to ensure that the 458 * digest length is compliant with the hash algorithm being used. 459 * If the value is less than the maximum length allowed by the hash, 460 * the result shall be truncated. 461 */ 462 }; 463 464 465 /** Symmetric AEAD Algorithms 466 * 467 * Note, to avoid ABI breakage across releases 468 * - LIST_END should not be added to this enum 469 * - the order of enums should not be changed 470 * - new algorithms should only be added to the end 471 */ 472 enum rte_crypto_aead_algorithm { 473 RTE_CRYPTO_AEAD_AES_CCM = 1, 474 /**< AES algorithm in CCM mode. */ 475 RTE_CRYPTO_AEAD_AES_GCM, 476 /**< AES algorithm in GCM mode. */ 477 RTE_CRYPTO_AEAD_CHACHA20_POLY1305 478 /**< Chacha20 cipher with poly1305 authenticator */ 479 }; 480 481 /** AEAD algorithm name strings */ 482 extern const char * 483 rte_crypto_aead_algorithm_strings[]; 484 485 /** Symmetric AEAD Operations */ 486 enum rte_crypto_aead_operation { 487 RTE_CRYPTO_AEAD_OP_ENCRYPT, 488 /**< Encrypt and generate digest */ 489 RTE_CRYPTO_AEAD_OP_DECRYPT 490 /**< Verify digest and decrypt */ 491 }; 492 493 /** Authentication operation name strings */ 494 extern const char * 495 rte_crypto_aead_operation_strings[]; 496 497 struct rte_crypto_aead_xform { 498 enum rte_crypto_aead_operation op; 499 /**< AEAD operation type */ 500 enum rte_crypto_aead_algorithm algo; 501 /**< AEAD algorithm selection */ 502 503 struct { 504 const uint8_t *data; /**< pointer to key data */ 505 uint16_t length; /**< key length in bytes */ 506 } key; 507 508 struct { 509 uint16_t offset; 510 /**< Starting point for Initialisation Vector or Counter, 511 * specified as number of bytes from start of crypto 512 * operation (rte_crypto_op). 513 * 514 * - For CCM mode, the first byte is reserved, and the 515 * nonce should be written starting at &iv[1] (to allow 516 * space for the implementation to write in the flags 517 * in the first byte). Note that a full 16 bytes should 518 * be allocated, even though the length field will 519 * have a value less than this. 520 * 521 * - For Chacha20-Poly1305 it is 96-bit nonce. 522 * PMD sets initial counter for Poly1305 key generation 523 * part to 0 and for Chacha20 encryption to 1 as per 524 * rfc8439 2.8. AEAD construction. 525 * 526 * For optimum performance, the data pointed to SHOULD 527 * be 8-byte aligned. 528 */ 529 uint16_t length; 530 /**< Length of valid IV data. 531 * 532 * - For GCM mode, this is either: 533 * 1) Number greater or equal to one, which means that IV 534 * is used and J0 will be computed internally, a minimum 535 * of 16 bytes must be allocated. 536 * 2) Zero, in which case data points to J0. In this case 537 * 16 bytes of J0 should be passed where J0 is defined 538 * by NIST SP800-38D. 539 * 540 * - For CCM mode, this is the length of the nonce, 541 * which can be in the range 7 to 13 inclusive. 542 * 543 * - For Chacha20-Poly1305 this field is always 12. 544 */ 545 } iv; /**< Initialisation vector parameters */ 546 547 uint16_t digest_length; 548 549 uint16_t aad_length; 550 /**< The length of the additional authenticated data (AAD) in bytes. 551 * For CCM mode, this is the length of the actual AAD, even though 552 * it is required to reserve 18 bytes before the AAD and padding 553 * at the end of it, so a multiple of 16 bytes is allocated. 554 */ 555 }; 556 557 /** Crypto transformation types */ 558 enum rte_crypto_sym_xform_type { 559 RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED = 0, /**< No xform specified */ 560 RTE_CRYPTO_SYM_XFORM_AUTH, /**< Authentication xform */ 561 RTE_CRYPTO_SYM_XFORM_CIPHER, /**< Cipher xform */ 562 RTE_CRYPTO_SYM_XFORM_AEAD /**< AEAD xform */ 563 }; 564 565 /** 566 * Symmetric crypto transform structure. 567 * 568 * This is used to specify the crypto transforms required, multiple transforms 569 * can be chained together to specify a chain transforms such as authentication 570 * then cipher, or cipher then authentication. Each transform structure can 571 * hold a single transform, the type field is used to specify which transform 572 * is contained within the union 573 */ 574 struct rte_crypto_sym_xform { 575 struct rte_crypto_sym_xform *next; 576 /**< next xform in chain */ 577 enum rte_crypto_sym_xform_type type 578 ; /**< xform type */ 579 RTE_STD_C11 580 union { 581 struct rte_crypto_auth_xform auth; 582 /**< Authentication / hash xform */ 583 struct rte_crypto_cipher_xform cipher; 584 /**< Cipher xform */ 585 struct rte_crypto_aead_xform aead; 586 /**< AEAD xform */ 587 }; 588 }; 589 590 struct rte_cryptodev_sym_session; 591 592 /** 593 * Symmetric Cryptographic Operation. 594 * 595 * This structure contains data relating to performing symmetric cryptographic 596 * processing on a referenced mbuf data buffer. 597 * 598 * When a symmetric crypto operation is enqueued with the device for processing 599 * it must have a valid *rte_mbuf* structure attached, via m_src parameter, 600 * which contains the source data which the crypto operation is to be performed 601 * on. 602 * While the mbuf is in use by a crypto operation no part of the mbuf should be 603 * changed by the application as the device may read or write to any part of the 604 * mbuf. In the case of hardware crypto devices some or all of the mbuf 605 * may be DMAed in and out of the device, so writing over the original data, 606 * though only the part specified by the rte_crypto_sym_op for transformation 607 * will be changed. 608 * Out-of-place (OOP) operation, where the source mbuf is different to the 609 * destination mbuf, is a special case. Data will be copied from m_src to m_dst. 610 * The part copied includes all the parts of the source mbuf that will be 611 * operated on, based on the cipher.data.offset+cipher.data.length and 612 * auth.data.offset+auth.data.length values in the rte_crypto_sym_op. The part 613 * indicated by the cipher parameters will be transformed, any extra data around 614 * this indicated by the auth parameters will be copied unchanged from source to 615 * destination mbuf. 616 * Also in OOP operation the cipher.data.offset and auth.data.offset apply to 617 * both source and destination mbufs. As these offsets are relative to the 618 * data_off parameter in each mbuf this can result in the data written to the 619 * destination buffer being at a different alignment, relative to buffer start, 620 * to the data in the source buffer. 621 */ 622 struct rte_crypto_sym_op { 623 struct rte_mbuf *m_src; /**< source mbuf */ 624 struct rte_mbuf *m_dst; /**< destination mbuf */ 625 626 RTE_STD_C11 627 union { 628 struct rte_cryptodev_sym_session *session; 629 /**< Handle for the initialised session context */ 630 struct rte_crypto_sym_xform *xform; 631 /**< Session-less API crypto operation parameters */ 632 struct rte_security_session *sec_session; 633 /**< Handle for the initialised security session context */ 634 }; 635 636 RTE_STD_C11 637 union { 638 struct { 639 struct { 640 uint32_t offset; 641 /**< Starting point for AEAD processing, specified as 642 * number of bytes from start of packet in source 643 * buffer. 644 */ 645 uint32_t length; 646 /**< The message length, in bytes, of the source buffer 647 * on which the cryptographic operation will be 648 * computed. This must be a multiple of the block size 649 */ 650 } data; /**< Data offsets and length for AEAD */ 651 struct { 652 uint8_t *data; 653 /**< This points to the location where the digest result 654 * should be inserted (in the case of digest generation) 655 * or where the purported digest exists (in the case of 656 * digest verification). 657 * 658 * At session creation time, the client specified the 659 * digest result length with the digest_length member 660 * of the @ref rte_crypto_auth_xform structure. For 661 * physical crypto devices the caller must allocate at 662 * least digest_length of physically contiguous memory 663 * at this location. 664 * 665 * For digest generation, the digest result will 666 * overwrite any data at this location. 667 * 668 * @note 669 * For GCM (@ref RTE_CRYPTO_AEAD_AES_GCM), for 670 * "digest result" read "authentication tag T". 671 */ 672 rte_iova_t phys_addr; 673 /**< Physical address of digest */ 674 } digest; /**< Digest parameters */ 675 struct { 676 uint8_t *data; 677 /**< Pointer to Additional Authenticated Data (AAD) 678 * needed for authenticated cipher mechanisms (CCM and 679 * GCM) 680 * 681 * Specifically for CCM (@ref RTE_CRYPTO_AEAD_AES_CCM), 682 * the caller should setup this field as follows: 683 * 684 * - the additional authentication data itself should 685 * be written starting at an offset of 18 bytes into 686 * the array, leaving room for the first block (16 bytes) 687 * and the length encoding in the first two bytes of the 688 * second block. 689 * 690 * - the array should be big enough to hold the above 691 * fields, plus any padding to round this up to the 692 * nearest multiple of the block size (16 bytes). 693 * Padding will be added by the implementation. 694 * 695 * - Note that PMDs may modify the memory reserved 696 * (first 18 bytes and the final padding). 697 * 698 * Finally, for GCM (@ref RTE_CRYPTO_AEAD_AES_GCM), the 699 * caller should setup this field as follows: 700 * 701 * - the AAD is written in starting at byte 0 702 * - the array must be big enough to hold the AAD, plus 703 * any space to round this up to the nearest multiple 704 * of the block size (16 bytes). 705 * 706 */ 707 rte_iova_t phys_addr; /**< physical address */ 708 } aad; 709 /**< Additional authentication parameters */ 710 } aead; 711 712 struct { 713 struct { 714 struct { 715 uint32_t offset; 716 /**< Starting point for cipher processing, 717 * specified as number of bytes from start 718 * of data in the source buffer. 719 * The result of the cipher operation will be 720 * written back into the output buffer 721 * starting at this location. 722 * 723 * @note 724 * For SNOW 3G @ RTE_CRYPTO_CIPHER_SNOW3G_UEA2, 725 * KASUMI @ RTE_CRYPTO_CIPHER_KASUMI_F8 726 * and ZUC @ RTE_CRYPTO_CIPHER_ZUC_EEA3, 727 * this field should be in bits. For 728 * digest-encrypted cases this must be 729 * an 8-bit multiple. 730 */ 731 uint32_t length; 732 /**< The message length, in bytes, of the 733 * source buffer on which the cryptographic 734 * operation will be computed. 735 * This is also the same as the result length. 736 * This must be a multiple of the block size 737 * or a multiple of data-unit length 738 * as described in xform. 739 * 740 * @note 741 * For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UEA2, 742 * KASUMI @ RTE_CRYPTO_CIPHER_KASUMI_F8 743 * and ZUC @ RTE_CRYPTO_CIPHER_ZUC_EEA3, 744 * this field should be in bits. For 745 * digest-encrypted cases this must be 746 * an 8-bit multiple. 747 */ 748 } data; /**< Data offsets and length for ciphering */ 749 } cipher; 750 751 struct { 752 struct { 753 uint32_t offset; 754 /**< Starting point for hash processing, 755 * specified as number of bytes from start of 756 * packet in source buffer. 757 * 758 * @note 759 * For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UIA2, 760 * KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9 761 * and ZUC @ RTE_CRYPTO_AUTH_ZUC_EIA3, 762 * this field should be in bits. For 763 * digest-encrypted cases this must be 764 * an 8-bit multiple. 765 * 766 * @note 767 * For KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9, 768 * this offset should be such that 769 * data to authenticate starts at COUNT. 770 * 771 * @note 772 * For DOCSIS security protocol, this 773 * offset is the DOCSIS header length 774 * and, therefore, also the CRC offset 775 * i.e. the number of bytes into the 776 * packet at which CRC calculation 777 * should begin. 778 */ 779 uint32_t length; 780 /**< The message length, in bytes, of the source 781 * buffer that the hash will be computed on. 782 * 783 * @note 784 * For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UIA2, 785 * KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9 786 * and ZUC @ RTE_CRYPTO_AUTH_ZUC_EIA3, 787 * this field should be in bits. For 788 * digest-encrypted cases this must be 789 * an 8-bit multiple. 790 * 791 * @note 792 * For KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9, 793 * the length should include the COUNT, 794 * FRESH, message, direction bit and padding 795 * (to be multiple of 8 bits). 796 * 797 * @note 798 * For DOCSIS security protocol, this 799 * is the CRC length i.e. the number of 800 * bytes in the packet over which the 801 * CRC should be calculated 802 */ 803 } data; 804 /**< Data offsets and length for authentication */ 805 806 struct { 807 uint8_t *data; 808 /**< This points to the location where 809 * the digest result should be inserted 810 * (in the case of digest generation) 811 * or where the purported digest exists 812 * (in the case of digest verification). 813 * 814 * At session creation time, the client 815 * specified the digest result length with 816 * the digest_length member of the 817 * @ref rte_crypto_auth_xform structure. 818 * For physical crypto devices the caller 819 * must allocate at least digest_length of 820 * physically contiguous memory at this 821 * location. 822 * 823 * For digest generation, the digest result 824 * will overwrite any data at this location. 825 * 826 * @note 827 * Digest-encrypted case. 828 * Digest can be generated, appended to 829 * the end of raw data and encrypted 830 * together using chained digest 831 * generation 832 * (@ref RTE_CRYPTO_AUTH_OP_GENERATE) 833 * and encryption 834 * (@ref RTE_CRYPTO_CIPHER_OP_ENCRYPT) 835 * xforms. Similarly, authentication 836 * of the raw data against appended, 837 * decrypted digest, can be performed 838 * using decryption 839 * (@ref RTE_CRYPTO_CIPHER_OP_DECRYPT) 840 * and digest verification 841 * (@ref RTE_CRYPTO_AUTH_OP_VERIFY) 842 * chained xforms. 843 * To perform those operations, a few 844 * additional conditions must be met: 845 * - caller must allocate at least 846 * digest_length of memory at the end of 847 * source and (in case of out-of-place 848 * operations) destination buffer; those 849 * buffers can be linear or split using 850 * scatter-gather lists, 851 * - digest data pointer must point to 852 * the end of source or (in case of 853 * out-of-place operations) destination 854 * data, which is pointer to the 855 * data buffer + auth.data.offset + 856 * auth.data.length, 857 * - cipher.data.offset + 858 * cipher.data.length must be greater 859 * than auth.data.offset + 860 * auth.data.length and is typically 861 * equal to auth.data.offset + 862 * auth.data.length + digest_length. 863 * - for wireless algorithms, i.e. 864 * SNOW 3G, KASUMI and ZUC, as the 865 * cipher.data.length, 866 * cipher.data.offset, 867 * auth.data.length and 868 * auth.data.offset are in bits, they 869 * must be 8-bit multiples. 870 * 871 * Note, that for security reasons, it 872 * is PMDs' responsibility to not 873 * leave an unencrypted digest in any 874 * buffer after performing auth-cipher 875 * operations. 876 * 877 */ 878 rte_iova_t phys_addr; 879 /**< Physical address of digest */ 880 } digest; /**< Digest parameters */ 881 } auth; 882 }; 883 }; 884 }; 885 886 887 /** 888 * Reset the fields of a symmetric operation to their default values. 889 * 890 * @param op The crypto operation to be reset. 891 */ 892 static inline void 893 __rte_crypto_sym_op_reset(struct rte_crypto_sym_op *op) 894 { 895 memset(op, 0, sizeof(*op)); 896 } 897 898 899 /** 900 * Allocate space for symmetric crypto xforms in the private data space of the 901 * crypto operation. This also defaults the crypto xform type to 902 * RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED and configures the chaining of the xforms 903 * in the crypto operation 904 * 905 * @return 906 * - On success returns pointer to first crypto xform in crypto operations chain 907 * - On failure returns NULL 908 */ 909 static inline struct rte_crypto_sym_xform * 910 __rte_crypto_sym_op_sym_xforms_alloc(struct rte_crypto_sym_op *sym_op, 911 void *priv_data, uint8_t nb_xforms) 912 { 913 struct rte_crypto_sym_xform *xform; 914 915 sym_op->xform = xform = (struct rte_crypto_sym_xform *)priv_data; 916 917 do { 918 xform->type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED; 919 xform = xform->next = --nb_xforms > 0 ? xform + 1 : NULL; 920 } while (xform); 921 922 return sym_op->xform; 923 } 924 925 926 /** 927 * Attach a session to a symmetric crypto operation 928 * 929 * @param sym_op crypto operation 930 * @param sess cryptodev session 931 */ 932 static inline int 933 __rte_crypto_sym_op_attach_sym_session(struct rte_crypto_sym_op *sym_op, 934 struct rte_cryptodev_sym_session *sess) 935 { 936 sym_op->session = sess; 937 938 return 0; 939 } 940 941 /** 942 * Converts portion of mbuf data into a vector representation. 943 * Each segment will be represented as a separate entry in *vec* array. 944 * Expects that provided *ofs* + *len* not to exceed mbuf's *pkt_len*. 945 * @param mb 946 * Pointer to the *rte_mbuf* object. 947 * @param ofs 948 * Offset within mbuf data to start with. 949 * @param len 950 * Length of data to represent. 951 * @param vec 952 * Pointer to an output array of IO vectors. 953 * @param num 954 * Size of an output array. 955 * @return 956 * - number of successfully filled entries in *vec* array. 957 * - negative number of elements in *vec* array required. 958 */ 959 __rte_experimental 960 static inline int 961 rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len, 962 struct rte_crypto_vec vec[], uint32_t num) 963 { 964 uint32_t i; 965 struct rte_mbuf *nseg; 966 uint32_t left; 967 uint32_t seglen; 968 969 /* assuming that requested data starts in the first segment */ 970 RTE_ASSERT(mb->data_len > ofs); 971 972 if (mb->nb_segs > num) 973 return -mb->nb_segs; 974 975 vec[0].base = rte_pktmbuf_mtod_offset(mb, void *, ofs); 976 vec[0].iova = rte_pktmbuf_iova_offset(mb, ofs); 977 vec[0].tot_len = mb->buf_len - rte_pktmbuf_headroom(mb) - ofs; 978 979 /* whole data lies in the first segment */ 980 seglen = mb->data_len - ofs; 981 if (len <= seglen) { 982 vec[0].len = len; 983 return 1; 984 } 985 986 /* data spread across segments */ 987 vec[0].len = seglen; 988 left = len - seglen; 989 for (i = 1, nseg = mb->next; nseg != NULL; nseg = nseg->next, i++) { 990 991 vec[i].base = rte_pktmbuf_mtod(nseg, void *); 992 vec[i].iova = rte_pktmbuf_iova(nseg); 993 vec[i].tot_len = mb->buf_len - rte_pktmbuf_headroom(mb) - ofs; 994 995 seglen = nseg->data_len; 996 if (left <= seglen) { 997 /* whole requested data is completed */ 998 vec[i].len = left; 999 left = 0; 1000 i++; 1001 break; 1002 } 1003 1004 /* use whole segment */ 1005 vec[i].len = seglen; 1006 left -= seglen; 1007 } 1008 1009 RTE_ASSERT(left == 0); 1010 return i; 1011 } 1012 1013 1014 #ifdef __cplusplus 1015 } 1016 #endif 1017 1018 #endif /* _RTE_CRYPTO_SYM_H_ */ 1019