/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#ifndef _RTE_CRYPTO_SYM_H_
#define _RTE_CRYPTO_SYM_H_

/**
 * @file rte_crypto_sym.h
 *
 * RTE Definitions for Symmetric Cryptography
 *
 * Defines symmetric cipher and authentication algorithms and modes, as well
 * as supported symmetric crypto operation combinations.
 */

#ifdef __cplusplus
extern "C" {
#endif

#include <string.h>

#include <rte_compat.h>
#include <rte_mbuf.h>
#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_common.h>

/**
 * Crypto IO Vector (in analogy with struct iovec)
 * Intended to be used to pass input/output data buffers to crypto data-path
 * functions.
 */
struct rte_crypto_vec {
	/** virtual address of the data buffer */
	void *base;
	/** IOVA of the data buffer */
	rte_iova_t iova;
	/** length of the data buffer */
	uint32_t len;
	/** total buffer length */
	uint32_t tot_len;
};

/**
 * Crypto scatter-gather list descriptor. Consists of a pointer to an array
 * of Crypto IO vectors and its size.
 */
struct rte_crypto_sgl {
	/** start of an array of vectors */
	struct rte_crypto_vec *vec;
	/** size of an array of vectors */
	uint32_t num;
};

/**
 * Crypto virtual and IOVA address descriptor, used to describe a
 * cryptographic data buffer without the length information. The length
 * information is normally predefined during session creation.
 */
struct rte_crypto_va_iova_ptr {
	void *va;
	rte_iova_t iova;
};

/**
 * Raw data operation descriptor.
 * Intended to be used with the synchronous CPU crypto API calls or the
 * asynchronous RAW data path API calls.
 */
struct rte_crypto_sym_vec {
	/** number of operations to perform */
	uint32_t num;
	/** array of SGL vectors */
	struct rte_crypto_sgl *src_sgl;
	/** array of SGL vectors for OOP, keep it NULL for in-place operation */
	struct rte_crypto_sgl *dest_sgl;
	/** array of pointers to cipher IV */
	struct rte_crypto_va_iova_ptr *iv;
	/** array of pointers to digest */
	struct rte_crypto_va_iova_ptr *digest;

	__extension__
	union {
		/** array of pointers to auth IV, used for chain operation */
		struct rte_crypto_va_iova_ptr *auth_iv;
		/** array of pointers to AAD, used for AEAD operation */
		struct rte_crypto_va_iova_ptr *aad;
	};

	/**
	 * array of statuses for each operation:
	 * - 0 on success
	 * - errno on error
	 */
	int32_t *status;
};

/**
 * Used for cpu_crypto_process_bulk() to specify head/tail offsets
 * for auth/cipher processing.
 */
union rte_crypto_sym_ofs {
	uint64_t raw;
	struct {
		struct {
			uint16_t head;
			uint16_t tail;
		} auth, cipher;
	} ofs;
};
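
/*
 * Example (illustrative sketch, not part of the API): populating
 * union rte_crypto_sym_ofs for a chained cipher+auth operation where
 * authentication covers the whole data region and ciphering skips a
 * hypothetical 16-byte header. Head/tail values are byte counts relative
 * to the start and end of the data described by the accompanying SGL:
 *
 *	union rte_crypto_sym_ofs ofs;
 *
 *	ofs.raw = 0;
 *	ofs.ofs.auth.head = 0;
 *	ofs.ofs.auth.tail = 0;
 *	ofs.ofs.cipher.head = 16;
 *	ofs.ofs.cipher.tail = 0;
 */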

/** Symmetric Cipher Algorithms
 *
 * Note, to avoid ABI breakage across releases
 * - LIST_END should not be added to this enum
 * - the order of enums should not be changed
 * - new algorithms should only be added to the end
 */
enum rte_crypto_cipher_algorithm {
	RTE_CRYPTO_CIPHER_NULL = 1,
	/**< NULL cipher algorithm. No mode applies to the NULL algorithm. */

	RTE_CRYPTO_CIPHER_3DES_CBC,
	/**< Triple DES algorithm in CBC mode */
	RTE_CRYPTO_CIPHER_3DES_CTR,
	/**< Triple DES algorithm in CTR mode */
	RTE_CRYPTO_CIPHER_3DES_ECB,
	/**< Triple DES algorithm in ECB mode */

	RTE_CRYPTO_CIPHER_AES_CBC,
	/**< AES algorithm in CBC mode */
	RTE_CRYPTO_CIPHER_AES_CTR,
	/**< AES algorithm in Counter mode */
	RTE_CRYPTO_CIPHER_AES_ECB,
	/**< AES algorithm in ECB mode */
	RTE_CRYPTO_CIPHER_AES_F8,
	/**< AES algorithm in F8 mode */
	RTE_CRYPTO_CIPHER_AES_XTS,
	/**< AES algorithm in XTS mode */

	RTE_CRYPTO_CIPHER_ARC4,
	/**< (A)RC4 cipher algorithm */

	RTE_CRYPTO_CIPHER_KASUMI_F8,
	/**< KASUMI algorithm in F8 mode */

	RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
	/**< SNOW 3G algorithm in UEA2 mode */

	RTE_CRYPTO_CIPHER_ZUC_EEA3,
	/**< ZUC algorithm in EEA3 mode */

	RTE_CRYPTO_CIPHER_DES_CBC,
	/**< DES algorithm in CBC mode */

	RTE_CRYPTO_CIPHER_AES_DOCSISBPI,
	/**< AES algorithm using modes required by
	 * DOCSIS Baseline Privacy Plus Spec.
	 * Chained mbufs are not supported in this mode, i.e. rte_mbuf.next
	 * for m_src and m_dst in the rte_crypto_sym_op must be NULL.
	 */

	RTE_CRYPTO_CIPHER_DES_DOCSISBPI,
	/**< DES algorithm using modes required by
	 * DOCSIS Baseline Privacy Plus Spec.
	 * Chained mbufs are not supported in this mode, i.e. rte_mbuf.next
	 * for m_src and m_dst in the rte_crypto_sym_op must be NULL.
	 */

	RTE_CRYPTO_CIPHER_SM4_ECB,
	/**< ShangMi 4 (SM4) algorithm in ECB mode */
	RTE_CRYPTO_CIPHER_SM4_CBC,
	/**< ShangMi 4 (SM4) algorithm in CBC mode */
	RTE_CRYPTO_CIPHER_SM4_CTR,
	/**< ShangMi 4 (SM4) algorithm in CTR mode */
	RTE_CRYPTO_CIPHER_SM4_OFB,
	/**< ShangMi 4 (SM4) algorithm in OFB mode */
	RTE_CRYPTO_CIPHER_SM4_CFB
	/**< ShangMi 4 (SM4) algorithm in CFB mode */
};

/** Symmetric Cipher Direction */
enum rte_crypto_cipher_operation {
	RTE_CRYPTO_CIPHER_OP_ENCRYPT,
	/**< Encrypt cipher operation */
	RTE_CRYPTO_CIPHER_OP_DECRYPT
	/**< Decrypt cipher operation */
};

/** Cipher operation name strings */
extern const char *
rte_crypto_cipher_operation_strings[];

/**
 * Symmetric Cipher Setup Data.
 *
 * This structure contains data relating to Cipher (Encryption and Decryption)
 * used to create a session.
 */
struct rte_crypto_cipher_xform {
	enum rte_crypto_cipher_operation op;
	/**< This parameter determines if the cipher operation is an encrypt or
	 * a decrypt operation. For the RC4 algorithm and the F8/CTR modes,
	 * only encrypt operations are valid.
	 */
	enum rte_crypto_cipher_algorithm algo;
	/**< Cipher algorithm */

	struct {
		const uint8_t *data;	/**< pointer to key data */
		uint16_t length;	/**< key length in bytes */
	} key;
	/**< Cipher key
	 *
	 * In case the PMD supports RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY, the
	 * original key data provided may be wrapped (encrypted) using a key
	 * wrap algorithm such as AES key wrap (RFC 3394), and hence the length
	 * of the key may increase beyond the PMD's advertised supported key
	 * size.
	 * The PMD shall validate the key length and report an EMSGSIZE error
	 * while configuring the session, and the application can skip checking
	 * the capability key length in such cases.
	 *
	 * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.data will
	 * point to a concatenation of the AES encryption key followed by a
	 * keymask. As per RFC 3711, the keymask should be padded with trailing
	 * bytes to match the length of the encryption key used.
	 *
	 * Cipher key length is in bytes. For AES it can be 128 bits (16 bytes),
	 * 192 bits (24 bytes) or 256 bits (32 bytes).
	 *
	 * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.length
	 * should be set to the combined length of the encryption key and the
	 * keymask. Since the keymask and the encryption key are the same size,
	 * key.length should be set to 2 x the AES encryption key length.
	 *
	 * For the AES-XTS mode of operation:
	 *  - Two keys must be provided and key.length refers to the total
	 *    length of the two keys.
	 *  - key.data must point to the two keys concatenated together
	 *    (key1 || key2).
	 *  - Each key can be either 128 bits (16 bytes) or 256 bits (32 bytes).
	 *  - Both keys must have the same size.
	 */
	struct {
		uint16_t offset;
		/**< Starting point for Initialisation Vector or Counter,
		 * specified as number of bytes from start of crypto
		 * operation (rte_crypto_op).
		 *
		 * - For block ciphers in CBC or F8 mode, or for KASUMI
		 *   in F8 mode, or for SNOW 3G in UEA2 mode, this is the
		 *   Initialisation Vector (IV) value.
		 *
		 * - For block ciphers in CTR mode, this is the counter.
		 *
		 * - For CCM mode, the first byte is reserved, and the
		 *   nonce should be written starting at &iv[1] (to allow
		 *   space for the implementation to write in the flags
		 *   in the first byte). Note that a full 16 bytes should
		 *   be allocated, even though the length field will
		 *   have a value less than this. Note that the PMDs may
		 *   modify the memory reserved (the first byte and the
		 *   final padding).
		 *
		 * - For AES-XTS, this is the 128-bit tweak, i, from
		 *   IEEE Std 1619-2007.
		 *
		 * For optimum performance, the data pointed to SHOULD
		 * be 8-byte aligned.
		 */
		uint16_t length;
		/**< Length of valid IV data.
		 *
		 * - For block ciphers in CBC or F8 mode, or for KASUMI
		 *   in F8 mode, or for SNOW 3G in UEA2 mode, this is the
		 *   length of the IV (which must be the same as the
		 *   block length of the cipher).
		 *
		 * - For block ciphers in CTR mode, this is the length
		 *   of the counter (which must be the same as the block
		 *   length of the cipher).
		 *
		 * - For CCM mode, this is the length of the nonce,
		 *   which can be in the range 7 to 13 inclusive.
		 */
	} iv;	/**< Initialisation vector parameters */

	uint32_t dataunit_len;
	/**< When RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS is enabled,
	 * this is the data-unit length of the algorithm,
	 * otherwise or when the value is 0, use the operation length.
	 * The value should be in the range defined by the dataunit_set field
	 * in the cipher capability.
	 *
	 * - For AES-XTS it is the size of data-unit, from IEEE Std 1619-2007.
	 *   For each data-unit in the operation, the tweak (IV) value is
	 *   assigned consecutively starting from the operation assigned IV.
	 */
};
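
/*
 * Example (illustrative sketch, not part of the API): a cipher transform
 * configured for AES-128-CBC encryption. The key buffer and IV_OFFSET
 * (offset of the IV within the rte_crypto_op) are hypothetical,
 * application-defined values:
 *
 *	static const uint8_t aes_key[16] = { 0 };
 *
 *	struct rte_crypto_cipher_xform cipher = {
 *		.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *		.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *		.key = { .data = aes_key, .length = sizeof(aes_key) },
 *		.iv = { .offset = IV_OFFSET, .length = 16 },
 *	};
 */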

/** Symmetric Authentication / Hash Algorithms
 *
 * Note, to avoid ABI breakage across releases
 * - LIST_END should not be added to this enum
 * - the order of enums should not be changed
 * - new algorithms should only be added to the end
 */
enum rte_crypto_auth_algorithm {
	RTE_CRYPTO_AUTH_NULL = 1,
	/**< NULL hash algorithm. */

	RTE_CRYPTO_AUTH_AES_CBC_MAC,
	/**< AES-CBC-MAC algorithm. Only 128-bit keys are supported. */
	RTE_CRYPTO_AUTH_AES_CMAC,
	/**< AES CMAC algorithm. */
	RTE_CRYPTO_AUTH_AES_GMAC,
	/**< AES GMAC algorithm. */
	RTE_CRYPTO_AUTH_AES_XCBC_MAC,
	/**< AES XCBC algorithm. */

	RTE_CRYPTO_AUTH_KASUMI_F9,
	/**< KASUMI algorithm in F9 mode. */

	RTE_CRYPTO_AUTH_MD5,
	/**< MD5 algorithm */
	RTE_CRYPTO_AUTH_MD5_HMAC,
	/**< HMAC using MD5 algorithm */

	RTE_CRYPTO_AUTH_SHA1,
	/**< 160 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA1_HMAC,
	/**< HMAC using 160 bit SHA algorithm.
	 * HMAC-SHA-1-96 can be generated by setting
	 * digest_length to 12 bytes in auth/aead xforms.
	 */
	RTE_CRYPTO_AUTH_SHA224,
	/**< 224 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA224_HMAC,
	/**< HMAC using 224 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA256,
	/**< 256 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA256_HMAC,
	/**< HMAC using 256 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA384,
	/**< 384 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA384_HMAC,
	/**< HMAC using 384 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA512,
	/**< 512 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA512_HMAC,
	/**< HMAC using 512 bit SHA algorithm. */

	RTE_CRYPTO_AUTH_SNOW3G_UIA2,
	/**< SNOW 3G algorithm in UIA2 mode. */

	RTE_CRYPTO_AUTH_ZUC_EIA3,
	/**< ZUC algorithm in EIA3 mode */

	RTE_CRYPTO_AUTH_SHA3_224,
	/**< 224 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_224_HMAC,
	/**< HMAC using 224 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_256,
	/**< 256 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_256_HMAC,
	/**< HMAC using 256 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_384,
	/**< 384 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_384_HMAC,
	/**< HMAC using 384 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_512,
	/**< 512 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_512_HMAC,
	/**< HMAC using 512 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SM3,
	/**< ShangMi 3 (SM3) algorithm */

	RTE_CRYPTO_AUTH_SHAKE_128,
	/**< 128 bit SHAKE algorithm. */
	RTE_CRYPTO_AUTH_SHAKE_256,
	/**< 256 bit SHAKE algorithm. */
	RTE_CRYPTO_AUTH_SM3_HMAC,
	/**< HMAC using ShangMi 3 (SM3) algorithm */
};

/** Symmetric Authentication / Hash Operations */
enum rte_crypto_auth_operation {
	RTE_CRYPTO_AUTH_OP_VERIFY,	/**< Verify authentication digest */
	RTE_CRYPTO_AUTH_OP_GENERATE	/**< Generate authentication digest */
};

/** Authentication operation name strings */
extern const char *
rte_crypto_auth_operation_strings[];

/**
 * Authentication / Hash transform data.
 *
 * This structure contains data relating to an authentication/hash crypto
 * transform. The fields op, algo and digest_length are common to all
 * authentication transforms and MUST be set.
 */
struct rte_crypto_auth_xform {
	enum rte_crypto_auth_operation op;
	/**< Authentication operation type */
	enum rte_crypto_auth_algorithm algo;
	/**< Authentication algorithm selection */

	struct {
		const uint8_t *data;	/**< pointer to key data */
		uint16_t length;	/**< key length in bytes */
	} key;
	/**< Authentication key data.
	 * The authentication key length MUST be less than or equal to the
	 * block size of the algorithm. It is the caller's responsibility to
	 * ensure that the key length is compliant with the standard being used
	 * (for example RFC 2104, FIPS 198a).
	 */

	struct {
		uint16_t offset;
		/**< Starting point for Initialisation Vector or Counter,
		 * specified as number of bytes from start of crypto
		 * operation (rte_crypto_op).
		 *
		 * - For SNOW 3G in UIA2 mode and for ZUC in EIA3 mode,
		 *   this is the authentication Initialisation Vector
		 *   (IV) value. For the AES-GMAC IV description please
		 *   refer to the field `length` in the iv struct.
		 *
		 * - For KASUMI in F9 mode and other authentication
		 *   algorithms, this field is not used.
		 *
		 * For optimum performance, the data pointed to SHOULD
		 * be 8-byte aligned.
		 */
		uint16_t length;
		/**< Length of valid IV data.
		 *
		 * - For SNOW 3G in UIA2 mode, for ZUC in EIA3 mode and
		 *   for AES-GMAC, this is the length of the IV.
		 *
		 * - For KASUMI in F9 mode and other authentication
		 *   algorithms, this field is not used.
		 *
		 * - For GMAC mode, this is either:
		 *   1) A number greater than or equal to one, which means
		 *      that the IV is used and J0 will be computed
		 *      internally; a minimum of 16 bytes must be allocated.
		 *   2) Zero, in which case data points to J0. In this case
		 *      16 bytes of J0 should be passed, where J0 is defined
		 *      by NIST SP800-38D.
		 */
	} iv;	/**< Initialisation vector parameters */

	uint16_t digest_length;
	/**< Length of the digest to be returned. If the verify option is set,
	 * this specifies the length of the digest to be compared for the
	 * session.
	 *
	 * It is the caller's responsibility to ensure that the
	 * digest length is compliant with the hash algorithm being used.
	 * If the value is less than the maximum length allowed by the hash,
	 * the result shall be truncated.
	 */
};
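
/*
 * Example (illustrative sketch, not part of the API): an authentication
 * transform generating a truncated HMAC-SHA-256 digest (16 of the maximum
 * 32 bytes). The key buffer is a hypothetical, application-defined value;
 * the IV fields are unused for HMAC algorithms:
 *
 *	static const uint8_t hmac_key[32] = { 0 };
 *
 *	struct rte_crypto_auth_xform auth = {
 *		.op = RTE_CRYPTO_AUTH_OP_GENERATE,
 *		.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
 *		.key = { .data = hmac_key, .length = sizeof(hmac_key) },
 *		.iv = { .offset = 0, .length = 0 },
 *		.digest_length = 16,
 *	};
 */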

/** Symmetric AEAD Algorithms
 *
 * Note, to avoid ABI breakage across releases
 * - LIST_END should not be added to this enum
 * - the order of enums should not be changed
 * - new algorithms should only be added to the end
 */
enum rte_crypto_aead_algorithm {
	RTE_CRYPTO_AEAD_AES_CCM = 1,
	/**< AES algorithm in CCM mode. */
	RTE_CRYPTO_AEAD_AES_GCM,
	/**< AES algorithm in GCM mode. */
	RTE_CRYPTO_AEAD_CHACHA20_POLY1305
	/**< ChaCha20 cipher with Poly1305 authenticator */
};

/** Symmetric AEAD Operations */
enum rte_crypto_aead_operation {
	RTE_CRYPTO_AEAD_OP_ENCRYPT,
	/**< Encrypt and generate digest */
	RTE_CRYPTO_AEAD_OP_DECRYPT
	/**< Verify digest and decrypt */
};

/** AEAD operation name strings */
extern const char *
rte_crypto_aead_operation_strings[];

/** AEAD transform data. */
struct rte_crypto_aead_xform {
	enum rte_crypto_aead_operation op;
	/**< AEAD operation type */
	enum rte_crypto_aead_algorithm algo;
	/**< AEAD algorithm selection */

	struct {
		const uint8_t *data;	/**< pointer to key data */
		uint16_t length;	/**< key length in bytes */
	} key;

	struct {
		uint16_t offset;
		/**< Starting point for Initialisation Vector or Counter,
		 * specified as number of bytes from start of crypto
		 * operation (rte_crypto_op).
		 *
		 * - For CCM mode, the first byte is reserved, and the
		 *   nonce should be written starting at &iv[1] (to allow
		 *   space for the implementation to write in the flags
		 *   in the first byte). Note that a full 16 bytes should
		 *   be allocated, even though the length field will
		 *   have a value less than this.
		 *
		 * - For ChaCha20-Poly1305 it is a 96-bit nonce.
		 *   The PMD sets the initial counter for the Poly1305 key
		 *   generation part to 0 and for ChaCha20 encryption to 1,
		 *   as per RFC 8439 section 2.8 "AEAD Construction".
		 *
		 * For optimum performance, the data pointed to SHOULD
		 * be 8-byte aligned.
		 */
		uint16_t length;
		/**< Length of valid IV data.
		 *
		 * - For GCM mode, this is either:
		 *   1) A number greater than or equal to one, which means
		 *      that the IV is used and J0 will be computed
		 *      internally; a minimum of 16 bytes must be allocated.
		 *   2) Zero, in which case data points to J0. In this case
		 *      16 bytes of J0 should be passed, where J0 is defined
		 *      by NIST SP800-38D.
		 *
		 * - For CCM mode, this is the length of the nonce,
		 *   which can be in the range 7 to 13 inclusive.
		 *
		 * - For ChaCha20-Poly1305 this field is always 12.
		 */
	} iv;	/**< Initialisation vector parameters */

	uint16_t digest_length;
	/**< Length of the digest to be returned. */

	uint16_t aad_length;
	/**< The length of the additional authenticated data (AAD) in bytes.
	 * For CCM mode, this is the length of the actual AAD, even though
	 * it is required to reserve 18 bytes before the AAD and padding
	 * at the end of it, so a multiple of 16 bytes is allocated.
	 */
};
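
/*
 * Example (illustrative sketch, not part of the API): an AEAD transform
 * for AES-256-GCM encryption with a 12-byte IV, a 16-byte tag and 16 bytes
 * of AAD. The key buffer and IV_OFFSET are hypothetical,
 * application-defined values:
 *
 *	static const uint8_t gcm_key[32] = { 0 };
 *
 *	struct rte_crypto_aead_xform aead = {
 *		.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
 *		.algo = RTE_CRYPTO_AEAD_AES_GCM,
 *		.key = { .data = gcm_key, .length = sizeof(gcm_key) },
 *		.iv = { .offset = IV_OFFSET, .length = 12 },
 *		.digest_length = 16,
 *		.aad_length = 16,
 *	};
 */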

/** Crypto transformation types */
enum rte_crypto_sym_xform_type {
	RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED = 0,	/**< No xform specified */
	RTE_CRYPTO_SYM_XFORM_AUTH,		/**< Authentication xform */
	RTE_CRYPTO_SYM_XFORM_CIPHER,		/**< Cipher xform */
	RTE_CRYPTO_SYM_XFORM_AEAD		/**< AEAD xform */
};

/**
 * Symmetric crypto transform structure.
 *
 * This is used to specify the crypto transforms required. Multiple transforms
 * can be chained together to specify a chain of transforms such as
 * authentication then cipher, or cipher then authentication. Each transform
 * structure can hold a single transform; the type field is used to specify
 * which transform is contained within the union.
 */
/* Structure rte_crypto_sym_xform 8< */
struct rte_crypto_sym_xform {
	struct rte_crypto_sym_xform *next;
	/**< next xform in chain */
	enum rte_crypto_sym_xform_type type;
	/**< xform type */
	union {
		struct rte_crypto_auth_xform auth;
		/**< Authentication / hash xform */
		struct rte_crypto_cipher_xform cipher;
		/**< Cipher xform */
		struct rte_crypto_aead_xform aead;
		/**< AEAD xform */
	};
};
/* >8 End of structure rte_crypto_sym_xform. */
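
/*
 * Example (illustrative sketch, not part of the API): chaining two
 * transforms for encrypt-then-generate-digest processing. The cipher
 * xform is listed first and links to the auth xform via its next pointer,
 * which in turn terminates the chain. Key buffers and IV_OFFSET are
 * hypothetical, application-defined values:
 *
 *	struct rte_crypto_sym_xform auth_xform = {
 *		.next = NULL,
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *		.auth = {
 *			.op = RTE_CRYPTO_AUTH_OP_GENERATE,
 *			.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
 *			.key = { .data = hmac_key, .length = 20 },
 *			.digest_length = 20,
 *		},
 *	};
 *
 *	struct rte_crypto_sym_xform cipher_xform = {
 *		.next = &auth_xform,
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = aes_key, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 16 },
 *		},
 *	};
 */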

/**
 * Symmetric Cryptographic Operation.
 *
 * This structure contains data relating to performing symmetric cryptographic
 * processing on a referenced mbuf data buffer.
 *
 * When a symmetric crypto operation is enqueued with the device for processing
 * it must have a valid *rte_mbuf* structure attached, via the m_src parameter,
 * which contains the source data which the crypto operation is to be performed
 * on.
 * While the mbuf is in use by a crypto operation no part of the mbuf should be
 * changed by the application as the device may read or write to any part of
 * the mbuf. In the case of hardware crypto devices some or all of the mbuf
 * may be DMAed in and out of the device, overwriting the original data, though
 * only the part specified by the rte_crypto_sym_op for transformation will be
 * changed.
 * Out-of-place (OOP) operation, where the source mbuf is different to the
 * destination mbuf, is a special case. Data will be copied from m_src to
 * m_dst. The part copied includes all the parts of the source mbuf that will
 * be operated on, based on the cipher.data.offset+cipher.data.length and
 * auth.data.offset+auth.data.length values in the rte_crypto_sym_op. The part
 * indicated by the cipher parameters will be transformed; any extra data
 * around this indicated by the auth parameters will be copied unchanged from
 * source to destination mbuf.
 * Also in OOP operation the cipher.data.offset and auth.data.offset apply to
 * both source and destination mbufs. As these offsets are relative to the
 * data_off parameter in each mbuf this can result in the data written to the
 * destination buffer being at a different alignment, relative to buffer start,
 * to the data in the source buffer.
 */
/* Structure rte_crypto_sym_op 8< */
struct rte_crypto_sym_op {
	struct rte_mbuf *m_src;	/**< source mbuf */
	struct rte_mbuf *m_dst;	/**< destination mbuf */

	union {
		void *session;
		/**< Handle for the initialised crypto/security session context */
		struct rte_crypto_sym_xform *xform;
		/**< Session-less API crypto operation parameters */
	};

	union {
		struct {
			struct {
				uint32_t offset;
				/**< Starting point for AEAD processing, specified as
				 * number of bytes from start of packet in source
				 * buffer.
				 */
				uint32_t length;
				/**< The message length, in bytes, of the source buffer
				 * on which the cryptographic operation will be
				 * computed. This must be a multiple of the block size.
				 */
			} data;	/**< Data offsets and length for AEAD */
			struct {
				uint8_t *data;
				/**< This points to the location where the digest result
				 * should be inserted (in the case of digest generation)
				 * or where the purported digest exists (in the case of
				 * digest verification).
				 *
				 * At session creation time, the client specified the
				 * digest result length with the digest_length member
				 * of the @ref rte_crypto_auth_xform structure. For
				 * physical crypto devices the caller must allocate at
				 * least digest_length of physically contiguous memory
				 * at this location.
				 *
				 * For digest generation, the digest result will
				 * overwrite any data at this location.
				 *
				 * @note
				 * For GCM (@ref RTE_CRYPTO_AEAD_AES_GCM), for
				 * "digest result" read "authentication tag T".
				 */
				rte_iova_t phys_addr;
				/**< Physical address of digest */
			} digest;	/**< Digest parameters */
			struct {
				uint8_t *data;
				/**< Pointer to Additional Authenticated Data (AAD)
				 * needed for authenticated cipher mechanisms (CCM and
				 * GCM)
				 *
				 * Specifically for CCM (@ref RTE_CRYPTO_AEAD_AES_CCM),
				 * the caller should set up this field as follows:
				 *
				 * - the additional authentication data itself should
				 *   be written starting at an offset of 18 bytes into
				 *   the array, leaving room for the first block
				 *   (16 bytes) and the length encoding in the first
				 *   two bytes of the second block.
				 *
				 * - the array should be big enough to hold the above
				 *   fields, plus any padding to round this up to the
				 *   nearest multiple of the block size (16 bytes).
				 *   Padding will be added by the implementation.
				 *
				 * - Note that PMDs may modify the memory reserved
				 *   (first 18 bytes and the final padding).
				 *
				 * Finally, for GCM (@ref RTE_CRYPTO_AEAD_AES_GCM), the
				 * caller should set up this field as follows:
				 *
				 * - the AAD is written in starting at byte 0
				 * - the array must be big enough to hold the AAD, plus
				 *   any space to round this up to the nearest multiple
				 *   of the block size (16 bytes).
				 */
				rte_iova_t phys_addr;	/**< physical address */
			} aad;
			/**< Additional authentication parameters */
		} aead;

		struct {
			struct {
				struct {
					uint32_t offset;
					/**< Starting point for cipher processing,
					 * specified as number of bytes from start
					 * of data in the source buffer.
					 * The result of the cipher operation will be
					 * written back into the output buffer
					 * starting at this location.
					 *
					 * @note
					 * For SNOW 3G @ RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
					 * KASUMI @ RTE_CRYPTO_CIPHER_KASUMI_F8
					 * and ZUC @ RTE_CRYPTO_CIPHER_ZUC_EEA3,
					 * this field should be in bits. For
					 * digest-encrypted cases this must be
					 * an 8-bit multiple.
					 */
					uint32_t length;
					/**< The message length, in bytes, of the
					 * source buffer on which the cryptographic
					 * operation will be computed.
					 * This is also the same as the result length.
					 * This must be a multiple of the block size
					 * or a multiple of the data-unit length
					 * as described in the xform.
					 *
					 * @note
					 * For SNOW 3G @ RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
					 * KASUMI @ RTE_CRYPTO_CIPHER_KASUMI_F8
					 * and ZUC @ RTE_CRYPTO_CIPHER_ZUC_EEA3,
					 * this field should be in bits. For
					 * digest-encrypted cases this must be
					 * an 8-bit multiple.
					 */
				} data;	/**< Data offsets and length for ciphering */
			} cipher;

			struct {
				struct {
					uint32_t offset;
					/**< Starting point for hash processing,
					 * specified as number of bytes from start of
					 * packet in source buffer.
					 *
					 * @note
					 * For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
					 * KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9
					 * and ZUC @ RTE_CRYPTO_AUTH_ZUC_EIA3,
					 * this field should be in bits. For
					 * digest-encrypted cases this must be
					 * an 8-bit multiple.
					 *
					 * @note
					 * For KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9,
					 * this offset should be such that
					 * data to authenticate starts at COUNT.
					 *
					 * @note
					 * For DOCSIS security protocol, this
					 * offset is the DOCSIS header length
					 * and, therefore, also the CRC offset
					 * i.e. the number of bytes into the
					 * packet at which CRC calculation
					 * should begin.
					 */
					uint32_t length;
					/**< The message length, in bytes, of the source
					 * buffer that the hash will be computed on.
					 *
					 * @note
					 * For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
					 * KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9
					 * and ZUC @ RTE_CRYPTO_AUTH_ZUC_EIA3,
					 * this field should be in bits. For
					 * digest-encrypted cases this must be
					 * an 8-bit multiple.
					 *
					 * @note
					 * For KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9,
					 * the length should include the COUNT,
					 * FRESH, message, direction bit and padding
					 * (to be multiple of 8 bits).
					 *
					 * @note
					 * For DOCSIS security protocol, this
					 * is the CRC length i.e. the number of
					 * bytes in the packet over which the
					 * CRC should be calculated.
					 */
				} data;
				/**< Data offsets and length for authentication */

				struct {
					uint8_t *data;
					/**< This points to the location where
					 * the digest result should be inserted
					 * (in the case of digest generation)
					 * or where the purported digest exists
					 * (in the case of digest verification).
					 *
					 * At session creation time, the client
					 * specified the digest result length with
					 * the digest_length member of the
					 * @ref rte_crypto_auth_xform structure.
					 * For physical crypto devices the caller
					 * must allocate at least digest_length of
					 * physically contiguous memory at this
					 * location.
					 *
					 * For digest generation, the digest result
					 * will overwrite any data at this location.
					 *
					 * @note
					 * Digest-encrypted case.
					 * Digest can be generated, appended to
					 * the end of raw data and encrypted
					 * together using chained digest
					 * generation
					 * (@ref RTE_CRYPTO_AUTH_OP_GENERATE)
					 * and encryption
					 * (@ref RTE_CRYPTO_CIPHER_OP_ENCRYPT)
					 * xforms. Similarly, authentication
					 * of the raw data against an appended,
					 * decrypted digest can be performed
					 * using decryption
					 * (@ref RTE_CRYPTO_CIPHER_OP_DECRYPT)
					 * and digest verification
					 * (@ref RTE_CRYPTO_AUTH_OP_VERIFY)
					 * chained xforms.
					 * To perform those operations, a few
					 * additional conditions must be met:
					 * - caller must allocate at least
					 *   digest_length of memory at the end of
					 *   source and (in case of out-of-place
					 *   operations) destination buffer; those
					 *   buffers can be linear or split using
					 *   scatter-gather lists,
					 * - digest data pointer must point to
					 *   the end of source or (in case of
					 *   out-of-place operations) destination
					 *   data, which is pointer to the
					 *   data buffer + auth.data.offset +
					 *   auth.data.length,
					 * - cipher.data.offset +
					 *   cipher.data.length must be greater
					 *   than auth.data.offset +
					 *   auth.data.length and is typically
					 *   equal to auth.data.offset +
					 *   auth.data.length + digest_length.
					 * - for wireless algorithms, i.e.
					 *   SNOW 3G, KASUMI and ZUC, as the
					 *   cipher.data.length,
					 *   cipher.data.offset,
					 *   auth.data.length and
					 *   auth.data.offset are in bits, they
					 *   must be 8-bit multiples.
					 *
					 * Note that, for security reasons, it
					 * is the PMDs' responsibility to not
					 * leave an unencrypted digest in any
					 * buffer after performing auth-cipher
					 * operations.
					 */
					rte_iova_t phys_addr;
					/**< Physical address of digest */
				} digest;	/**< Digest parameters */
			} auth;
		};
	};
};
/* >8 End of structure rte_crypto_sym_op. */
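
/*
 * Example (illustrative sketch, not part of the API): populating the AEAD
 * part of a symmetric operation for the AES-GCM transform shown earlier,
 * placing the 16-byte tag immediately after the ciphertext in the same
 * mbuf. Here *op* is a crypto operation obtained via rte_crypto_op_alloc()
 * (declared in rte_crypto.h); mb, plaintext_len, aad_buf and aad_iova are
 * hypothetical, application-provided values:
 *
 *	struct rte_crypto_sym_op *sym = op->sym;
 *
 *	sym->m_src = mb;
 *	sym->aead.data.offset = 0;
 *	sym->aead.data.length = plaintext_len;
 *	sym->aead.digest.data = rte_pktmbuf_mtod_offset(mb, uint8_t *,
 *		plaintext_len);
 *	sym->aead.digest.phys_addr = rte_pktmbuf_iova_offset(mb,
 *		plaintext_len);
 *	sym->aead.aad.data = aad_buf;
 *	sym->aead.aad.phys_addr = aad_iova;
 */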

/**
 * Reset the fields of a symmetric operation to their default values.
 *
 * @param op	The crypto operation to be reset.
 */
static inline void
__rte_crypto_sym_op_reset(struct rte_crypto_sym_op *op)
{
	memset(op, 0, sizeof(*op));
}

/**
 * Allocate space for symmetric crypto xforms in the private data space of the
 * crypto operation. This also defaults the crypto xform type to
 * RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED and configures the chaining of the xforms
 * in the crypto operation.
 *
 * @return
 * - On success returns pointer to first crypto xform in crypto operations
 *   chain
 * - On failure returns NULL
 */
static inline struct rte_crypto_sym_xform *
__rte_crypto_sym_op_sym_xforms_alloc(struct rte_crypto_sym_op *sym_op,
		void *priv_data, uint8_t nb_xforms)
{
	struct rte_crypto_sym_xform *xform;

	sym_op->xform = xform = (struct rte_crypto_sym_xform *)priv_data;

	do {
		xform->type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED;
		xform = xform->next = --nb_xforms > 0 ? xform + 1 : NULL;
	} while (xform);

	return sym_op->xform;
}

/**
 * Attach a session to a symmetric crypto operation
 *
 * @param sym_op	crypto operation
 * @param sess		cryptodev session
 */
static inline int
__rte_crypto_sym_op_attach_sym_session(struct rte_crypto_sym_op *sym_op,
		void *sess)
{
	sym_op->session = sess;

	return 0;
}

/**
 * Converts portion of mbuf data into a vector representation.
 * Each segment will be represented as a separate entry in the *vec* array.
 * Expects the provided *ofs* + *len* not to exceed the mbuf's *pkt_len*.
 *
 * @param mb
 *   Pointer to the *rte_mbuf* object.
 * @param ofs
 *   Offset within mbuf data to start with.
 * @param len
 *   Length of data to represent.
 * @param vec
 *   Pointer to an output array of IO vectors.
 * @param num
 *   Size of an output array.
 * @return
 *   - number of successfully filled entries in *vec* array.
 *   - negative number of elements in *vec* array required.
 */
__rte_experimental
static inline int
rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len,
	struct rte_crypto_vec vec[], uint32_t num)
{
	uint32_t i;
	struct rte_mbuf *nseg;
	uint32_t left;
	uint32_t seglen;

	/* assuming that requested data starts in the first segment */
	RTE_ASSERT(mb->data_len > ofs);

	if (mb->nb_segs > num)
		return -mb->nb_segs;

	vec[0].base = rte_pktmbuf_mtod_offset(mb, void *, ofs);
	vec[0].iova = rte_pktmbuf_iova_offset(mb, ofs);
	vec[0].tot_len = mb->buf_len - rte_pktmbuf_headroom(mb) - ofs;

	/* whole data lies in the first segment */
	seglen = mb->data_len - ofs;
	if (len <= seglen) {
		vec[0].len = len;
		return 1;
	}

	/* data spread across segments */
	vec[0].len = seglen;
	left = len - seglen;
	for (i = 1, nseg = mb->next; nseg != NULL; nseg = nseg->next, i++) {

		vec[i].base = rte_pktmbuf_mtod(nseg, void *);
		vec[i].iova = rte_pktmbuf_iova(nseg);
		/* tot_len of a subsequent segment is derived from that
		 * segment's own buffer; the first-segment offset does not
		 * apply here.
		 */
		vec[i].tot_len = nseg->buf_len - rte_pktmbuf_headroom(nseg);

		seglen = nseg->data_len;
		if (left <= seglen) {
			/* whole requested data is completed */
			vec[i].len = left;
			left = 0;
			i++;
			break;
		}

		/* use whole segment */
		vec[i].len = seglen;
		left -= seglen;
	}

	RTE_ASSERT(left == 0);
	return i;
}
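
/*
 * Example (illustrative sketch, not part of the API): converting a whole
 * packet mbuf *mb* into an SGL for the raw data path. On failure the
 * negated return value is the number of vector entries required:
 *
 *	struct rte_crypto_vec vec[8];
 *	struct rte_crypto_sgl sgl;
 *	int n;
 *
 *	n = rte_crypto_mbuf_to_vec(mb, 0, rte_pktmbuf_pkt_len(mb),
 *		vec, RTE_DIM(vec));
 *	if (n < 0)
 *		return -ENOSPC;
 *
 *	sgl.vec = vec;
 *	sgl.num = n;
 */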

#ifdef __cplusplus
}
#endif

#endif /* _RTE_CRYPTO_SYM_H_ */