/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#ifndef _RTE_CRYPTO_SYM_H_
#define _RTE_CRYPTO_SYM_H_

/**
 * @file rte_crypto_sym.h
 *
 * RTE Definitions for Symmetric Cryptography
 *
 * Defines symmetric cipher and authentication algorithms and modes, as well
 * as supported symmetric crypto operation combinations.
 */

#ifdef __cplusplus
extern "C" {
#endif

#include <string.h>

#include <rte_compat.h>
#include <rte_mbuf.h>
#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_common.h>

/**
 * Crypto IO Vector (in analogy with struct iovec)
 * Intended to be used to pass input/output data buffers for crypto data-path
 * functions.
 */
struct rte_crypto_vec {
	/** virtual address of the data buffer */
	void *base;
	/** IOVA of the data buffer */
	rte_iova_t iova;
	/** length of the data buffer */
	uint32_t len;
	/** total buffer length */
	uint32_t tot_len;
};

/**
 * Crypto scatter-gather list descriptor. Consists of a pointer to an array
 * of Crypto IO vectors and its size.
 */
struct rte_crypto_sgl {
	/** start of an array of vectors */
	struct rte_crypto_vec *vec;
	/** size of an array of vectors */
	uint32_t num;
};
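
/*
 * Example (illustrative only, not part of the API): describing a single
 * contiguous data buffer with one IO vector and wrapping it in an SGL.
 * The buffer name and the 256-byte valid-data length are hypothetical.
 *
 *	static uint8_t buf[512];
 *	struct rte_crypto_vec v = {
 *		.base = buf,
 *		.iova = rte_mem_virt2iova(buf),
 *		.len = 256,
 *		.tot_len = sizeof(buf),
 *	};
 *	struct rte_crypto_sgl sgl = { .vec = &v, .num = 1 };
 */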

/**
 * Crypto virtual and IOVA address descriptor, used to describe a
 * cryptographic data buffer without the length information. The length
 * information is normally predefined during session creation.
 */
struct rte_crypto_va_iova_ptr {
	void *va;
	rte_iova_t iova;
};

/**
 * Raw data operation descriptor.
 * Intended to be used with the synchronous CPU crypto API calls or the
 * asynchronous RAW data-path API calls.
 */
struct rte_crypto_sym_vec {
	/** number of operations to perform */
	uint32_t num;
	/** array of SGL vectors */
	struct rte_crypto_sgl *src_sgl;
	/** array of SGL vectors for OOP, keep it NULL for in-place */
	struct rte_crypto_sgl *dest_sgl;
	/** array of pointers to cipher IV */
	struct rte_crypto_va_iova_ptr *iv;
	/** array of pointers to digest */
	struct rte_crypto_va_iova_ptr *digest;

	__extension__
	union {
		/** array of pointers to auth IV, used for chain operation */
		struct rte_crypto_va_iova_ptr *auth_iv;
		/** array of pointers to AAD, used for AEAD operation */
		struct rte_crypto_va_iova_ptr *aad;
	};

	/**
	 * array of statuses for each operation:
	 * - 0 on success
	 * - errno on error
	 */
	int32_t *status;
};
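
/*
 * Example (illustrative sketch): assembling a rte_crypto_sym_vec that
 * describes one in-place AEAD operation. All buffer pointers, IOVAs and
 * lengths below are hypothetical placeholders a real caller would take
 * from its own data path.
 *
 *	struct rte_crypto_vec v = { .base = buf, .iova = buf_iova,
 *		.len = data_len, .tot_len = buf_len };
 *	struct rte_crypto_sgl sgl = { .vec = &v, .num = 1 };
 *	struct rte_crypto_va_iova_ptr iv = { .va = iv_va, .iova = iv_iova };
 *	struct rte_crypto_va_iova_ptr tag = { .va = tag_va, .iova = tag_iova };
 *	struct rte_crypto_va_iova_ptr aad = { .va = aad_va, .iova = aad_iova };
 *	int32_t status;
 *	struct rte_crypto_sym_vec vec = {
 *		.num = 1,
 *		.src_sgl = &sgl,
 *		.dest_sgl = NULL,
 *		.iv = &iv,
 *		.digest = &tag,
 *		.aad = &aad,
 *		.status = &status,
 *	};
 */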

/**
 * Used for cpu_crypto_process_bulk() to specify head/tail offsets
 * for auth/cipher processing.
 */
union rte_crypto_sym_ofs {
	uint64_t raw;
	struct {
		struct {
			uint16_t head;
			uint16_t tail;
		} auth, cipher;
	} ofs;
};
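
/*
 * Example (illustrative): encoding offsets for a chained cipher + auth
 * operation in which authentication covers the whole buffer while
 * ciphering skips a hypothetical 16-byte header at the start.
 *
 *	union rte_crypto_sym_ofs ofs = { .raw = 0 };
 *	ofs.ofs.cipher.head = 16;
 *	ofs.ofs.cipher.tail = 0;
 *	ofs.ofs.auth.head = 0;
 *	ofs.ofs.auth.tail = 0;
 */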

/** Symmetric Cipher Algorithms
 *
 * Note, to avoid ABI breakage across releases
 * - LIST_END should not be added to this enum
 * - the order of enums should not be changed
 * - new algorithms should only be added to the end
 */
enum rte_crypto_cipher_algorithm {
	RTE_CRYPTO_CIPHER_NULL = 1,
	/**< NULL cipher algorithm. No mode applies to the NULL algorithm. */

	RTE_CRYPTO_CIPHER_3DES_CBC,
	/**< Triple DES algorithm in CBC mode */
	RTE_CRYPTO_CIPHER_3DES_CTR,
	/**< Triple DES algorithm in CTR mode */
	RTE_CRYPTO_CIPHER_3DES_ECB,
	/**< Triple DES algorithm in ECB mode */

	RTE_CRYPTO_CIPHER_AES_CBC,
	/**< AES algorithm in CBC mode */
	RTE_CRYPTO_CIPHER_AES_CTR,
	/**< AES algorithm in Counter mode */
	RTE_CRYPTO_CIPHER_AES_ECB,
	/**< AES algorithm in ECB mode */
	RTE_CRYPTO_CIPHER_AES_F8,
	/**< AES algorithm in F8 mode */
	RTE_CRYPTO_CIPHER_AES_XTS,
	/**< AES algorithm in XTS mode */

	RTE_CRYPTO_CIPHER_ARC4,
	/**< (A)RC4 cipher algorithm */

	RTE_CRYPTO_CIPHER_KASUMI_F8,
	/**< KASUMI algorithm in F8 mode */

	RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
	/**< SNOW 3G algorithm in UEA2 mode */

	RTE_CRYPTO_CIPHER_ZUC_EEA3,
	/**< ZUC algorithm in EEA3 mode */

	RTE_CRYPTO_CIPHER_DES_CBC,
	/**< DES algorithm in CBC mode */

	RTE_CRYPTO_CIPHER_AES_DOCSISBPI,
	/**< AES algorithm using modes required by
	 * DOCSIS Baseline Privacy Plus Spec.
	 * Chained mbufs are not supported in this mode, i.e. rte_mbuf.next
	 * for m_src and m_dst in the rte_crypto_sym_op must be NULL.
	 */

	RTE_CRYPTO_CIPHER_DES_DOCSISBPI,
	/**< DES algorithm using modes required by
	 * DOCSIS Baseline Privacy Plus Spec.
	 * Chained mbufs are not supported in this mode, i.e. rte_mbuf.next
	 * for m_src and m_dst in the rte_crypto_sym_op must be NULL.
	 */

	RTE_CRYPTO_CIPHER_SM4_ECB,
	/**< ShangMi 4 (SM4) algorithm in ECB mode */
	RTE_CRYPTO_CIPHER_SM4_CBC,
	/**< ShangMi 4 (SM4) algorithm in CBC mode */
	RTE_CRYPTO_CIPHER_SM4_CTR
	/**< ShangMi 4 (SM4) algorithm in CTR mode */
};

/** Cipher algorithm name strings */
__rte_deprecated
extern const char *
rte_crypto_cipher_algorithm_strings[];

/** Symmetric Cipher Direction */
enum rte_crypto_cipher_operation {
	RTE_CRYPTO_CIPHER_OP_ENCRYPT,
	/**< Encrypt cipher operation */
	RTE_CRYPTO_CIPHER_OP_DECRYPT
	/**< Decrypt cipher operation */
};

/** Cipher operation name strings */
extern const char *
rte_crypto_cipher_operation_strings[];

/**
 * Symmetric Cipher Setup Data.
 *
 * This structure contains data relating to Cipher (Encryption and Decryption)
 * used to create a session.
 */
struct rte_crypto_cipher_xform {
	enum rte_crypto_cipher_operation op;
	/**< This parameter determines if the cipher operation is an encrypt or
	 * a decrypt operation. For the RC4 algorithm and the F8/CTR modes,
	 * only encrypt operations are valid.
	 */
	enum rte_crypto_cipher_algorithm algo;
	/**< Cipher algorithm */

	struct {
		const uint8_t *data;	/**< pointer to key data */
		uint16_t length;	/**< key length in bytes */
	} key;
	/**< Cipher key
	 *
	 * In case the PMD supports RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY, the
	 * original key data provided may be wrapped (encrypted) using a key
	 * wrap algorithm such as AES key wrap (RFC 3394), and hence the key
	 * length may exceed the PMD's advertised supported key size.
	 * The PMD shall validate the key length and report an EMSGSIZE error
	 * while configuring the session; the application can skip checking
	 * the capability key length in such cases.
	 *
	 * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.data will
	 * point to a concatenation of the AES encryption key followed by a
	 * keymask. As per RFC 3711, the keymask should be padded with trailing
	 * bytes to match the length of the encryption key used.
	 *
	 * Cipher key length is in bytes. For AES it can be 128 bits (16 bytes),
	 * 192 bits (24 bytes) or 256 bits (32 bytes).
	 *
	 * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.length
	 * should be set to the combined length of the encryption key and the
	 * keymask. Since the keymask and the encryption key are the same size,
	 * key.length should be set to 2 x the AES encryption key length.
	 *
	 * For the AES-XTS mode of operation:
	 *  - Two keys must be provided and key.length refers to the total
	 *    length of the two keys.
	 *  - key.data must point to the two keys concatenated together
	 *    (key1 || key2).
	 *  - Each key can be either 128 bits (16 bytes) or 256 bits (32 bytes).
	 *  - Both keys must have the same size.
	 */
	struct {
		uint16_t offset;
		/**< Starting point for Initialisation Vector or Counter,
		 * specified as number of bytes from start of crypto
		 * operation (rte_crypto_op).
		 *
		 * - For block ciphers in CBC or F8 mode, or for KASUMI
		 * in F8 mode, or for SNOW 3G in UEA2 mode, this is the
		 * Initialisation Vector (IV) value.
		 *
		 * - For block ciphers in CTR mode, this is the counter.
		 *
		 * - For CCM mode, the first byte is reserved, and the
		 * nonce should be written starting at &iv[1] (to allow
		 * space for the implementation to write in the flags
		 * in the first byte). Note that a full 16 bytes should
		 * be allocated, even though the length field will
		 * have a value less than this. Note that the PMDs may
		 * modify the memory reserved (the first byte and the
		 * final padding).
		 *
		 * - For AES-XTS, this is the 128-bit tweak, i, from
		 * IEEE Std 1619-2007.
		 *
		 * For optimum performance, the data pointed to SHOULD
		 * be 8-byte aligned.
		 */
		uint16_t length;
		/**< Length of valid IV data.
		 *
		 * - For block ciphers in CBC or F8 mode, or for KASUMI
		 * in F8 mode, or for SNOW 3G in UEA2 mode, this is the
		 * length of the IV (which must be the same as the
		 * block length of the cipher).
		 *
		 * - For block ciphers in CTR mode, this is the length
		 * of the counter (which must be the same as the block
		 * length of the cipher).
		 *
		 * - For CCM mode, this is the length of the nonce,
		 * which can be in the range 7 to 13 inclusive.
		 */
	} iv;	/**< Initialisation vector parameters */

	uint32_t dataunit_len;
	/**< When RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS is enabled,
	 * this is the data-unit length of the algorithm,
	 * otherwise or when the value is 0, use the operation length.
	 * The value should be in the range defined by the dataunit_set field
	 * in the cipher capability.
	 *
	 * - For AES-XTS it is the size of the data-unit, from IEEE Std
	 * 1619-2007. For each data-unit in the operation, the tweak (IV)
	 * value is assigned consecutively, starting from the operation's
	 * assigned IV.
	 */
};
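
/*
 * Example (illustrative sketch, not taken from the API itself): a cipher
 * transform for AES-128-CBC encryption. The key bytes are dummies and
 * CIPHER_IV_OFFSET is a hypothetical constant; the IV itself is written
 * into the crypto operation at that offset, typically right after the
 * rte_crypto_op and rte_crypto_sym_op structures.
 *
 *	static const uint8_t aes_key[16] = { 0x00, 0x01, ... };
 *	struct rte_crypto_cipher_xform cx = {
 *		.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *		.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *		.key = { .data = aes_key, .length = sizeof(aes_key) },
 *		.iv = { .offset = CIPHER_IV_OFFSET, .length = 16 },
 *	};
 */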

/** Symmetric Authentication / Hash Algorithms
 *
 * Note, to avoid ABI breakage across releases
 * - LIST_END should not be added to this enum
 * - the order of enums should not be changed
 * - new algorithms should only be added to the end
 */
enum rte_crypto_auth_algorithm {
	RTE_CRYPTO_AUTH_NULL = 1,
	/**< NULL hash algorithm. */

	RTE_CRYPTO_AUTH_AES_CBC_MAC,
	/**< AES-CBC-MAC algorithm. Only 128-bit keys are supported. */
	RTE_CRYPTO_AUTH_AES_CMAC,
	/**< AES CMAC algorithm. */
	RTE_CRYPTO_AUTH_AES_GMAC,
	/**< AES GMAC algorithm. */
	RTE_CRYPTO_AUTH_AES_XCBC_MAC,
	/**< AES XCBC algorithm. */

	RTE_CRYPTO_AUTH_KASUMI_F9,
	/**< KASUMI algorithm in F9 mode. */

	RTE_CRYPTO_AUTH_MD5,
	/**< MD5 algorithm */
	RTE_CRYPTO_AUTH_MD5_HMAC,
	/**< HMAC using MD5 algorithm */

	RTE_CRYPTO_AUTH_SHA1,
	/**< 160 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA1_HMAC,
	/**< HMAC using 160 bit SHA algorithm.
	 * HMAC-SHA-1-96 can be generated by setting
	 * digest_length to 12 bytes in auth/aead xforms.
	 */
	RTE_CRYPTO_AUTH_SHA224,
	/**< 224 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA224_HMAC,
	/**< HMAC using 224 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA256,
	/**< 256 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA256_HMAC,
	/**< HMAC using 256 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA384,
	/**< 384 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA384_HMAC,
	/**< HMAC using 384 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA512,
	/**< 512 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA512_HMAC,
	/**< HMAC using 512 bit SHA algorithm. */

	RTE_CRYPTO_AUTH_SNOW3G_UIA2,
	/**< SNOW 3G algorithm in UIA2 mode. */

	RTE_CRYPTO_AUTH_ZUC_EIA3,
	/**< ZUC algorithm in EIA3 mode */

	RTE_CRYPTO_AUTH_SHA3_224,
	/**< 224 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_224_HMAC,
	/**< HMAC using 224 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_256,
	/**< 256 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_256_HMAC,
	/**< HMAC using 256 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_384,
	/**< 384 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_384_HMAC,
	/**< HMAC using 384 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_512,
	/**< 512 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_512_HMAC,
	/**< HMAC using 512 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SM3,
	/**< ShangMi 3 (SM3) algorithm */

	RTE_CRYPTO_AUTH_SHAKE_128,
	/**< 128 bit SHAKE algorithm. */
	RTE_CRYPTO_AUTH_SHAKE_256,
	/**< 256 bit SHAKE algorithm. */
};

/** Authentication algorithm name strings */
__rte_deprecated
extern const char *
rte_crypto_auth_algorithm_strings[];

/** Symmetric Authentication / Hash Operations */
enum rte_crypto_auth_operation {
	RTE_CRYPTO_AUTH_OP_VERIFY,	/**< Verify authentication digest */
	RTE_CRYPTO_AUTH_OP_GENERATE	/**< Generate authentication digest */
};

/** Authentication operation name strings */
extern const char *
rte_crypto_auth_operation_strings[];

/**
 * Authentication / Hash transform data.
 *
 * This structure contains data relating to an authentication/hash crypto
 * transform. The fields op, algo and digest_length are common to all
 * authentication transforms and MUST be set.
 */
struct rte_crypto_auth_xform {
	enum rte_crypto_auth_operation op;
	/**< Authentication operation type */
	enum rte_crypto_auth_algorithm algo;
	/**< Authentication algorithm selection */

	struct {
		const uint8_t *data;	/**< pointer to key data */
		uint16_t length;	/**< key length in bytes */
	} key;
	/**< Authentication key data.
	 * The authentication key length MUST be less than or equal to the
	 * block size of the algorithm. It is the caller's responsibility to
	 * ensure that the key length is compliant with the standard being used
	 * (for example RFC 2104, FIPS 198a).
	 */

	struct {
		uint16_t offset;
		/**< Starting point for Initialisation Vector or Counter,
		 * specified as number of bytes from start of crypto
		 * operation (rte_crypto_op).
		 *
		 * - For SNOW 3G in UIA2 mode and for ZUC in EIA3 mode,
		 *   this is the authentication Initialisation Vector
		 *   (IV) value. For the AES-GMAC IV description please
		 *   refer to the field `length` in the iv struct.
		 *
		 * - For KASUMI in F9 mode and other authentication
		 *   algorithms, this field is not used.
		 *
		 * For optimum performance, the data pointed to SHOULD
		 * be 8-byte aligned.
		 */
		uint16_t length;
		/**< Length of valid IV data.
		 *
		 * - For SNOW 3G in UIA2 mode, for ZUC in EIA3 mode and
		 *   for AES-GMAC, this is the length of the IV.
		 *
		 * - For KASUMI in F9 mode and other authentication
		 *   algorithms, this field is not used.
		 *
		 * - For GMAC mode, this is either:
		 * 1) A number greater than or equal to one, which means
		 *    that the IV is used and J0 will be computed
		 *    internally; a minimum of 16 bytes must be allocated.
		 * 2) Zero, in which case data points to J0. In this case
		 *    16 bytes of J0 should be passed, where J0 is defined
		 *    by NIST SP800-38D.
		 */
	} iv;	/**< Initialisation vector parameters */

	uint16_t digest_length;
	/**< Length of the digest to be returned. If the verify option is set,
	 * this specifies the length of the digest to be compared for the
	 * session.
	 *
	 * It is the caller's responsibility to ensure that the
	 * digest length is compliant with the hash algorithm being used.
	 * If the value is less than the maximum length allowed by the hash,
	 * the result shall be truncated.
	 */
};
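
/*
 * Example (illustrative sketch): an HMAC-SHA1 digest-generation transform
 * producing a 12-byte truncated tag (HMAC-SHA-1-96, as noted for
 * RTE_CRYPTO_AUTH_SHA1_HMAC above). The key bytes are dummies; HMAC does
 * not use the iv fields, so they are left zeroed.
 *
 *	static const uint8_t hmac_key[20] = { 0x0b, 0x0b, ... };
 *	struct rte_crypto_auth_xform ax = {
 *		.op = RTE_CRYPTO_AUTH_OP_GENERATE,
 *		.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
 *		.key = { .data = hmac_key, .length = sizeof(hmac_key) },
 *		.iv = { .offset = 0, .length = 0 },
 *		.digest_length = 12,
 *	};
 */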


/** Symmetric AEAD Algorithms
 *
 * Note, to avoid ABI breakage across releases
 * - LIST_END should not be added to this enum
 * - the order of enums should not be changed
 * - new algorithms should only be added to the end
 */
enum rte_crypto_aead_algorithm {
	RTE_CRYPTO_AEAD_AES_CCM = 1,
	/**< AES algorithm in CCM mode. */
	RTE_CRYPTO_AEAD_AES_GCM,
	/**< AES algorithm in GCM mode. */
	RTE_CRYPTO_AEAD_CHACHA20_POLY1305
	/**< Chacha20 cipher with poly1305 authenticator */
};

/** AEAD algorithm name strings */
__rte_deprecated
extern const char *
rte_crypto_aead_algorithm_strings[];

/** Symmetric AEAD Operations */
enum rte_crypto_aead_operation {
	RTE_CRYPTO_AEAD_OP_ENCRYPT,
	/**< Encrypt and generate digest */
	RTE_CRYPTO_AEAD_OP_DECRYPT
	/**< Verify digest and decrypt */
};

/** AEAD operation name strings */
extern const char *
rte_crypto_aead_operation_strings[];

/**
 * Symmetric AEAD transform data.
 *
 * This structure contains data relating to an AEAD crypto transform,
 * used to create a session.
 */
struct rte_crypto_aead_xform {
	enum rte_crypto_aead_operation op;
	/**< AEAD operation type */
	enum rte_crypto_aead_algorithm algo;
	/**< AEAD algorithm selection */

	struct {
		const uint8_t *data;	/**< pointer to key data */
		uint16_t length;	/**< key length in bytes */
	} key;

	struct {
		uint16_t offset;
		/**< Starting point for Initialisation Vector or Counter,
		 * specified as number of bytes from start of crypto
		 * operation (rte_crypto_op).
		 *
		 * - For CCM mode, the first byte is reserved, and the
		 * nonce should be written starting at &iv[1] (to allow
		 * space for the implementation to write in the flags
		 * in the first byte). Note that a full 16 bytes should
		 * be allocated, even though the length field will
		 * have a value less than this.
		 *
		 * - For Chacha20-Poly1305 it is a 96-bit nonce.
		 * The PMD sets the initial counter for the Poly1305 key
		 * generation part to 0 and for Chacha20 encryption to 1,
		 * as per RFC 8439 section 2.8, AEAD construction.
		 *
		 * For optimum performance, the data pointed to SHOULD
		 * be 8-byte aligned.
		 */
		uint16_t length;
		/**< Length of valid IV data.
		 *
		 * - For GCM mode, this is either:
		 * 1) A number greater than or equal to one, which means
		 *    that the IV is used and J0 will be computed
		 *    internally; a minimum of 16 bytes must be allocated.
		 * 2) Zero, in which case data points to J0. In this case
		 *    16 bytes of J0 should be passed, where J0 is defined
		 *    by NIST SP800-38D.
		 *
		 * - For CCM mode, this is the length of the nonce,
		 * which can be in the range 7 to 13 inclusive.
		 *
		 * - For Chacha20-Poly1305 this field is always 12.
		 */
	} iv;	/**< Initialisation vector parameters */

	uint16_t digest_length;
	/**< Length of the digest to be returned, in bytes. */

	uint16_t aad_length;
	/**< The length of the additional authenticated data (AAD) in bytes.
	 * For CCM mode, this is the length of the actual AAD, even though
	 * it is required to reserve 18 bytes before the AAD and padding
	 * at the end of it, so a multiple of 16 bytes is allocated.
	 */
};
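
/*
 * Example (illustrative sketch): an AES-128-GCM encrypt transform with a
 * 12-byte IV (so J0 is computed internally), a 16-byte tag and 16 bytes
 * of AAD. The key bytes are dummies and AEAD_IV_OFFSET is a hypothetical
 * constant locating the IV within the crypto operation.
 *
 *	static const uint8_t gcm_key[16] = { 0xfe, 0xff, ... };
 *	struct rte_crypto_aead_xform gx = {
 *		.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
 *		.algo = RTE_CRYPTO_AEAD_AES_GCM,
 *		.key = { .data = gcm_key, .length = sizeof(gcm_key) },
 *		.iv = { .offset = AEAD_IV_OFFSET, .length = 12 },
 *		.digest_length = 16,
 *		.aad_length = 16,
 *	};
 */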

/** Crypto transformation types */
enum rte_crypto_sym_xform_type {
	RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED = 0,	/**< No xform specified */
	RTE_CRYPTO_SYM_XFORM_AUTH,		/**< Authentication xform */
	RTE_CRYPTO_SYM_XFORM_CIPHER,		/**< Cipher xform */
	RTE_CRYPTO_SYM_XFORM_AEAD		/**< AEAD xform */
};

/**
 * Symmetric crypto transform structure.
 *
 * This is used to specify the crypto transforms required. Multiple transforms
 * can be chained together to specify a chain of transforms such as
 * authentication then cipher, or cipher then authentication. Each transform
 * structure can hold a single transform; the type field is used to specify
 * which transform is contained within the union.
 */
struct rte_crypto_sym_xform {
	struct rte_crypto_sym_xform *next;
	/**< next xform in chain */
	enum rte_crypto_sym_xform_type type;
	/**< xform type */
	RTE_STD_C11
	union {
		struct rte_crypto_auth_xform auth;
		/**< Authentication / hash xform */
		struct rte_crypto_cipher_xform cipher;
		/**< Cipher xform */
		struct rte_crypto_aead_xform aead;
		/**< AEAD xform */
	};
};
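
/*
 * Example (illustrative): chaining two transforms for cipher-then-auth
 * processing. The chain starts at the cipher xform, whose next pointer
 * links to the auth xform; for auth-then-cipher the order would simply be
 * reversed. The cx and ax initializers are the hypothetical cipher and
 * auth examples shown earlier.
 *
 *	struct rte_crypto_sym_xform auth_xform = {
 *		.next = NULL,
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *		.auth = ax,
 *	};
 *	struct rte_crypto_sym_xform cipher_xform = {
 *		.next = &auth_xform,
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.cipher = cx,
 *	};
 */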

/**
 * Symmetric Cryptographic Operation.
 *
 * This structure contains data relating to performing symmetric cryptographic
 * processing on a referenced mbuf data buffer.
 *
 * When a symmetric crypto operation is enqueued with the device for processing
 * it must have a valid *rte_mbuf* structure attached, via the m_src parameter,
 * which contains the source data on which the crypto operation is to be
 * performed.
 * While the mbuf is in use by a crypto operation no part of the mbuf should be
 * changed by the application, as the device may read or write to any part of
 * the mbuf. In the case of hardware crypto devices some or all of the mbuf
 * may be DMAed in and out of the device, overwriting the original data, though
 * only the part specified by the rte_crypto_sym_op for transformation will be
 * changed.
 * Out-of-place (OOP) operation, where the source mbuf is different from the
 * destination mbuf, is a special case. Data will be copied from m_src to m_dst.
 * The part copied includes all the parts of the source mbuf that will be
 * operated on, based on the cipher.data.offset+cipher.data.length and
 * auth.data.offset+auth.data.length values in the rte_crypto_sym_op. The part
 * indicated by the cipher parameters will be transformed; any extra data around
 * this indicated by the auth parameters will be copied unchanged from source to
 * destination mbuf.
 * Also, in OOP operation the cipher.data.offset and auth.data.offset apply to
 * both source and destination mbufs. As these offsets are relative to the
 * data_off parameter in each mbuf, this can result in the data written to the
 * destination buffer being at a different alignment, relative to buffer start,
 * from the data in the source buffer.
 */
struct rte_crypto_sym_op {
	struct rte_mbuf *m_src;	/**< source mbuf */
	struct rte_mbuf *m_dst;	/**< destination mbuf */

	RTE_STD_C11
	union {
		void *session;
		/**< Handle for the initialised crypto/security session context */
		struct rte_crypto_sym_xform *xform;
		/**< Session-less API crypto operation parameters */
	};

	RTE_STD_C11
	union {
		struct {
			struct {
				uint32_t offset;
				 /**< Starting point for AEAD processing, specified as
				  * number of bytes from start of packet in source
				  * buffer.
				  */
				uint32_t length;
				 /**< The message length, in bytes, of the source buffer
				  * on which the cryptographic operation will be
				  * computed. This must be a multiple of the block size.
				  */
			} data; /**< Data offsets and length for AEAD */
			struct {
				uint8_t *data;
				/**< This points to the location where the digest result
				 * should be inserted (in the case of digest generation)
				 * or where the purported digest exists (in the case of
				 * digest verification).
				 *
				 * At session creation time, the client specified the
				 * digest result length with the digest_length member
				 * of the @ref rte_crypto_auth_xform structure. For
				 * physical crypto devices the caller must allocate at
				 * least digest_length of physically contiguous memory
				 * at this location.
				 *
				 * For digest generation, the digest result will
				 * overwrite any data at this location.
				 *
				 * @note
				 * For GCM (@ref RTE_CRYPTO_AEAD_AES_GCM), for
				 * "digest result" read "authentication tag T".
				 */
				rte_iova_t phys_addr;
				/**< Physical address of digest */
			} digest; /**< Digest parameters */
			struct {
				uint8_t *data;
				/**< Pointer to Additional Authenticated Data (AAD)
				 * needed for authenticated cipher mechanisms (CCM and
				 * GCM)
				 *
				 * Specifically for CCM (@ref RTE_CRYPTO_AEAD_AES_CCM),
				 * the caller should setup this field as follows:
				 *
				 * - the additional authentication data itself should
				 * be written starting at an offset of 18 bytes into
				 * the array, leaving room for the first block (16 bytes)
				 * and the length encoding in the first two bytes of the
				 * second block.
				 *
				 * - the array should be big enough to hold the above
				 * fields, plus any padding to round this up to the
				 * nearest multiple of the block size (16 bytes).
				 * Padding will be added by the implementation.
				 *
				 * - Note that PMDs may modify the memory reserved
				 * (first 18 bytes and the final padding).
				 *
				 * Finally, for GCM (@ref RTE_CRYPTO_AEAD_AES_GCM), the
				 * caller should setup this field as follows:
				 *
				 * - the AAD is written in starting at byte 0
				 * - the array must be big enough to hold the AAD, plus
				 * any space to round this up to the nearest multiple
				 * of the block size (16 bytes).
				 *
				 */
				rte_iova_t phys_addr;	/**< physical address */
			} aad;
			/**< Additional authentication parameters */
		} aead;

		struct {
			struct {
				struct {
					uint32_t offset;
					 /**< Starting point for cipher processing,
					  * specified as number of bytes from start
					  * of data in the source buffer.
					  * The result of the cipher operation will be
					  * written back into the output buffer
					  * starting at this location.
					  *
					  * @note
					  * For SNOW 3G @ RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
					  * KASUMI @ RTE_CRYPTO_CIPHER_KASUMI_F8
					  * and ZUC @ RTE_CRYPTO_CIPHER_ZUC_EEA3,
					  * this field should be in bits. For
					  * digest-encrypted cases this must be
					  * an 8-bit multiple.
					  */
					uint32_t length;
					 /**< The message length, in bytes, of the
					  * source buffer on which the cryptographic
					  * operation will be computed.
					  * This is also the same as the result length.
					  * This must be a multiple of the block size
					  * or a multiple of the data-unit length
					  * as described in the xform.
					  *
					  * @note
					  * For SNOW 3G @ RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
					  * KASUMI @ RTE_CRYPTO_CIPHER_KASUMI_F8
					  * and ZUC @ RTE_CRYPTO_CIPHER_ZUC_EEA3,
					  * this field should be in bits. For
					  * digest-encrypted cases this must be
					  * an 8-bit multiple.
					  */
				} data; /**< Data offsets and length for ciphering */
			} cipher;

			struct {
				struct {
					uint32_t offset;
					 /**< Starting point for hash processing,
					  * specified as number of bytes from start of
					  * packet in source buffer.
					  *
					  * @note
					  * For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
					  * KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9
					  * and ZUC @ RTE_CRYPTO_AUTH_ZUC_EIA3,
					  * this field should be in bits. For
					  * digest-encrypted cases this must be
					  * an 8-bit multiple.
					  *
					  * @note
					  * For KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9,
					  * this offset should be such that
					  * data to authenticate starts at COUNT.
					  *
					  * @note
					  * For DOCSIS security protocol, this
					  * offset is the DOCSIS header length
					  * and, therefore, also the CRC offset
					  * i.e. the number of bytes into the
					  * packet at which CRC calculation
					  * should begin.
					  */
					uint32_t length;
					 /**< The message length, in bytes, of the source
					  * buffer that the hash will be computed on.
					  *
					  * @note
					  * For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
					  * KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9
					  * and ZUC @ RTE_CRYPTO_AUTH_ZUC_EIA3,
					  * this field should be in bits. For
					  * digest-encrypted cases this must be
					  * an 8-bit multiple.
					  *
					  * @note
					  * For KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9,
					  * the length should include the COUNT,
					  * FRESH, message, direction bit and padding
					  * (to be multiple of 8 bits).
					  *
					  * @note
					  * For DOCSIS security protocol, this
					  * is the CRC length i.e. the number of
					  * bytes in the packet over which the
					  * CRC should be calculated
					  */
				} data;
				/**< Data offsets and length for authentication */

				struct {
					uint8_t *data;
					/**< This points to the location where
					 * the digest result should be inserted
					 * (in the case of digest generation)
					 * or where the purported digest exists
					 * (in the case of digest verification).
					 *
					 * At session creation time, the client
					 * specified the digest result length with
					 * the digest_length member of the
					 * @ref rte_crypto_auth_xform structure.
					 * For physical crypto devices the caller
					 * must allocate at least digest_length of
					 * physically contiguous memory at this
					 * location.
					 *
					 * For digest generation, the digest result
					 * will overwrite any data at this location.
					 *
					 * @note
					 * Digest-encrypted case.
					 * Digest can be generated, appended to
					 * the end of raw data and encrypted
					 * together using chained digest
					 * generation
					 * (@ref RTE_CRYPTO_AUTH_OP_GENERATE)
					 * and encryption
					 * (@ref RTE_CRYPTO_CIPHER_OP_ENCRYPT)
					 * xforms. Similarly, authentication
					 * of the raw data against appended,
					 * decrypted digest, can be performed
					 * using decryption
					 * (@ref RTE_CRYPTO_CIPHER_OP_DECRYPT)
					 * and digest verification
					 * (@ref RTE_CRYPTO_AUTH_OP_VERIFY)
					 * chained xforms.
					 * To perform those operations, a few
					 * additional conditions must be met:
					 * - caller must allocate at least
					 * digest_length of memory at the end of
					 * source and (in case of out-of-place
					 * operations) destination buffer; those
					 * buffers can be linear or split using
					 * scatter-gather lists,
					 * - digest data pointer must point to
					 * the end of source or (in case of
					 * out-of-place operations) destination
					 * data, which is pointer to the
					 * data buffer + auth.data.offset +
					 * auth.data.length,
					 * - cipher.data.offset +
					 * cipher.data.length must be greater
					 * than auth.data.offset +
					 * auth.data.length and is typically
					 * equal to auth.data.offset +
					 * auth.data.length + digest_length.
					 * - for wireless algorithms, i.e.
					 * SNOW 3G, KASUMI and ZUC, as the
					 * cipher.data.length,
					 * cipher.data.offset,
					 * auth.data.length and
					 * auth.data.offset are in bits, they
					 * must be 8-bit multiples.
					 *
					 * Note that, for security reasons, it
					 * is the PMD's responsibility not to
					 * leave an unencrypted digest in any
					 * buffer after performing auth-cipher
					 * operations.
					 *
					 */
					rte_iova_t phys_addr;
					/**< Physical address of digest */
				} digest; /**< Digest parameters */
			} auth;
		};
	};
};
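
/*
 * Example (illustrative sketch): filling the cipher/auth members for an
 * in-place cipher-then-auth operation on mbuf m, where a hypothetical
 * hdr_len-byte header is authenticated but not ciphered and the digest is
 * appended after the payload. sym_op, sess, hdr_len and payload_len are
 * placeholders; sym_op would normally come from a rte_crypto_op.
 *
 *	sym_op->m_src = m;
 *	sym_op->m_dst = NULL;
 *	sym_op->session = sess;
 *	sym_op->cipher.data.offset = hdr_len;
 *	sym_op->cipher.data.length = payload_len;
 *	sym_op->auth.data.offset = 0;
 *	sym_op->auth.data.length = hdr_len + payload_len;
 *	sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
 *			hdr_len + payload_len);
 *	sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
 *			hdr_len + payload_len);
 */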


/**
 * Reset the fields of a symmetric operation to their default values.
 *
 * @param	op	The crypto operation to be reset.
 */
static inline void
__rte_crypto_sym_op_reset(struct rte_crypto_sym_op *op)
{
	memset(op, 0, sizeof(*op));
}


/**
 * Allocate space for symmetric crypto xforms in the private data space of the
 * crypto operation. This also defaults the crypto xform type to
 * RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED and configures the chaining of the xforms
 * in the crypto operation.
 *
 * @return
 * - On success returns pointer to first crypto xform in crypto operations chain
 * - On failure returns NULL
 */
static inline struct rte_crypto_sym_xform *
__rte_crypto_sym_op_sym_xforms_alloc(struct rte_crypto_sym_op *sym_op,
		void *priv_data, uint8_t nb_xforms)
{
	struct rte_crypto_sym_xform *xform;

	sym_op->xform = xform = (struct rte_crypto_sym_xform *)priv_data;

	do {
		xform->type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED;
		xform = xform->next = --nb_xforms > 0 ? xform + 1 : NULL;
	} while (xform);

	return sym_op->xform;
}
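
/*
 * Illustrative usage sketch: this internal helper is normally reached via
 * rte_crypto_op_sym_xforms_alloc(), which carves nb_xforms transform
 * structures out of the operation's private data area and links them into
 * a chain. Assuming priv_data has room for two transforms:
 *
 *	struct rte_crypto_sym_xform *x =
 *		__rte_crypto_sym_op_sym_xforms_alloc(sym_op, priv_data, 2);
 *	x->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
 *	x->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
 */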


/**
 * Attach a session to a symmetric crypto operation
 *
 * @param	sym_op	crypto operation
 * @param	sess	cryptodev session
 *
 * @return 0 on success
 */
static inline int
__rte_crypto_sym_op_attach_sym_session(struct rte_crypto_sym_op *sym_op, void *sess)
{
	sym_op->session = sess;

	return 0;
}

/**
 * Converts a portion of mbuf data into a vector representation.
 * Each segment will be represented as a separate entry in the *vec* array.
 * Expects the provided *ofs* + *len* not to exceed the mbuf's *pkt_len*.
 * @param mb
 *   Pointer to the *rte_mbuf* object.
 * @param ofs
 *   Offset within mbuf data to start with.
 * @param len
 *   Length of data to represent.
 * @param vec
 *   Pointer to an output array of IO vectors.
 * @param num
 *   Size of an output array.
 * @return
 *   - number of successfully filled entries in *vec* array.
 *   - negative number of elements in *vec* array required.
 */
__rte_experimental
static inline int
rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len,
	struct rte_crypto_vec vec[], uint32_t num)
{
	uint32_t i;
	struct rte_mbuf *nseg;
	uint32_t left;
	uint32_t seglen;

	/* assuming that requested data starts in the first segment */
	RTE_ASSERT(mb->data_len > ofs);

	if (mb->nb_segs > num)
		return -mb->nb_segs;

	vec[0].base = rte_pktmbuf_mtod_offset(mb, void *, ofs);
	vec[0].iova = rte_pktmbuf_iova_offset(mb, ofs);
	vec[0].tot_len = mb->buf_len - rte_pktmbuf_headroom(mb) - ofs;

	/* whole data lies in the first segment */
	seglen = mb->data_len - ofs;
	if (len <= seglen) {
		vec[0].len = len;
		return 1;
	}

	/* data spread across segments */
	vec[0].len = seglen;
	left = len - seglen;
	for (i = 1, nseg = mb->next; nseg != NULL; nseg = nseg->next, i++) {

		vec[i].base = rte_pktmbuf_mtod(nseg, void *);
		vec[i].iova = rte_pktmbuf_iova(nseg);
		/* tot_len must describe this segment, not the first one */
		vec[i].tot_len = nseg->buf_len - rte_pktmbuf_headroom(nseg);

		seglen = nseg->data_len;
		if (left <= seglen) {
			/* whole requested data is completed */
			vec[i].len = left;
			left = 0;
			i++;
			break;
		}

		/* use whole segment */
		vec[i].len = seglen;
		left -= seglen;
	}

	RTE_ASSERT(left == 0);
	return i;
}
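
/*
 * Example (illustrative): gathering the first len bytes of a possibly
 * segmented mbuf m into an IO-vector array. MAX_SEGS is a hypothetical
 * bound on the number of segments the caller expects.
 *
 *	struct rte_crypto_vec vec[MAX_SEGS];
 *	int n = rte_crypto_mbuf_to_vec(m, 0, len, vec, RTE_DIM(vec));
 *	if (n < 0) {
 *		...the mbuf has -n segments; a larger array is needed...
 *	}
 *	struct rte_crypto_sgl sgl = { .vec = vec, .num = n };
 */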


#ifdef __cplusplus
}
#endif

#endif /* _RTE_CRYPTO_SYM_H_ */