/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#ifndef _RTE_CRYPTO_SYM_H_
#define _RTE_CRYPTO_SYM_H_

/**
 * @file rte_crypto_sym.h
 *
 * RTE Definitions for Symmetric Cryptography
 *
 * Defines symmetric cipher and authentication algorithms and modes, as well
 * as supported symmetric crypto operation combinations.
 */

#ifdef __cplusplus
extern "C" {
#endif

#include <string.h>

#include <rte_compat.h>
#include <rte_mbuf.h>
#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_common.h>

/**
 * Crypto IO Vector (in analogy with struct iovec)
 * Supposed to be used to pass input/output data buffers for crypto data-path
 * functions.
 */
struct rte_crypto_vec {
	/** virtual address of the data buffer */
	void *base;
	/** IOVA of the data buffer */
	rte_iova_t iova;
	/** length of the data buffer */
	uint32_t len;
	/** total buffer length */
	uint32_t tot_len;
};

/**
 * Crypto scatter-gather list descriptor. Consists of a pointer to an array
 * of Crypto IO vectors and its size.
 */
struct rte_crypto_sgl {
	/** start of an array of vectors */
	struct rte_crypto_vec *vec;
	/** size of an array of vectors */
	uint32_t num;
};

/**
 * Crypto virtual and IOVA address descriptor, used to describe a
 * cryptographic data buffer without the length information. The length
 * information is normally predefined during session creation.
 */
struct rte_crypto_va_iova_ptr {
	void *va;
	rte_iova_t iova;
};

/**
 * Raw data operation descriptor.
 * Supposed to be used with synchronous CPU crypto API calls or asynchronous
 * RAW data-path API calls.
 */
struct rte_crypto_sym_vec {
	/** number of operations to perform */
	uint32_t num;
	/** array of SGL vectors */
	struct rte_crypto_sgl *src_sgl;
	/** array of SGL vectors for out-of-place (OOP) operation,
	 * keep it NULL for in-place
	 */
	struct rte_crypto_sgl *dest_sgl;
	/** array of pointers to cipher IV */
	struct rte_crypto_va_iova_ptr *iv;
	/** array of pointers to digest */
	struct rte_crypto_va_iova_ptr *digest;

	__extension__
	union {
		/** array of pointers to auth IV, used for chain operation */
		struct rte_crypto_va_iova_ptr *auth_iv;
		/** array of pointers to AAD, used for AEAD operation */
		struct rte_crypto_va_iova_ptr *aad;
	};

	/**
	 * array of statuses for each operation:
	 * - 0 on success
	 * - errno on error
	 */
	int32_t *status;
};
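
/*
 * Example: a minimal sketch of describing a single in-place, single-segment
 * data buffer for the raw data-path API. The names buf, buf_size, payload_len,
 * iv_ptr and digest_ptr below are illustrative application-side assumptions,
 * not part of this API.
 *
 *	struct rte_crypto_vec vec = {
 *		.base = buf,				// virtual address of data
 *		.iova = rte_mem_virt2iova(buf),		// bus address of data
 *		.len = payload_len,			// bytes to process
 *		.tot_len = buf_size,			// whole buffer length
 *	};
 *	struct rte_crypto_sgl sgl = { .vec = &vec, .num = 1 };
 *	int32_t status;
 *	struct rte_crypto_sym_vec symvec = {
 *		.num = 1,
 *		.src_sgl = &sgl,
 *		.dest_sgl = NULL,		// NULL selects in-place operation
 *		.iv = &iv_ptr,			// struct rte_crypto_va_iova_ptr
 *		.digest = &digest_ptr,		// struct rte_crypto_va_iova_ptr
 *		.status = &status,
 *	};
 */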

/**
 * Used for cpu_crypto_process_bulk() to specify head/tail offsets
 * for auth/cipher processing.
 */
union rte_crypto_sym_ofs {
	uint64_t raw;
	struct {
		struct {
			uint16_t head;
			uint16_t tail;
		} auth, cipher;
	} ofs;
};
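
/*
 * Example: a sketch of encoding offsets for cpu_crypto_process_bulk(), where
 * ciphering skips a hypothetical hdr_len bytes of header while authentication
 * covers the data from the start:
 *
 *	union rte_crypto_sym_ofs ofs = { .raw = 0 };
 *	ofs.ofs.cipher.head = hdr_len;	// bytes excluded from ciphering at head
 *	ofs.ofs.cipher.tail = 0;	// bytes excluded from ciphering at tail
 *	ofs.ofs.auth.head = 0;		// authenticate from the very start
 *	ofs.ofs.auth.tail = 0;
 */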

/** Symmetric Cipher Algorithms
 *
 * Note, to avoid ABI breakage across releases
 * - LIST_END should not be added to this enum
 * - the order of enums should not be changed
 * - new algorithms should only be added to the end
 */
enum rte_crypto_cipher_algorithm {
	RTE_CRYPTO_CIPHER_NULL = 1,
	/**< NULL cipher algorithm. No mode applies to the NULL algorithm. */

	RTE_CRYPTO_CIPHER_3DES_CBC,
	/**< Triple DES algorithm in CBC mode */
	RTE_CRYPTO_CIPHER_3DES_CTR,
	/**< Triple DES algorithm in CTR mode */
	RTE_CRYPTO_CIPHER_3DES_ECB,
	/**< Triple DES algorithm in ECB mode */

	RTE_CRYPTO_CIPHER_AES_CBC,
	/**< AES algorithm in CBC mode */
	RTE_CRYPTO_CIPHER_AES_CTR,
	/**< AES algorithm in Counter mode */
	RTE_CRYPTO_CIPHER_AES_ECB,
	/**< AES algorithm in ECB mode */
	RTE_CRYPTO_CIPHER_AES_F8,
	/**< AES algorithm in F8 mode */
	RTE_CRYPTO_CIPHER_AES_XTS,
	/**< AES algorithm in XTS mode */

	RTE_CRYPTO_CIPHER_ARC4,
	/**< (A)RC4 cipher algorithm */

	RTE_CRYPTO_CIPHER_KASUMI_F8,
	/**< KASUMI algorithm in F8 mode */

	RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
	/**< SNOW 3G algorithm in UEA2 mode */

	RTE_CRYPTO_CIPHER_ZUC_EEA3,
	/**< ZUC algorithm in EEA3 mode */

	RTE_CRYPTO_CIPHER_DES_CBC,
	/**< DES algorithm in CBC mode */

	RTE_CRYPTO_CIPHER_AES_DOCSISBPI,
	/**< AES algorithm using modes required by
	 * DOCSIS Baseline Privacy Plus Spec.
	 * Chained mbufs are not supported in this mode, i.e. rte_mbuf.next
	 * for m_src and m_dst in the rte_crypto_sym_op must be NULL.
	 */

	RTE_CRYPTO_CIPHER_DES_DOCSISBPI,
	/**< DES algorithm using modes required by
	 * DOCSIS Baseline Privacy Plus Spec.
	 * Chained mbufs are not supported in this mode, i.e. rte_mbuf.next
	 * for m_src and m_dst in the rte_crypto_sym_op must be NULL.
	 */

	RTE_CRYPTO_CIPHER_SM4_ECB,
	/**< ShangMi 4 (SM4) algorithm in ECB mode */
	RTE_CRYPTO_CIPHER_SM4_CBC,
	/**< ShangMi 4 (SM4) algorithm in CBC mode */
	RTE_CRYPTO_CIPHER_SM4_CTR,
	/**< ShangMi 4 (SM4) algorithm in CTR mode */
	RTE_CRYPTO_CIPHER_SM4_OFB,
	/**< ShangMi 4 (SM4) algorithm in OFB mode */
	RTE_CRYPTO_CIPHER_SM4_CFB
	/**< ShangMi 4 (SM4) algorithm in CFB mode */
};

/** Cipher algorithm name strings */
__rte_deprecated
extern const char *
rte_crypto_cipher_algorithm_strings[];

/** Symmetric Cipher Direction */
enum rte_crypto_cipher_operation {
	RTE_CRYPTO_CIPHER_OP_ENCRYPT,
	/**< Encrypt cipher operation */
	RTE_CRYPTO_CIPHER_OP_DECRYPT
	/**< Decrypt cipher operation */
};

/** Cipher operation name strings */
extern const char *
rte_crypto_cipher_operation_strings[];

/**
 * Symmetric Cipher Setup Data.
 *
 * This structure contains data relating to Cipher (Encryption and Decryption)
 * used to create a session.
 */
struct rte_crypto_cipher_xform {
	enum rte_crypto_cipher_operation op;
	/**< This parameter determines if the cipher operation is an encrypt or
	 * a decrypt operation. For the RC4 algorithm and the F8/CTR modes,
	 * only encrypt operations are valid.
	 */
	enum rte_crypto_cipher_algorithm algo;
	/**< Cipher algorithm */

	struct {
		const uint8_t *data;	/**< pointer to key data */
		uint16_t length;	/**< key length in bytes */
	} key;
	/**< Cipher key
	 *
	 * In case the PMD supports RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY, the
	 * original key data provided may be wrapped (encrypted) using a key
	 * wrap algorithm such as AES key wrap (RFC 3394), and hence the length
	 * of the key may exceed the PMD's advertised supported key size.
	 * The PMD shall validate the key length and report an EMSGSIZE error
	 * while configuring the session; the application can skip checking the
	 * capability key length in such cases.
	 *
	 * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.data will
	 * point to a concatenation of the AES encryption key followed by a
	 * keymask. As per RFC 3711, the keymask should be padded with trailing
	 * bytes to match the length of the encryption key used.
	 *
	 * Cipher key length is in bytes. For AES it can be 128 bits (16 bytes),
	 * 192 bits (24 bytes) or 256 bits (32 bytes).
	 *
	 * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.length
	 * should be set to the combined length of the encryption key and the
	 * keymask. Since the keymask and the encryption key are the same size,
	 * key.length should be set to 2 x the AES encryption key length.
	 *
	 * For the AES-XTS mode of operation:
	 *  - Two keys must be provided and key.length refers to the total
	 *    length of the two keys.
	 *  - key.data must point to the two keys concatenated together
	 *    (key1 || key2).
	 *  - Each key can be either 128 bits (16 bytes) or 256 bits (32 bytes).
	 *  - Both keys must have the same size.
	 */
	struct {
		uint16_t offset;
		/**< Starting point for Initialisation Vector or Counter,
		 * specified as number of bytes from start of crypto
		 * operation (rte_crypto_op).
		 *
		 * - For block ciphers in CBC or F8 mode, or for KASUMI
		 * in F8 mode, or for SNOW 3G in UEA2 mode, this is the
		 * Initialisation Vector (IV) value.
		 *
		 * - For block ciphers in CTR mode, this is the counter.
		 *
		 * - For CCM mode, the first byte is reserved, and the
		 * nonce should be written starting at &iv[1] (to allow
		 * space for the implementation to write in the flags
		 * in the first byte). Note that a full 16 bytes should
		 * be allocated, even though the length field will
		 * have a value less than this. Note that the PMDs may
		 * modify the memory reserved (the first byte and the
		 * final padding).
		 *
		 * - For AES-XTS, this is the 128-bit tweak, i, from
		 * IEEE Std 1619-2007.
		 *
		 * For optimum performance, the data pointed to SHOULD
		 * be 8-byte aligned.
		 */
		uint16_t length;
		/**< Length of valid IV data.
		 *
		 * - For block ciphers in CBC or F8 mode, or for KASUMI
		 * in F8 mode, or for SNOW 3G in UEA2 mode, this is the
		 * length of the IV (which must be the same as the
		 * block length of the cipher).
		 *
		 * - For block ciphers in CTR mode, this is the length
		 * of the counter (which must be the same as the block
		 * length of the cipher).
		 *
		 * - For CCM mode, this is the length of the nonce,
		 * which can be in the range 7 to 13 inclusive.
		 */
	} iv;	/**< Initialisation vector parameters */

	uint32_t dataunit_len;
	/**< When RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS is enabled,
	 * this is the data-unit length of the algorithm;
	 * otherwise, or when the value is 0, use the operation length.
	 * The value should be in the range defined by the dataunit_set field
	 * in the cipher capability.
	 *
	 * - For AES-XTS it is the size of the data-unit, from IEEE Std
	 * 1619-2007. For each data-unit in the operation, the tweak (IV)
	 * value is assigned consecutively, starting from the operation's
	 * assigned IV.
	 */
};
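
/*
 * Example: a sketch of filling the transform above for AES-128-CBC
 * encryption. IV_OFFSET is a hypothetical application-chosen offset into the
 * rte_crypto_op where the 16-byte IV will be stored; aes_key holds an
 * application-provided 128-bit key.
 *
 *	struct rte_crypto_cipher_xform cipher = {
 *		.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *		.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *		.key = { .data = aes_key, .length = 16 },
 *		.iv = { .offset = IV_OFFSET, .length = 16 },	// AES block size
 *		.dataunit_len = 0,	// use the operation length
 *	};
 */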

/** Symmetric Authentication / Hash Algorithms
 *
 * Note, to avoid ABI breakage across releases
 * - LIST_END should not be added to this enum
 * - the order of enums should not be changed
 * - new algorithms should only be added to the end
 */
enum rte_crypto_auth_algorithm {
	RTE_CRYPTO_AUTH_NULL = 1,
	/**< NULL hash algorithm. */

	RTE_CRYPTO_AUTH_AES_CBC_MAC,
	/**< AES-CBC-MAC algorithm. Only 128-bit keys are supported. */
	RTE_CRYPTO_AUTH_AES_CMAC,
	/**< AES CMAC algorithm. */
	RTE_CRYPTO_AUTH_AES_GMAC,
	/**< AES GMAC algorithm. */
	RTE_CRYPTO_AUTH_AES_XCBC_MAC,
	/**< AES XCBC algorithm. */

	RTE_CRYPTO_AUTH_KASUMI_F9,
	/**< KASUMI algorithm in F9 mode. */

	RTE_CRYPTO_AUTH_MD5,
	/**< MD5 algorithm */
	RTE_CRYPTO_AUTH_MD5_HMAC,
	/**< HMAC using MD5 algorithm */

	RTE_CRYPTO_AUTH_SHA1,
	/**< 160 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA1_HMAC,
	/**< HMAC using 160 bit SHA algorithm.
	 * HMAC-SHA-1-96 can be generated by setting
	 * digest_length to 12 bytes in auth/aead xforms.
	 */
	RTE_CRYPTO_AUTH_SHA224,
	/**< 224 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA224_HMAC,
	/**< HMAC using 224 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA256,
	/**< 256 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA256_HMAC,
	/**< HMAC using 256 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA384,
	/**< 384 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA384_HMAC,
	/**< HMAC using 384 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA512,
	/**< 512 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA512_HMAC,
	/**< HMAC using 512 bit SHA algorithm. */

	RTE_CRYPTO_AUTH_SNOW3G_UIA2,
	/**< SNOW 3G algorithm in UIA2 mode. */

	RTE_CRYPTO_AUTH_ZUC_EIA3,
	/**< ZUC algorithm in EIA3 mode */

	RTE_CRYPTO_AUTH_SHA3_224,
	/**< 224 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_224_HMAC,
	/**< HMAC using 224 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_256,
	/**< 256 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_256_HMAC,
	/**< HMAC using 256 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_384,
	/**< 384 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_384_HMAC,
	/**< HMAC using 384 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_512,
	/**< 512 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_512_HMAC,
	/**< HMAC using 512 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SM3,
	/**< ShangMi 3 (SM3) algorithm */

	RTE_CRYPTO_AUTH_SHAKE_128,
	/**< 128 bit SHAKE algorithm. */
	RTE_CRYPTO_AUTH_SHAKE_256,
	/**< 256 bit SHAKE algorithm. */
	RTE_CRYPTO_AUTH_SM3_HMAC,
	/**< HMAC using ShangMi 3 (SM3) algorithm */
};

/** Authentication algorithm name strings */
__rte_deprecated
extern const char *
rte_crypto_auth_algorithm_strings[];

/** Symmetric Authentication / Hash Operations */
enum rte_crypto_auth_operation {
	RTE_CRYPTO_AUTH_OP_VERIFY,	/**< Verify authentication digest */
	RTE_CRYPTO_AUTH_OP_GENERATE	/**< Generate authentication digest */
};

/** Authentication operation name strings */
extern const char *
rte_crypto_auth_operation_strings[];

/**
 * Authentication / Hash transform data.
 *
 * This structure contains data relating to an authentication/hash crypto
 * transform. The fields op, algo and digest_length are common to all
 * authentication transforms and MUST be set.
 */
struct rte_crypto_auth_xform {
	enum rte_crypto_auth_operation op;
	/**< Authentication operation type */
	enum rte_crypto_auth_algorithm algo;
	/**< Authentication algorithm selection */

	struct {
		const uint8_t *data;	/**< pointer to key data */
		uint16_t length;	/**< key length in bytes */
	} key;
	/**< Authentication key data.
	 * The authentication key length MUST be less than or equal to the
	 * block size of the algorithm. It is the caller's responsibility to
	 * ensure that the key length is compliant with the standard being used
	 * (for example RFC 2104, FIPS 198a).
	 */

	struct {
		uint16_t offset;
		/**< Starting point for Initialisation Vector or Counter,
		 * specified as number of bytes from start of crypto
		 * operation (rte_crypto_op).
		 *
		 * - For SNOW 3G in UIA2 mode and for ZUC in EIA3 mode,
		 *   this is the authentication Initialisation Vector
		 *   (IV) value. For the AES-GMAC IV description please
		 *   refer to the field `length` in the iv struct.
		 *
		 * - For KASUMI in F9 mode and other authentication
		 *   algorithms, this field is not used.
		 *
		 * For optimum performance, the data pointed to SHOULD
		 * be 8-byte aligned.
		 */
		uint16_t length;
		/**< Length of valid IV data.
		 *
		 * - For SNOW 3G in UIA2 mode, for ZUC in EIA3 mode and
		 *   for AES-GMAC, this is the length of the IV.
		 *
		 * - For KASUMI in F9 mode and other authentication
		 *   algorithms, this field is not used.
		 *
		 * - For GMAC mode, this is either:
		 * 1) Number greater or equal to one, which means that IV
		 *    is used and J0 will be computed internally, a minimum
		 *    of 16 bytes must be allocated.
		 * 2) Zero, in which case data points to J0. In this case
		 *    16 bytes of J0 should be passed where J0 is defined
		 *    by NIST SP800-38D.
		 */
	} iv;	/**< Initialisation vector parameters */

	uint16_t digest_length;
	/**< Length of the digest to be returned. If the verify option is set,
	 * this specifies the length of the digest to be compared for the
	 * session.
	 *
	 * It is the caller's responsibility to ensure that the
	 * digest length is compliant with the hash algorithm being used.
	 * If the value is less than the maximum length allowed by the hash,
	 * the result shall be truncated.
	 */
};
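
/*
 * Example: a sketch of an authentication transform generating a truncated
 * HMAC-SHA-1-96 digest (digest_length = 12, per the note on
 * RTE_CRYPTO_AUTH_SHA1_HMAC above). hmac_key is an application-provided key
 * no longer than the SHA-1 block size (64 bytes); the IV fields are unused
 * for HMAC.
 *
 *	struct rte_crypto_auth_xform auth = {
 *		.op = RTE_CRYPTO_AUTH_OP_GENERATE,
 *		.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
 *		.key = { .data = hmac_key, .length = 20 },
 *		.iv = { .offset = 0, .length = 0 },
 *		.digest_length = 12,		// HMAC-SHA-1-96 truncation
 *	};
 */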


/** Symmetric AEAD Algorithms
 *
 * Note, to avoid ABI breakage across releases
 * - LIST_END should not be added to this enum
 * - the order of enums should not be changed
 * - new algorithms should only be added to the end
 */
enum rte_crypto_aead_algorithm {
	RTE_CRYPTO_AEAD_AES_CCM = 1,
	/**< AES algorithm in CCM mode. */
	RTE_CRYPTO_AEAD_AES_GCM,
	/**< AES algorithm in GCM mode. */
	RTE_CRYPTO_AEAD_CHACHA20_POLY1305
	/**< Chacha20 cipher with poly1305 authenticator */
};

/** AEAD algorithm name strings */
__rte_deprecated
extern const char *
rte_crypto_aead_algorithm_strings[];

/** Symmetric AEAD Operations */
enum rte_crypto_aead_operation {
	RTE_CRYPTO_AEAD_OP_ENCRYPT,
	/**< Encrypt and generate digest */
	RTE_CRYPTO_AEAD_OP_DECRYPT
	/**< Verify digest and decrypt */
};

/** AEAD operation name strings */
extern const char *
rte_crypto_aead_operation_strings[];

/** Symmetric AEAD Setup Data. */
struct rte_crypto_aead_xform {
	enum rte_crypto_aead_operation op;
	/**< AEAD operation type */
	enum rte_crypto_aead_algorithm algo;
	/**< AEAD algorithm selection */

	struct {
		const uint8_t *data;	/**< pointer to key data */
		uint16_t length;	/**< key length in bytes */
	} key;

	struct {
		uint16_t offset;
		/**< Starting point for Initialisation Vector or Counter,
		 * specified as number of bytes from start of crypto
		 * operation (rte_crypto_op).
		 *
		 * - For CCM mode, the first byte is reserved, and the
		 * nonce should be written starting at &iv[1] (to allow
		 * space for the implementation to write in the flags
		 * in the first byte). Note that a full 16 bytes should
		 * be allocated, even though the length field will
		 * have a value less than this.
		 *
		 * - For Chacha20-Poly1305 it is a 96-bit nonce.
		 * The PMD sets the initial counter for the Poly1305 key
		 * generation part to 0 and for Chacha20 encryption to 1,
		 * as per RFC 8439 section 2.8 "AEAD construction".
		 *
		 * For optimum performance, the data pointed to SHOULD
		 * be 8-byte aligned.
		 */
		uint16_t length;
		/**< Length of valid IV data.
		 *
		 * - For GCM mode, this is either:
		 * 1) Number greater or equal to one, which means that IV
		 *    is used and J0 will be computed internally, a minimum
		 *    of 16 bytes must be allocated.
		 * 2) Zero, in which case data points to J0. In this case
		 *    16 bytes of J0 should be passed where J0 is defined
		 *    by NIST SP800-38D.
		 *
		 * - For CCM mode, this is the length of the nonce,
		 * which can be in the range 7 to 13 inclusive.
		 *
		 * - For Chacha20-Poly1305 this field is always 12.
		 */
	} iv;	/**< Initialisation vector parameters */

	uint16_t digest_length;
	/**< Length of the digest (tag) to be returned, in bytes. */

	uint16_t aad_length;
	/**< The length of the additional authenticated data (AAD) in bytes.
	 * For CCM mode, this is the length of the actual AAD, even though
	 * it is required to reserve 18 bytes before the AAD plus padding
	 * at the end of it, so that a multiple of 16 bytes is allocated.
	 */
};
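
/*
 * Example: a sketch of an AES-256-GCM transform. A 12-byte iv.length requests
 * internal J0 computation as described above, and a digest_length of 16
 * selects the full GCM tag. gcm_key, IV_OFFSET and aad_len are
 * application-side assumptions.
 *
 *	struct rte_crypto_aead_xform aead = {
 *		.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
 *		.algo = RTE_CRYPTO_AEAD_AES_GCM,
 *		.key = { .data = gcm_key, .length = 32 },
 *		.iv = { .offset = IV_OFFSET, .length = 12 },
 *		.digest_length = 16,
 *		.aad_length = aad_len,
 *	};
 */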

/** Crypto transformation types */
enum rte_crypto_sym_xform_type {
	RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED = 0,	/**< No xform specified */
	RTE_CRYPTO_SYM_XFORM_AUTH,		/**< Authentication xform */
	RTE_CRYPTO_SYM_XFORM_CIPHER,		/**< Cipher xform  */
	RTE_CRYPTO_SYM_XFORM_AEAD		/**< AEAD xform  */
};

/**
 * Symmetric crypto transform structure.
 *
 * This is used to specify the crypto transforms required. Multiple transforms
 * can be chained together to specify a chain of transforms such as
 * authentication then cipher, or cipher then authentication. Each transform
 * structure can hold a single transform; the type field is used to specify
 * which transform is contained within the union.
 */
/* Structure rte_crypto_sym_xform 8< */
struct rte_crypto_sym_xform {
	struct rte_crypto_sym_xform *next;
	/**< next xform in chain */
	enum rte_crypto_sym_xform_type type;
	/**< xform type */
	union {
		struct rte_crypto_auth_xform auth;
		/**< Authentication / hash xform */
		struct rte_crypto_cipher_xform cipher;
		/**< Cipher xform */
		struct rte_crypto_aead_xform aead;
		/**< AEAD xform */
	};
};
/* >8 End of structure rte_crypto_sym_xform. */
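
/*
 * Example: a sketch of chaining two transforms for cipher-then-authenticate
 * processing; the order of the chain defines the order of processing. The
 * cipher and auth members are assumed to be filled as in the earlier sketches.
 *
 *	struct rte_crypto_sym_xform cipher_xform, auth_xform;
 *
 *	cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
 *	cipher_xform.next = &auth_xform;	// cipher first, then auth
 *	auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
 *	auth_xform.next = NULL;			// end of chain
 */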

/**
 * Symmetric Cryptographic Operation.
 *
 * This structure contains data relating to performing symmetric cryptographic
 * processing on a referenced mbuf data buffer.
 *
 * When a symmetric crypto operation is enqueued with the device for processing
 * it must have a valid *rte_mbuf* structure attached, via the m_src parameter,
 * which contains the source data on which the crypto operation is to be
 * performed.
 * While the mbuf is in use by a crypto operation, no part of the mbuf should
 * be changed by the application, as the device may read or write to any part
 * of the mbuf. In the case of hardware crypto devices, some or all of the mbuf
 * may be DMAed in and out of the device, writing over the original data,
 * though only the part specified by the rte_crypto_sym_op for transformation
 * will be changed.
 * Out-of-place (OOP) operation, where the source mbuf is different from the
 * destination mbuf, is a special case. Data will be copied from m_src to m_dst.
 * The part copied includes all the parts of the source mbuf that will be
 * operated on, based on the cipher.data.offset+cipher.data.length and
 * auth.data.offset+auth.data.length values in the rte_crypto_sym_op. The part
 * indicated by the cipher parameters will be transformed; any extra data
 * around this indicated by the auth parameters will be copied unchanged from
 * source to destination mbuf.
 * Also in OOP operation the cipher.data.offset and auth.data.offset apply to
 * both source and destination mbufs. As these offsets are relative to the
 * data_off parameter in each mbuf, this can result in the data written to the
 * destination buffer being at a different alignment, relative to buffer start,
 * to the data in the source buffer.
 */
/* Structure rte_crypto_sym_op 8< */
struct rte_crypto_sym_op {
	struct rte_mbuf *m_src;	/**< source mbuf */
	struct rte_mbuf *m_dst;	/**< destination mbuf */

	union {
		void *session;
		/**< Handle for the initialised crypto/security session context */
		struct rte_crypto_sym_xform *xform;
		/**< Session-less API crypto operation parameters */
	};

	union {
		struct {
			struct {
				uint32_t offset;
				 /**< Starting point for AEAD processing, specified as
				  * number of bytes from start of packet in source
				  * buffer.
				  */
				uint32_t length;
				 /**< The message length, in bytes, of the source buffer
				  * on which the cryptographic operation will be
				  * computed. This must be a multiple of the block size.
				  */
			} data; /**< Data offsets and length for AEAD */
			struct {
				uint8_t *data;
				/**< This points to the location where the digest result
				 * should be inserted (in the case of digest generation)
				 * or where the purported digest exists (in the case of
				 * digest verification).
				 *
				 * At session creation time, the client specified the
				 * digest result length with the digest_length member
				 * of the @ref rte_crypto_auth_xform structure. For
				 * physical crypto devices the caller must allocate at
				 * least digest_length of physically contiguous memory
				 * at this location.
				 *
				 * For digest generation, the digest result will
				 * overwrite any data at this location.
				 *
				 * @note
				 * For GCM (@ref RTE_CRYPTO_AEAD_AES_GCM), for
				 * "digest result" read "authentication tag T".
				 */
				rte_iova_t phys_addr;
				/**< Physical address of digest */
			} digest; /**< Digest parameters */
			struct {
				uint8_t *data;
				/**< Pointer to Additional Authenticated Data (AAD)
				 * needed for authenticated cipher mechanisms (CCM and
				 * GCM).
				 *
				 * Specifically for CCM (@ref RTE_CRYPTO_AEAD_AES_CCM),
				 * the caller should set up this field as follows:
				 *
				 * - the additional authentication data itself should
				 * be written starting at an offset of 18 bytes into
				 * the array, leaving room for the first block (16 bytes)
				 * and the length encoding in the first two bytes of the
				 * second block.
				 *
				 * - the array should be big enough to hold the above
				 * fields, plus any padding to round this up to the
				 * nearest multiple of the block size (16 bytes).
				 * Padding will be added by the implementation.
				 *
				 * - Note that PMDs may modify the memory reserved
				 * (first 18 bytes and the final padding).
				 *
				 * Finally, for GCM (@ref RTE_CRYPTO_AEAD_AES_GCM), the
				 * caller should set up this field as follows:
				 *
				 * - the AAD is written in starting at byte 0
				 * - the array must be big enough to hold the AAD, plus
				 * any space to round this up to the nearest multiple
				 * of the block size (16 bytes).
				 */
				rte_iova_t phys_addr;	/**< physical address */
			} aad;
			/**< Additional authentication parameters */
		} aead;

		struct {
			struct {
				struct {
					uint32_t offset;
					 /**< Starting point for cipher processing,
					  * specified as number of bytes from start
					  * of data in the source buffer.
					  * The result of the cipher operation will be
					  * written back into the output buffer
					  * starting at this location.
					  *
					  * @note
					  * For SNOW 3G @ RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
					  * KASUMI @ RTE_CRYPTO_CIPHER_KASUMI_F8
					  * and ZUC @ RTE_CRYPTO_CIPHER_ZUC_EEA3,
					  * this field should be in bits. For
					  * digest-encrypted cases this must be
					  * an 8-bit multiple.
					  */
					uint32_t length;
					 /**< The message length, in bytes, of the
					  * source buffer on which the cryptographic
					  * operation will be computed.
					  * This is also the same as the result length.
					  * This must be a multiple of the block size
					  * or a multiple of the data-unit length
					  * as described in the xform.
					  *
					  * @note
					  * For SNOW 3G @ RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
					  * KASUMI @ RTE_CRYPTO_CIPHER_KASUMI_F8
					  * and ZUC @ RTE_CRYPTO_CIPHER_ZUC_EEA3,
					  * this field should be in bits. For
					  * digest-encrypted cases this must be
					  * an 8-bit multiple.
					  */
				} data; /**< Data offsets and length for ciphering */
			} cipher;

			struct {
				struct {
					uint32_t offset;
					 /**< Starting point for hash processing,
					  * specified as number of bytes from start of
					  * packet in source buffer.
					  *
					  * @note
					  * For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
					  * KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9
					  * and ZUC @ RTE_CRYPTO_AUTH_ZUC_EIA3,
					  * this field should be in bits. For
					  * digest-encrypted cases this must be
					  * an 8-bit multiple.
					  *
					  * @note
					  * For KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9,
					  * this offset should be such that
					  * data to authenticate starts at COUNT.
					  *
					  * @note
					  * For DOCSIS security protocol, this
					  * offset is the DOCSIS header length
					  * and, therefore, also the CRC offset
					  * i.e. the number of bytes into the
					  * packet at which CRC calculation
					  * should begin.
					  */
					uint32_t length;
					 /**< The message length, in bytes, of the source
					  * buffer that the hash will be computed on.
					  *
					  * @note
					  * For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
					  * KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9
					  * and ZUC @ RTE_CRYPTO_AUTH_ZUC_EIA3,
					  * this field should be in bits. For
					  * digest-encrypted cases this must be
					  * an 8-bit multiple.
					  *
					  * @note
					  * For KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9,
					  * the length should include the COUNT,
					  * FRESH, message, direction bit and padding
					  * (to be a multiple of 8 bits).
					  *
					  * @note
					  * For DOCSIS security protocol, this
					  * is the CRC length i.e. the number of
					  * bytes in the packet over which the
					  * CRC should be calculated.
					  */
				} data;
				/**< Data offsets and length for authentication */

				struct {
					uint8_t *data;
					/**< This points to the location where
					 * the digest result should be inserted
					 * (in the case of digest generation)
					 * or where the purported digest exists
					 * (in the case of digest verification).
					 *
					 * At session creation time, the client
					 * specified the digest result length with
					 * the digest_length member of the
					 * @ref rte_crypto_auth_xform structure.
					 * For physical crypto devices the caller
					 * must allocate at least digest_length of
					 * physically contiguous memory at this
					 * location.
					 *
					 * For digest generation, the digest result
					 * will overwrite any data at this location.
					 *
					 * @note
					 * Digest-encrypted case.
					 * Digest can be generated, appended to
					 * the end of raw data and encrypted
					 * together using chained digest
					 * generation
					 * (@ref RTE_CRYPTO_AUTH_OP_GENERATE)
					 * and encryption
					 * (@ref RTE_CRYPTO_CIPHER_OP_ENCRYPT)
					 * xforms. Similarly, authentication
					 * of the raw data against an appended,
					 * decrypted digest can be performed
					 * using decryption
					 * (@ref RTE_CRYPTO_CIPHER_OP_DECRYPT)
					 * and digest verification
					 * (@ref RTE_CRYPTO_AUTH_OP_VERIFY)
					 * chained xforms.
					 * To perform those operations, a few
					 * additional conditions must be met:
					 * - the caller must allocate at least
					 * digest_length of memory at the end of
					 * source and (in case of out-of-place
					 * operations) destination buffer; those
					 * buffers can be linear or split using
					 * scatter-gather lists,
					 * - the digest data pointer must point to
					 * the end of source or (in case of
					 * out-of-place operations) destination
					 * data, which is the pointer to the
					 * data buffer + auth.data.offset +
					 * auth.data.length,
					 * - cipher.data.offset +
					 * cipher.data.length must be greater
					 * than auth.data.offset +
					 * auth.data.length and is typically
					 * equal to auth.data.offset +
					 * auth.data.length + digest_length,
					 * - for wireless algorithms, i.e.
					 * SNOW 3G, KASUMI and ZUC, as the
					 * cipher.data.length,
					 * cipher.data.offset,
					 * auth.data.length and
					 * auth.data.offset are in bits, they
					 * must be 8-bit multiples.
					 *
					 * Note that, for security reasons, it
					 * is the PMDs' responsibility not to
					 * leave an unencrypted digest in any
					 * buffer after performing auth-cipher
					 * operations.
					 */
					rte_iova_t phys_addr;
					/**< Physical address of digest */
				} digest; /**< Digest parameters */
			} auth;
		};
	};
};
/* >8 End of structure rte_crypto_sym_op. */
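
/*
 * Example: a sketch of populating the operation above for an in-place,
 * session-based AES-GCM encrypt on a contiguous mbuf m laid out as
 * [AAD | plaintext | digest], where aad_len and plen are application-side
 * assumptions and op is a struct rte_crypto_op (defined in rte_crypto.h)
 * carrying this symmetric op.
 *
 *	struct rte_crypto_sym_op *sym_op = op->sym;
 *
 *	sym_op->m_src = m;
 *	sym_op->m_dst = NULL;			// in-place operation
 *	sym_op->session = sess;			// initialised GCM session
 *	sym_op->aead.data.offset = aad_len;	// ciphering starts after AAD
 *	sym_op->aead.data.length = plen;
 *	sym_op->aead.aad.data = rte_pktmbuf_mtod(m, uint8_t *);
 *	sym_op->aead.aad.phys_addr = rte_pktmbuf_iova(m);
 *	sym_op->aead.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
 *			aad_len + plen);
 *	sym_op->aead.digest.phys_addr =
 *			rte_pktmbuf_iova_offset(m, aad_len + plen);
 */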


/**
 * Reset the fields of a symmetric operation to their default values.
 *
 * @param	op	The crypto operation to be reset.
 */
static inline void
__rte_crypto_sym_op_reset(struct rte_crypto_sym_op *op)
{
	memset(op, 0, sizeof(*op));
}


/**
 * Allocate space for symmetric crypto xforms in the private data space of the
 * crypto operation. This also defaults the crypto xform type to
 * RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED and configures the chaining of the xforms
 * in the crypto operation.
 *
 * @return
 * - On success returns pointer to first crypto xform in crypto operation's chain
 * - On failure returns NULL
 */
static inline struct rte_crypto_sym_xform *
__rte_crypto_sym_op_sym_xforms_alloc(struct rte_crypto_sym_op *sym_op,
		void *priv_data, uint8_t nb_xforms)
{
	struct rte_crypto_sym_xform *xform;

	sym_op->xform = xform = (struct rte_crypto_sym_xform *)priv_data;

	do {
		xform->type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED;
		xform = xform->next = --nb_xforms > 0 ? xform + 1 : NULL;
	} while (xform);

	return sym_op->xform;
}
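
/*
 * Example: a sketch of how the helper above lays out two chained xforms in
 * caller-provided private data; the buffer must be able to hold nb_xforms
 * contiguous struct rte_crypto_sym_xform entries.
 *
 *	struct rte_crypto_sym_xform buf[2];
 *	struct rte_crypto_sym_xform *x =
 *		__rte_crypto_sym_op_sym_xforms_alloc(sym_op, buf, 2);
 *
 *	// now x == sym_op->xform, x->next == x + 1, x->next->next == NULL,
 *	// and both types are preset to RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED.
 */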


/**
 * Attach a session to a symmetric crypto operation
 *
 * @param	sym_op	crypto operation
 * @param	sess	cryptodev session
 */
static inline int
__rte_crypto_sym_op_attach_sym_session(struct rte_crypto_sym_op *sym_op, void *sess)
{
	sym_op->session = sess;

	return 0;
}

/**
 * Converts a portion of mbuf data into a vector representation.
 * Each segment will be represented as a separate entry in the *vec* array.
 * Expects the provided *ofs* + *len* not to exceed the mbuf's *pkt_len*.
 *
 * @param mb
 *   Pointer to the *rte_mbuf* object.
 * @param ofs
 *   Offset within mbuf data to start with.
 * @param len
 *   Length of data to represent.
 * @param vec
 *   Pointer to an output array of IO vectors.
 * @param num
 *   Size of an output array.
 * @return
 *   - number of successfully filled entries in *vec* array.
 *   - negative number of elements in *vec* array required.
 */
__rte_experimental
static inline int
rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len,
	struct rte_crypto_vec vec[], uint32_t num)
{
	uint32_t i;
	struct rte_mbuf *nseg;
	uint32_t left;
	uint32_t seglen;

	/* assuming that requested data starts in the first segment */
	RTE_ASSERT(mb->data_len > ofs);

	if (mb->nb_segs > num)
		return -mb->nb_segs;

	vec[0].base = rte_pktmbuf_mtod_offset(mb, void *, ofs);
	vec[0].iova = rte_pktmbuf_iova_offset(mb, ofs);
	vec[0].tot_len = mb->buf_len - rte_pktmbuf_headroom(mb) - ofs;

	/* whole data lies in the first segment */
	seglen = mb->data_len - ofs;
	if (len <= seglen) {
		vec[0].len = len;
		return 1;
	}

	/* data spread across segments */
	vec[0].len = seglen;
	left = len - seglen;
	for (i = 1, nseg = mb->next; nseg != NULL; nseg = nseg->next, i++) {

		vec[i].base = rte_pktmbuf_mtod(nseg, void *);
		vec[i].iova = rte_pktmbuf_iova(nseg);
		/* for continuation segments the usable buffer starts at the
		 * headroom boundary of that segment, not of the first one
		 */
		vec[i].tot_len = nseg->buf_len - rte_pktmbuf_headroom(nseg);

		seglen = nseg->data_len;
		if (left <= seglen) {
			/* whole requested data is completed */
			vec[i].len = left;
			left = 0;
			i++;
			break;
		}

		/* use whole segment */
		vec[i].len = seglen;
		left -= seglen;
	}

	RTE_ASSERT(left == 0);
	return i;
}
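
/*
 * Example: a sketch of using rte_crypto_mbuf_to_vec() with its negative
 * return convention, filling a struct rte_crypto_sgl; MAX_SEGS is a
 * hypothetical application-chosen bound on the number of segments.
 *
 *	struct rte_crypto_vec vec[MAX_SEGS];
 *	struct rte_crypto_sgl sgl;
 *	int n = rte_crypto_mbuf_to_vec(mb, ofs, len, vec, RTE_DIM(vec));
 *
 *	if (n < 0)
 *		return -ENOSPC;	// mbuf has -n segments; vec[] is too small
 *	sgl.vec = vec;
 *	sgl.num = n;
 */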


#ifdef __cplusplus
}
#endif

#endif /* _RTE_CRYPTO_SYM_H_ */