/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#ifndef _RTE_CRYPTO_SYM_H_
#define _RTE_CRYPTO_SYM_H_

/**
 * @file rte_crypto_sym.h
 *
 * RTE Definitions for Symmetric Cryptography
 *
 * Defines symmetric cipher and authentication algorithms and modes, as well
 * as supported symmetric crypto operation combinations.
 */

#ifdef __cplusplus
extern "C" {
#endif

#include <string.h>

#include <rte_mbuf.h>
#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_common.h>

/**
 * Crypto IO Vector (in analogy with struct iovec).
 * Intended to be used to pass input/output data buffers to crypto data-path
 * functions.
 */
struct rte_crypto_vec {
	/** virtual address of the data buffer */
	void *base;
	/** IOVA of the data buffer */
	rte_iova_t iova;
	/** length of the data buffer */
	uint32_t len;
	/** total buffer length */
	uint32_t tot_len;
};

/**
 * Crypto scatter-gather list descriptor. Consists of a pointer to an array
 * of Crypto IO vectors and the size of that array.
 */
struct rte_crypto_sgl {
	/** start of an array of vectors */
	struct rte_crypto_vec *vec;
	/** size of an array of vectors */
	uint32_t num;
};
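
/*
 * Example (illustrative sketch, not part of the API): describing two flat
 * data buffers with a single SGL. buf0/buf1, their IOVAs and lengths are
 * assumed to have been obtained by the application beforehand, e.g. via
 * rte_malloc() and rte_malloc_virt2iova().
 *
 *	struct rte_crypto_vec vec[2] = {
 *		{.base = buf0, .iova = iova0, .len = len0, .tot_len = len0},
 *		{.base = buf1, .iova = iova1, .len = len1, .tot_len = len1},
 *	};
 *	struct rte_crypto_sgl sgl = {.vec = vec, .num = 2};
 */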

/**
 * Crypto virtual and IOVA address descriptor, used to describe a
 * cryptographic data buffer without the length information. The length
 * information is normally predefined during session creation.
 */
struct rte_crypto_va_iova_ptr {
	void *va;
	rte_iova_t iova;
};

/**
 * Raw data operation descriptor.
 * Intended to be used with the synchronous CPU crypto API calls or the
 * asynchronous RAW data-path API calls.
 */
struct rte_crypto_sym_vec {
	/** number of operations to perform */
	uint32_t num;
	/** array of SGL vectors */
	struct rte_crypto_sgl *src_sgl;
	/** array of SGL vectors for OOP, keep it NULL for in-place */
	struct rte_crypto_sgl *dest_sgl;
	/** array of pointers to cipher IV */
	struct rte_crypto_va_iova_ptr *iv;
	/** array of pointers to digest */
	struct rte_crypto_va_iova_ptr *digest;

	__extension__
	union {
		/** array of pointers to auth IV, used for chain operation */
		struct rte_crypto_va_iova_ptr *auth_iv;
		/** array of pointers to AAD, used for AEAD operation */
		struct rte_crypto_va_iova_ptr *aad;
	};

	/**
	 * array of statuses for each operation:
	 * - 0 on success
	 * - errno on error
	 */
	int32_t *status;
};
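
/*
 * Example (illustrative sketch, not part of the API): a one-element
 * rte_crypto_sym_vec describing a single in-place AEAD operation. The SGL,
 * IV, digest and AAD pointers are assumed to have been prepared by the
 * application, as in the earlier SGL example.
 *
 *	int32_t status;
 *	struct rte_crypto_sym_vec symvec = {
 *		.num = 1,
 *		.src_sgl = &sgl,
 *		.dest_sgl = NULL,	// NULL selects in-place processing
 *		.iv = &iv_ptr,
 *		.digest = &digest_ptr,
 *		.aad = &aad_ptr,
 *		.status = &status,
 *	};
 */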

/**
 * Used for cpu_crypto_process_bulk() to specify head/tail offsets
 * for auth/cipher processing.
 */
union rte_crypto_sym_ofs {
	uint64_t raw;
	struct {
		struct {
			uint16_t head;
			uint16_t tail;
		} auth, cipher;
	} ofs;
};
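
/*
 * Example (illustrative sketch): for a chained cipher+auth operation that
 * ciphers everything after a hdr_len-byte header but authenticates the whole
 * buffer, the per-operation offsets can be encoded as below. hdr_len is a
 * value assumed for the example.
 *
 *	union rte_crypto_sym_ofs ofs = {.raw = 0};
 *	ofs.ofs.cipher.head = hdr_len;	// cipher starts after the header
 *	ofs.ofs.cipher.tail = 0;
 *	ofs.ofs.auth.head = 0;		// auth covers the full range
 *	ofs.ofs.auth.tail = 0;
 */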
111 
112 /** Symmetric Cipher Algorithms
113  *
114  * Note, to avoid ABI breakage across releases
115  * - LIST_END should not be added to this enum
116  * - the order of enums should not be changed
117  * - new algorithms should only be added to the end
118  */
119 enum rte_crypto_cipher_algorithm {
120 	RTE_CRYPTO_CIPHER_NULL = 1,
121 	/**< NULL cipher algorithm. No mode applies to the NULL algorithm. */
122 
123 	RTE_CRYPTO_CIPHER_3DES_CBC,
124 	/**< Triple DES algorithm in CBC mode */
125 	RTE_CRYPTO_CIPHER_3DES_CTR,
126 	/**< Triple DES algorithm in CTR mode */
127 	RTE_CRYPTO_CIPHER_3DES_ECB,
128 	/**< Triple DES algorithm in ECB mode */
129 
130 	RTE_CRYPTO_CIPHER_AES_CBC,
131 	/**< AES algorithm in CBC mode */
132 	RTE_CRYPTO_CIPHER_AES_CTR,
133 	/**< AES algorithm in Counter mode */
134 	RTE_CRYPTO_CIPHER_AES_ECB,
135 	/**< AES algorithm in ECB mode */
136 	RTE_CRYPTO_CIPHER_AES_F8,
137 	/**< AES algorithm in F8 mode */
138 	RTE_CRYPTO_CIPHER_AES_XTS,
139 	/**< AES algorithm in XTS mode */
140 
141 	RTE_CRYPTO_CIPHER_ARC4,
142 	/**< (A)RC4 cipher algorithm */
143 
144 	RTE_CRYPTO_CIPHER_KASUMI_F8,
145 	/**< KASUMI algorithm in F8 mode */
146 
147 	RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
148 	/**< SNOW 3G algorithm in UEA2 mode */
149 
150 	RTE_CRYPTO_CIPHER_ZUC_EEA3,
151 	/**< ZUC algorithm in EEA3 mode */
152 
153 	RTE_CRYPTO_CIPHER_DES_CBC,
154 	/**< DES algorithm in CBC mode */
155 
156 	RTE_CRYPTO_CIPHER_AES_DOCSISBPI,
157 	/**< AES algorithm using modes required by
158 	 * DOCSIS Baseline Privacy Plus Spec.
159 	 * Chained mbufs are not supported in this mode, i.e. rte_mbuf.next
160 	 * for m_src and m_dst in the rte_crypto_sym_op must be NULL.
161 	 */
162 
163 	RTE_CRYPTO_CIPHER_DES_DOCSISBPI
164 	/**< DES algorithm using modes required by
165 	 * DOCSIS Baseline Privacy Plus Spec.
166 	 * Chained mbufs are not supported in this mode, i.e. rte_mbuf.next
167 	 * for m_src and m_dst in the rte_crypto_sym_op must be NULL.
168 	 */
169 };

/** Cipher algorithm name strings */
extern const char *
rte_crypto_cipher_algorithm_strings[];

/** Symmetric Cipher Direction */
enum rte_crypto_cipher_operation {
	RTE_CRYPTO_CIPHER_OP_ENCRYPT,
	/**< Encrypt cipher operation */
	RTE_CRYPTO_CIPHER_OP_DECRYPT
	/**< Decrypt cipher operation */
};

/** Cipher operation name strings */
extern const char *
rte_crypto_cipher_operation_strings[];

/**
 * Symmetric Cipher Setup Data.
 *
 * This structure contains data relating to Cipher (Encryption and Decryption)
 * used to create a session.
 */
struct rte_crypto_cipher_xform {
	enum rte_crypto_cipher_operation op;
	/**< This parameter determines if the cipher operation is an encrypt or
	 * a decrypt operation. For the RC4 algorithm and the F8/CTR modes,
	 * only encrypt operations are valid.
	 */
	enum rte_crypto_cipher_algorithm algo;
	/**< Cipher algorithm */

	struct {
		const uint8_t *data;	/**< pointer to key data */
		uint16_t length;	/**< key length in bytes */
	} key;
	/**< Cipher key
	 *
	 * In case the PMD supports RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY, the
	 * original key data provided may be wrapped (encrypted) using a key
	 * wrap algorithm such as AES key wrap (RFC 3394), and hence the
	 * length of the key may exceed the key sizes advertised by the PMD.
	 * The PMD shall validate the key length and report an EMSGSIZE error
	 * while configuring the session, and the application can skip
	 * checking the capability key length in such cases.
	 *
	 * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.data will
	 * point to a concatenation of the AES encryption key followed by a
	 * keymask. As per RFC 3711, the keymask should be padded with trailing
	 * bytes to match the length of the encryption key used.
	 *
	 * Cipher key length is in bytes. For AES it can be 128 bits (16 bytes),
	 * 192 bits (24 bytes) or 256 bits (32 bytes).
	 *
	 * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.length
	 * should be set to the combined length of the encryption key and the
	 * keymask. Since the keymask and the encryption key are the same size,
	 * key.length should be set to 2 x the AES encryption key length.
	 *
	 * For the AES-XTS mode of operation:
	 *  - Two keys must be provided and key.length refers to total length of
	 *    the two keys.
	 *  - key.data must point to the two keys concatenated together
	 *    (key1 || key2).
	 *  - Each key can be either 128 bits (16 bytes) or 256 bits (32 bytes).
	 *  - Both keys must have the same size.
	 */
	struct {
		uint16_t offset;
		/**< Starting point for Initialisation Vector or Counter,
		 * specified as number of bytes from start of crypto
		 * operation (rte_crypto_op).
		 *
		 * - For block ciphers in CBC or F8 mode, or for KASUMI
		 * in F8 mode, or for SNOW 3G in UEA2 mode, this is the
		 * Initialisation Vector (IV) value.
		 *
		 * - For block ciphers in CTR mode, this is the counter.
		 *
		 * - For CCM mode, the first byte is reserved, and the
		 * nonce should be written starting at &iv[1] (to allow
		 * space for the implementation to write in the flags
		 * in the first byte). Note that a full 16 bytes should
		 * be allocated, even though the length field will
		 * have a value less than this. Note that the PMDs may
		 * modify the memory reserved (the first byte and the
		 * final padding).
		 *
		 * - For AES-XTS, this is the 128-bit tweak, i, from
		 * IEEE Std 1619-2007.
		 *
		 * For optimum performance, the data pointed to SHOULD
		 * be 8-byte aligned.
		 */
		uint16_t length;
		/**< Length of valid IV data.
		 *
		 * - For block ciphers in CBC or F8 mode, or for KASUMI
		 * in F8 mode, or for SNOW 3G in UEA2 mode, this is the
		 * length of the IV (which must be the same as the
		 * block length of the cipher).
		 *
		 * - For block ciphers in CTR mode, this is the length
		 * of the counter (which must be the same as the block
		 * length of the cipher).
		 *
		 * - For CCM mode, this is the length of the nonce,
		 * which can be in the range 7 to 13 inclusive.
		 */
	} iv;	/**< Initialisation vector parameters */

	uint32_t dataunit_len;
	/**< When RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS is enabled,
	 * this is the data-unit length of the algorithm; otherwise, or when
	 * the value is 0, the operation length is used.
	 * The value should be in the range defined by the dataunit_set field
	 * in the cipher capability.
	 *
	 * - For AES-XTS it is the size of the data-unit, from IEEE Std
	 * 1619-2007. For each data-unit in the operation, the tweak (IV)
	 * value is assigned consecutively, starting from the IV assigned
	 * to the operation.
	 */
};
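
/*
 * Example (illustrative sketch): an AES-128-CBC encrypt transform. The key
 * bytes are placeholders and IV_OFFSET is an assumed application-defined
 * offset into the rte_crypto_op where the application copies the 16-byte IV
 * before enqueueing the operation.
 *
 *	static const uint8_t aes_key[16] = {0};	// placeholder key bytes
 *
 *	struct rte_crypto_cipher_xform cipher = {
 *		.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *		.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *		.key = {.data = aes_key, .length = sizeof(aes_key)},
 *		.iv = {.offset = IV_OFFSET, .length = 16},
 *	};
 */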

/** Symmetric Authentication / Hash Algorithms
 *
 * Note, to avoid ABI breakage across releases
 * - LIST_END should not be added to this enum
 * - the order of enums should not be changed
 * - new algorithms should only be added to the end
 */
enum rte_crypto_auth_algorithm {
	RTE_CRYPTO_AUTH_NULL = 1,
	/**< NULL hash algorithm. */

	RTE_CRYPTO_AUTH_AES_CBC_MAC,
	/**< AES-CBC-MAC algorithm. Only 128-bit keys are supported. */
	RTE_CRYPTO_AUTH_AES_CMAC,
	/**< AES CMAC algorithm. */
	RTE_CRYPTO_AUTH_AES_GMAC,
	/**< AES GMAC algorithm. */
	RTE_CRYPTO_AUTH_AES_XCBC_MAC,
	/**< AES XCBC algorithm. */

	RTE_CRYPTO_AUTH_KASUMI_F9,
	/**< KASUMI algorithm in F9 mode. */

	RTE_CRYPTO_AUTH_MD5,
	/**< MD5 algorithm */
	RTE_CRYPTO_AUTH_MD5_HMAC,
	/**< HMAC using MD5 algorithm */

	RTE_CRYPTO_AUTH_SHA1,
	/**< 160 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA1_HMAC,
	/**< HMAC using 160 bit SHA algorithm.
	 * HMAC-SHA-1-96 can be generated by setting
	 * digest_length to 12 bytes in auth/aead xforms.
	 */
	RTE_CRYPTO_AUTH_SHA224,
	/**< 224 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA224_HMAC,
	/**< HMAC using 224 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA256,
	/**< 256 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA256_HMAC,
	/**< HMAC using 256 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA384,
	/**< 384 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA384_HMAC,
	/**< HMAC using 384 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA512,
	/**< 512 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA512_HMAC,
	/**< HMAC using 512 bit SHA algorithm. */

	RTE_CRYPTO_AUTH_SNOW3G_UIA2,
	/**< SNOW 3G algorithm in UIA2 mode. */

	RTE_CRYPTO_AUTH_ZUC_EIA3,
	/**< ZUC algorithm in EIA3 mode */

	RTE_CRYPTO_AUTH_SHA3_224,
	/**< 224 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_224_HMAC,
	/**< HMAC using 224 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_256,
	/**< 256 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_256_HMAC,
	/**< HMAC using 256 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_384,
	/**< 384 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_384_HMAC,
	/**< HMAC using 384 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_512,
	/**< 512 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_512_HMAC
	/**< HMAC using 512 bit SHA3 algorithm. */
};

/** Authentication algorithm name strings */
extern const char *
rte_crypto_auth_algorithm_strings[];

/** Symmetric Authentication / Hash Operations */
enum rte_crypto_auth_operation {
	RTE_CRYPTO_AUTH_OP_VERIFY,	/**< Verify authentication digest */
	RTE_CRYPTO_AUTH_OP_GENERATE	/**< Generate authentication digest */
};

/** Authentication operation name strings */
extern const char *
rte_crypto_auth_operation_strings[];

/**
 * Authentication / Hash transform data.
 *
 * This structure contains data relating to an authentication/hash crypto
 * transform. The fields op, algo and digest_length are common to all
 * authentication transforms and MUST be set.
 */
struct rte_crypto_auth_xform {
	enum rte_crypto_auth_operation op;
	/**< Authentication operation type */
	enum rte_crypto_auth_algorithm algo;
	/**< Authentication algorithm selection */

	struct {
		const uint8_t *data;	/**< pointer to key data */
		uint16_t length;	/**< key length in bytes */
	} key;
	/**< Authentication key data.
	 * The authentication key length MUST be less than or equal to the
	 * block size of the algorithm. It is the caller's responsibility to
	 * ensure that the key length is compliant with the standard being used
	 * (for example RFC 2104, FIPS 198a).
	 */

	struct {
		uint16_t offset;
		/**< Starting point for Initialisation Vector or Counter,
		 * specified as number of bytes from start of crypto
		 * operation (rte_crypto_op).
		 *
		 * - For SNOW 3G in UIA2 mode and for ZUC in EIA3 mode,
		 *   this is the authentication Initialisation Vector
		 *   (IV) value. For the AES-GMAC IV description please
		 *   refer to the field `length` in the iv struct.
		 *
		 * - For KASUMI in F9 mode and other authentication
		 *   algorithms, this field is not used.
		 *
		 * For optimum performance, the data pointed to SHOULD
		 * be 8-byte aligned.
		 */
		uint16_t length;
		/**< Length of valid IV data.
		 *
		 * - For SNOW 3G in UIA2 mode, for ZUC in EIA3 mode and
		 *   for AES-GMAC, this is the length of the IV.
		 *
		 * - For KASUMI in F9 mode and other authentication
		 *   algorithms, this field is not used.
		 *
		 * - For GMAC mode, this is either:
		 * 1) A number greater than or equal to one, which means
		 *    that the IV is used and J0 will be computed
		 *    internally; a minimum of 16 bytes must be allocated.
		 * 2) Zero, in which case data points to J0. In this case
		 *    16 bytes of J0 should be passed where J0 is defined
		 *    by NIST SP800-38D.
		 */
	} iv;	/**< Initialisation vector parameters */

	uint16_t digest_length;
	/**< Length of the digest to be returned. If the verify option is set,
	 * this specifies the length of the digest to be compared for the
	 * session.
	 *
	 * It is the caller's responsibility to ensure that the
	 * digest length is compliant with the hash algorithm being used.
	 * If the value is less than the maximum length allowed by the hash,
	 * the result shall be truncated.
	 */
};
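
/*
 * Example (illustrative sketch): an HMAC-SHA-1-96 digest-generation
 * transform. hmac_key is a placeholder; the 12-byte digest_length requests
 * the truncated HMAC-SHA-1-96 tag mentioned in the algorithm list above.
 *
 *	static const uint8_t hmac_key[20] = {0};	// placeholder key bytes
 *
 *	struct rte_crypto_auth_xform auth = {
 *		.op = RTE_CRYPTO_AUTH_OP_GENERATE,
 *		.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
 *		.key = {.data = hmac_key, .length = sizeof(hmac_key)},
 *		.iv = {.offset = 0, .length = 0},	// IV not used for HMAC
 *		.digest_length = 12,
 *	};
 */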


/** Symmetric AEAD Algorithms
 *
 * Note, to avoid ABI breakage across releases
 * - LIST_END should not be added to this enum
 * - the order of enums should not be changed
 * - new algorithms should only be added to the end
 */
enum rte_crypto_aead_algorithm {
	RTE_CRYPTO_AEAD_AES_CCM = 1,
	/**< AES algorithm in CCM mode. */
	RTE_CRYPTO_AEAD_AES_GCM,
	/**< AES algorithm in GCM mode. */
	RTE_CRYPTO_AEAD_CHACHA20_POLY1305
	/**< Chacha20 cipher with poly1305 authenticator */
};

/** AEAD algorithm name strings */
extern const char *
rte_crypto_aead_algorithm_strings[];

/** Symmetric AEAD Operations */
enum rte_crypto_aead_operation {
	RTE_CRYPTO_AEAD_OP_ENCRYPT,
	/**< Encrypt and generate digest */
	RTE_CRYPTO_AEAD_OP_DECRYPT
	/**< Verify digest and decrypt */
};

/** AEAD operation name strings */
extern const char *
rte_crypto_aead_operation_strings[];

/** Symmetric AEAD setup data, used to create a session. */
struct rte_crypto_aead_xform {
	enum rte_crypto_aead_operation op;
	/**< AEAD operation type */
	enum rte_crypto_aead_algorithm algo;
	/**< AEAD algorithm selection */

	struct {
		const uint8_t *data;	/**< pointer to key data */
		uint16_t length;	/**< key length in bytes */
	} key;

	struct {
		uint16_t offset;
		/**< Starting point for Initialisation Vector or Counter,
		 * specified as number of bytes from start of crypto
		 * operation (rte_crypto_op).
		 *
		 * - For CCM mode, the first byte is reserved, and the
		 * nonce should be written starting at &iv[1] (to allow
		 * space for the implementation to write in the flags
		 * in the first byte). Note that a full 16 bytes should
		 * be allocated, even though the length field will
		 * have a value less than this.
		 *
		 * - For Chacha20-Poly1305 it is the 96-bit nonce.
		 * The PMD sets the initial counter to 0 for the
		 * Poly1305 key-generation part and to 1 for the
		 * Chacha20 encryption, as per RFC 8439 section 2.8
		 * "AEAD construction".
		 *
		 * For optimum performance, the data pointed to SHOULD
		 * be 8-byte aligned.
		 */
		uint16_t length;
		/**< Length of valid IV data.
		 *
		 * - For GCM mode, this is either:
		 * 1) A number greater than or equal to one, which means
		 *    that the IV is used and J0 will be computed
		 *    internally; a minimum of 16 bytes must be allocated.
		 * 2) Zero, in which case data points to J0. In this case
		 *    16 bytes of J0 should be passed where J0 is defined
		 *    by NIST SP800-38D.
		 *
		 * - For CCM mode, this is the length of the nonce,
		 * which can be in the range 7 to 13 inclusive.
		 *
		 * - For Chacha20-Poly1305 this field is always 12.
		 */
	} iv;	/**< Initialisation vector parameters */

	uint16_t digest_length;
	/**< Length of the digest to be returned, in bytes. */

	uint16_t aad_length;
	/**< The length of the additional authenticated data (AAD) in bytes.
	 * For CCM mode, this is the length of the actual AAD, even though
	 * it is required to reserve 18 bytes before the AAD and padding
	 * at the end of it, so a multiple of 16 bytes is allocated.
	 */
};
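
/*
 * Example (illustrative sketch): an AES-128-GCM encrypt transform with a
 * 96-bit IV (J0 computed by the PMD), a 16-byte tag and 16 bytes of AAD.
 * gcm_key is a placeholder and IV_OFFSET is an assumed application-defined
 * offset into the rte_crypto_op.
 *
 *	static const uint8_t gcm_key[16] = {0};	// placeholder key bytes
 *
 *	struct rte_crypto_aead_xform aead = {
 *		.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
 *		.algo = RTE_CRYPTO_AEAD_AES_GCM,
 *		.key = {.data = gcm_key, .length = sizeof(gcm_key)},
 *		.iv = {.offset = IV_OFFSET, .length = 12},
 *		.digest_length = 16,
 *		.aad_length = 16,
 *	};
 */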

/** Crypto transformation types */
enum rte_crypto_sym_xform_type {
	RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED = 0,	/**< No xform specified */
	RTE_CRYPTO_SYM_XFORM_AUTH,		/**< Authentication xform */
	RTE_CRYPTO_SYM_XFORM_CIPHER,		/**< Cipher xform  */
	RTE_CRYPTO_SYM_XFORM_AEAD		/**< AEAD xform  */
};

/**
 * Symmetric crypto transform structure.
 *
 * This is used to specify the crypto transforms required. Multiple transforms
 * can be chained together to specify a chain of transforms such as
 * authentication then cipher, or cipher then authentication. Each transform
 * structure can hold a single transform; the type field is used to specify
 * which transform is contained within the union.
 */
struct rte_crypto_sym_xform {
	struct rte_crypto_sym_xform *next;
	/**< next xform in chain */
	enum rte_crypto_sym_xform_type type;
	/**< xform type */
	RTE_STD_C11
	union {
		struct rte_crypto_auth_xform auth;
		/**< Authentication / hash xform */
		struct rte_crypto_cipher_xform cipher;
		/**< Cipher xform */
		struct rte_crypto_aead_xform aead;
		/**< AEAD xform */
	};
};
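
/*
 * Example (illustrative sketch): chaining two transforms to express
 * "cipher then authenticate" for the encrypt direction. The cipher and auth
 * members would be filled in as in the earlier examples.
 *
 *	struct rte_crypto_sym_xform auth_xf = {
 *		.next = NULL,		// end of chain
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *	};
 *	struct rte_crypto_sym_xform cipher_xf = {
 *		.next = &auth_xf,	// encrypt first, then authenticate
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *	};
 */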

struct rte_cryptodev_sym_session;

/**
 * Symmetric Cryptographic Operation.
 *
 * This structure contains data relating to performing symmetric cryptographic
 * processing on a referenced mbuf data buffer.
 *
 * When a symmetric crypto operation is enqueued with the device for processing
 * it must have a valid *rte_mbuf* structure attached, via the m_src parameter,
 * which contains the source data which the crypto operation is to be performed
 * on.
 * While the mbuf is in use by a crypto operation no part of the mbuf should be
 * changed by the application, as the device may read or write to any part of
 * the mbuf. In the case of hardware crypto devices some or all of the mbuf
 * may be DMAed in and out of the device, writing over the original data,
 * though only the part specified by the rte_crypto_sym_op for transformation
 * will be changed.
 * Out-of-place (OOP) operation, where the source mbuf is different from the
 * destination mbuf, is a special case. Data will be copied from m_src to m_dst.
 * The part copied includes all the parts of the source mbuf that will be
 * operated on, based on the cipher.data.offset+cipher.data.length and
 * auth.data.offset+auth.data.length values in the rte_crypto_sym_op. The part
 * indicated by the cipher parameters will be transformed; any extra data around
 * this indicated by the auth parameters will be copied unchanged from source to
 * destination mbuf.
 * Also in OOP operation the cipher.data.offset and auth.data.offset apply to
 * both source and destination mbufs. As these offsets are relative to the
 * data_off parameter in each mbuf this can result in the data written to the
 * destination buffer being at a different alignment, relative to buffer start,
 * compared to the data in the source buffer.
 */
struct rte_crypto_sym_op {
	struct rte_mbuf *m_src;	/**< source mbuf */
	struct rte_mbuf *m_dst;	/**< destination mbuf */

	RTE_STD_C11
	union {
		struct rte_cryptodev_sym_session *session;
		/**< Handle for the initialised session context */
		struct rte_crypto_sym_xform *xform;
		/**< Session-less API crypto operation parameters */
		struct rte_security_session *sec_session;
		/**< Handle for the initialised security session context */
	};

	RTE_STD_C11
	union {
		struct {
			struct {
				uint32_t offset;
				 /**< Starting point for AEAD processing, specified as
				  * number of bytes from start of packet in source
				  * buffer.
				  */
				uint32_t length;
				 /**< The message length, in bytes, of the source buffer
				  * on which the cryptographic operation will be
				  * computed. This must be a multiple of the block size
				  */
			} data; /**< Data offsets and length for AEAD */
			struct {
				uint8_t *data;
				/**< This points to the location where the digest result
				 * should be inserted (in the case of digest generation)
				 * or where the purported digest exists (in the case of
				 * digest verification).
				 *
				 * At session creation time, the client specified the
				 * digest result length with the digest_length member
				 * of the @ref rte_crypto_auth_xform structure. For
				 * physical crypto devices the caller must allocate at
				 * least digest_length of physically contiguous memory
				 * at this location.
				 *
				 * For digest generation, the digest result will
				 * overwrite any data at this location.
				 *
				 * @note
				 * For GCM (@ref RTE_CRYPTO_AEAD_AES_GCM), for
				 * "digest result" read "authentication tag T".
				 */
				rte_iova_t phys_addr;
				/**< Physical address of digest */
			} digest; /**< Digest parameters */
			struct {
				uint8_t *data;
				/**< Pointer to Additional Authenticated Data (AAD)
				 * needed for authenticated cipher mechanisms (CCM and
				 * GCM)
				 *
				 * Specifically for CCM (@ref RTE_CRYPTO_AEAD_AES_CCM),
				 * the caller should setup this field as follows:
				 *
				 * - the additional authentication data itself should
				 * be written starting at an offset of 18 bytes into
				 * the array, leaving room for the first block (16 bytes)
				 * and the length encoding in the first two bytes of the
				 * second block.
				 *
				 * - the array should be big enough to hold the above
				 * fields, plus any padding to round this up to the
				 * nearest multiple of the block size (16 bytes).
				 * Padding will be added by the implementation.
				 *
				 * - Note that PMDs may modify the memory reserved
				 * (first 18 bytes and the final padding).
				 *
				 * Finally, for GCM (@ref RTE_CRYPTO_AEAD_AES_GCM), the
				 * caller should setup this field as follows:
				 *
				 * - the AAD is written in starting at byte 0
				 * - the array must be big enough to hold the AAD, plus
				 * any space to round this up to the nearest multiple
				 * of the block size (16 bytes).
				 *
				 */
				rte_iova_t phys_addr;	/**< physical address */
			} aad;
			/**< Additional authentication parameters */
		} aead;

		struct {
			struct {
				struct {
					uint32_t offset;
					 /**< Starting point for cipher processing,
					  * specified as number of bytes from start
					  * of data in the source buffer.
					  * The result of the cipher operation will be
					  * written back into the output buffer
					  * starting at this location.
					  *
					  * @note
					  * For SNOW 3G @ RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
					  * KASUMI @ RTE_CRYPTO_CIPHER_KASUMI_F8
					  * and ZUC @ RTE_CRYPTO_CIPHER_ZUC_EEA3,
					  * this field should be in bits. For
					  * digest-encrypted cases this must be
					  * an 8-bit multiple.
					  */
					uint32_t length;
					 /**< The message length, in bytes, of the
					  * source buffer on which the cryptographic
					  * operation will be computed.
					  * This is also the same as the result length.
					  * This must be a multiple of the block size
					  * or a multiple of data-unit length
					  * as described in xform.
					  *
					  * @note
					  * For SNOW 3G @ RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
					  * KASUMI @ RTE_CRYPTO_CIPHER_KASUMI_F8
					  * and ZUC @ RTE_CRYPTO_CIPHER_ZUC_EEA3,
					  * this field should be in bits. For
					  * digest-encrypted cases this must be
					  * an 8-bit multiple.
					  */
				} data; /**< Data offsets and length for ciphering */
			} cipher;

			struct {
				struct {
					uint32_t offset;
					 /**< Starting point for hash processing,
					  * specified as number of bytes from start of
					  * packet in source buffer.
					  *
					  * @note
					  * For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
					  * KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9
					  * and ZUC @ RTE_CRYPTO_AUTH_ZUC_EIA3,
					  * this field should be in bits. For
					  * digest-encrypted cases this must be
					  * an 8-bit multiple.
					  *
					  * @note
					  * For KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9,
					  * this offset should be such that
					  * data to authenticate starts at COUNT.
					  *
					  * @note
					  * For DOCSIS security protocol, this
					  * offset is the DOCSIS header length
					  * and, therefore, also the CRC offset
					  * i.e. the number of bytes into the
					  * packet at which CRC calculation
					  * should begin.
					  */
					uint32_t length;
					 /**< The message length, in bytes, of the source
					  * buffer that the hash will be computed on.
					  *
					  * @note
					  * For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
					  * KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9
					  * and ZUC @ RTE_CRYPTO_AUTH_ZUC_EIA3,
					  * this field should be in bits. For
					  * digest-encrypted cases this must be
					  * an 8-bit multiple.
					  *
					  * @note
					  * For KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9,
					  * the length should include the COUNT,
					  * FRESH, message, direction bit and padding
					  * (to be multiple of 8 bits).
					  *
					  * @note
					  * For DOCSIS security protocol, this
					  * is the CRC length i.e. the number of
					  * bytes in the packet over which the
					  * CRC should be calculated
					  */
				} data;
				/**< Data offsets and length for authentication */

				struct {
					uint8_t *data;
					/**< This points to the location where
					 * the digest result should be inserted
					 * (in the case of digest generation)
					 * or where the purported digest exists
					 * (in the case of digest verification).
					 *
					 * At session creation time, the client
					 * specified the digest result length with
					 * the digest_length member of the
					 * @ref rte_crypto_auth_xform structure.
					 * For physical crypto devices the caller
					 * must allocate at least digest_length of
					 * physically contiguous memory at this
					 * location.
					 *
					 * For digest generation, the digest result
					 * will overwrite any data at this location.
					 *
					 * @note
					 * Digest-encrypted case.
					 * Digest can be generated, appended to
					 * the end of raw data and encrypted
					 * together using chained digest
					 * generation
					 * (@ref RTE_CRYPTO_AUTH_OP_GENERATE)
					 * and encryption
					 * (@ref RTE_CRYPTO_CIPHER_OP_ENCRYPT)
					 * xforms. Similarly, authentication
					 * of the raw data against appended,
					 * decrypted digest, can be performed
					 * using decryption
					 * (@ref RTE_CRYPTO_CIPHER_OP_DECRYPT)
					 * and digest verification
					 * (@ref RTE_CRYPTO_AUTH_OP_VERIFY)
					 * chained xforms.
					 * To perform those operations, a few
					 * additional conditions must be met:
					 * - caller must allocate at least
					 * digest_length of memory at the end of
					 * source and (in case of out-of-place
					 * operations) destination buffer; those
					 * buffers can be linear or split using
					 * scatter-gather lists,
					 * - digest data pointer must point to
					 * the end of source or (in case of
					 * out-of-place operations) destination
					 * data, which is pointer to the
					 * data buffer + auth.data.offset +
					 * auth.data.length,
					 * - cipher.data.offset +
					 * cipher.data.length must be greater
					 * than auth.data.offset +
					 * auth.data.length and is typically
					 * equal to auth.data.offset +
					 * auth.data.length + digest_length.
					 * - for wireless algorithms, i.e.
					 * SNOW 3G, KASUMI and ZUC, as the
					 * cipher.data.length,
					 * cipher.data.offset,
					 * auth.data.length and
					 * auth.data.offset are in bits, they
					 * must be 8-bit multiples.
					 *
					 * Note that, for security reasons,
					 * it is the PMD's responsibility not
					 * to leave an unencrypted digest in
					 * any buffer after performing
					 * auth-cipher operations.
					 *
					 */
					rte_iova_t phys_addr;
					/**< Physical address of digest */
				} digest; /**< Digest parameters */
			} auth;
		};
	};
};
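
/*
 * Example (illustrative sketch): populating a symmetric op for a chained
 * cipher+auth session. The rte_crypto_op *op*, the mbuf *m* and the
 * 16-byte-header/payload_len split are assumptions made for the example;
 * the op itself would be allocated and the session attached via the
 * rte_cryptodev API (see rte_crypto.h and rte_cryptodev.h).
 *
 *	struct rte_crypto_sym_op *sym = op->sym;
 *
 *	sym->m_src = m;				// m_dst == NULL: in-place
 *	sym->cipher.data.offset = 16;		// skip the 16-byte header
 *	sym->cipher.data.length = payload_len;
 *	sym->auth.data.offset = 0;		// authenticate header + payload
 *	sym->auth.data.length = 16 + payload_len;
 *	sym->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
 *			16 + payload_len);
 *	sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
 *			16 + payload_len);
 */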


/**
 * Reset the fields of a symmetric operation to their default values.
 *
 * @param	op	The crypto operation to be reset.
 */
static inline void
__rte_crypto_sym_op_reset(struct rte_crypto_sym_op *op)
{
	memset(op, 0, sizeof(*op));
}


/**
 * Allocate space for symmetric crypto xforms in the private data space of the
 * crypto operation. This also defaults the crypto xform type to
 * RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED and configures the chaining of the xforms
 * in the crypto operation.
 *
 * @return
 * - On success returns pointer to first crypto xform in crypto operations chain
 * - On failure returns NULL
 */
static inline struct rte_crypto_sym_xform *
__rte_crypto_sym_op_sym_xforms_alloc(struct rte_crypto_sym_op *sym_op,
		void *priv_data, uint8_t nb_xforms)
{
	struct rte_crypto_sym_xform *xform;

	sym_op->xform = xform = (struct rte_crypto_sym_xform *)priv_data;

	do {
		xform->type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED;
		xform = xform->next = --nb_xforms > 0 ? xform + 1 : NULL;
	} while (xform);

	return sym_op->xform;
}
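
/*
 * Usage sketch (illustrative): priv_data must provide at least
 * nb_xforms * sizeof(struct rte_crypto_sym_xform) bytes of suitably aligned
 * memory; in practice this helper is handed the private data area that
 * follows the rte_crypto_op.
 *
 *	struct rte_crypto_sym_xform buf[2];
 *	struct rte_crypto_sym_xform *xf =
 *		__rte_crypto_sym_op_sym_xforms_alloc(sym_op, buf, 2);
 *
 *	// xf == &buf[0]; both xforms are now NOT_SPECIFIED and chained:
 *	// xf->next == &buf[1], xf->next->next == NULL
 */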


/**
 * Attach a session to a symmetric crypto operation.
 *
 * @param	sym_op	crypto operation
 * @param	sess	cryptodev session
 * @return
 *  Always 0.
 */
static inline int
__rte_crypto_sym_op_attach_sym_session(struct rte_crypto_sym_op *sym_op,
		struct rte_cryptodev_sym_session *sess)
{
	sym_op->session = sess;

	return 0;
}

/**
 * Converts a portion of mbuf data into a vector representation.
 * Each segment will be represented as a separate entry in the *vec* array.
 * Expects that the provided *ofs* + *len* do not exceed the mbuf's *pkt_len*.
 * @param mb
 *   Pointer to the *rte_mbuf* object.
 * @param ofs
 *   Offset within mbuf data to start with.
 * @param len
 *   Length of data to represent.
 * @param vec
 *   Pointer to an output array of IO vectors.
 * @param num
 *   Size of the output array.
 * @return
 *   - number of successfully filled entries in the *vec* array.
 *   - on failure, the negative of the number of elements required in
 *     the *vec* array.
 */
__rte_experimental
static inline int
rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len,
	struct rte_crypto_vec vec[], uint32_t num)
{
	uint32_t i;
	struct rte_mbuf *nseg;
	uint32_t left;
	uint32_t seglen;

	/* assuming that requested data starts in the first segment */
	RTE_ASSERT(mb->data_len > ofs);

	if (mb->nb_segs > num)
		return -mb->nb_segs;

	vec[0].base = rte_pktmbuf_mtod_offset(mb, void *, ofs);
	vec[0].iova = rte_pktmbuf_iova_offset(mb, ofs);
	vec[0].tot_len = mb->buf_len - rte_pktmbuf_headroom(mb) - ofs;

	/* whole data lies in the first segment */
	seglen = mb->data_len - ofs;
	if (len <= seglen) {
		vec[0].len = len;
		return 1;
	}

	/* data spread across segments */
	vec[0].len = seglen;
	left = len - seglen;
	for (i = 1, nseg = mb->next; nseg != NULL; nseg = nseg->next, i++) {

		vec[i].base = rte_pktmbuf_mtod(nseg, void *);
		vec[i].iova = rte_pktmbuf_iova(nseg);
		/* usable space of this segment, not that of the head mbuf */
		vec[i].tot_len = nseg->buf_len - rte_pktmbuf_headroom(nseg);

		seglen = nseg->data_len;
		if (left <= seglen) {
			/* whole requested data is completed */
			vec[i].len = left;
			left = 0;
			i++;
			break;
		}

		/* use whole segment */
		vec[i].len = seglen;
		left -= seglen;
	}

	RTE_ASSERT(left == 0);
	return i;
}
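
/*
 * Usage sketch (illustrative): gathering a whole packet into an SGL, sizing
 * the vector array for an assumed worst case of 8 segments. A negative
 * return value reports how many entries would have been needed.
 *
 *	struct rte_crypto_sgl sgl;
 *	struct rte_crypto_vec vec[8];
 *	int n;
 *
 *	n = rte_crypto_mbuf_to_vec(m, 0, m->pkt_len, vec, RTE_DIM(vec));
 *	if (n < 0)
 *		return -ENOSPC;	// -n entries would be required
 *	sgl.vec = vec;
 *	sgl.num = n;
 */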


#ifdef __cplusplus
}
#endif

#endif /* _RTE_CRYPTO_SYM_H_ */