/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#ifndef _RTE_CRYPTO_SYM_H_
#define _RTE_CRYPTO_SYM_H_

/**
 * @file rte_crypto_sym.h
 *
 * RTE Definitions for Symmetric Cryptography
 *
 * Defines symmetric cipher and authentication algorithms and modes, as well
 * as supported symmetric crypto operation combinations.
 */

#ifdef __cplusplus
extern "C" {
#endif

#include <string.h>

#include <rte_mbuf.h>
#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_common.h>

/**
 * Crypto IO Vector (in analogy with struct iovec)
 * Intended to be used to pass input/output data buffers for crypto data-path
 * functions.
 */
struct rte_crypto_vec {
	/** virtual address of the data buffer */
	void *base;
	/** IOVA of the data buffer */
	rte_iova_t iova;
	/** length of the data buffer */
	uint32_t len;
};

/**
 * Crypto scatter-gather list descriptor. Consists of a pointer to an array
 * of Crypto IO vectors and its size.
 */
struct rte_crypto_sgl {
	/** start of an array of vectors */
	struct rte_crypto_vec *vec;
	/** size of an array of vectors */
	uint32_t num;
};

/**
 * Crypto virtual and IOVA address descriptor, used to describe a
 * cryptographic data buffer without the length information. The length
 * information is normally predefined during session creation.
 */
struct rte_crypto_va_iova_ptr {
	void *va;
	rte_iova_t iova;
};

/**
 * Raw data operation descriptor.
 * Intended to be used with synchronous CPU crypto API calls or asynchronous
 * RAW data path API calls.
 */
struct rte_crypto_sym_vec {
	/** number of operations to perform */
	uint32_t num;
	/** array of SGL vectors */
	struct rte_crypto_sgl *sgl;
	/** array of pointers to cipher IV */
	struct rte_crypto_va_iova_ptr *iv;
	/** array of pointers to digest */
	struct rte_crypto_va_iova_ptr *digest;

	__extension__
	union {
		/** array of pointers to auth IV, used for chain operation */
		struct rte_crypto_va_iova_ptr *auth_iv;
		/** array of pointers to AAD, used for AEAD operation */
		struct rte_crypto_va_iova_ptr *aad;
	};

	/**
	 * array of statuses for each operation:
	 * - 0 on success
	 * - errno on error
	 */
	int32_t *status;
};
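
/*
 * Example (illustrative sketch, not part of the API): populating an
 * rte_crypto_sym_vec describing a single operation over one contiguous
 * buffer. All variable names below (buf, buf_iova, buf_len, iv_buf,
 * iv_iova, digest_buf, digest_iova) are hypothetical placeholders.
 *
 * @code{.c}
 * struct rte_crypto_vec data_vec = {
 *	.base = buf,
 *	.iova = buf_iova,
 *	.len = buf_len,
 * };
 * struct rte_crypto_sgl sgl = { .vec = &data_vec, .num = 1 };
 * struct rte_crypto_va_iova_ptr iv = { .va = iv_buf, .iova = iv_iova };
 * struct rte_crypto_va_iova_ptr digest = {
 *	.va = digest_buf,
 *	.iova = digest_iova,
 * };
 * int32_t status;
 * struct rte_crypto_sym_vec vec = {
 *	.num = 1,
 *	.sgl = &sgl,
 *	.iv = &iv,
 *	.digest = &digest,
 *	.status = &status,
 * };
 * @endcode
 */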

/**
 * Used for cpu_crypto_process_bulk() to specify head/tail offsets
 * for auth/cipher processing.
 */
union rte_crypto_sym_ofs {
	uint64_t raw;
	struct {
		struct {
			uint16_t head;
			uint16_t tail;
		} auth, cipher;
	} ofs;
};
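
/*
 * Example (illustrative sketch): offsets for an operation where
 * authentication covers the whole region while ciphering skips a
 * hypothetical 16-byte header and a 4-byte trailer.
 *
 * @code{.c}
 * union rte_crypto_sym_ofs ofs = { .raw = 0 };
 * ofs.ofs.auth.head = 0;	// authentication starts at byte 0
 * ofs.ofs.auth.tail = 0;	// and runs to the end of the region
 * ofs.ofs.cipher.head = 16;	// leave a 16-byte header unciphered
 * ofs.ofs.cipher.tail = 4;	// leave a 4-byte trailer unciphered
 * @endcode
 */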

/** Symmetric Cipher Algorithms
 *
 * Note, to avoid ABI breakage across releases
 * - LIST_END should not be added to this enum
 * - the order of enums should not be changed
 * - new algorithms should only be added to the end
 */
enum rte_crypto_cipher_algorithm {
	RTE_CRYPTO_CIPHER_NULL = 1,
	/**< NULL cipher algorithm. No mode applies to the NULL algorithm. */

	RTE_CRYPTO_CIPHER_3DES_CBC,
	/**< Triple DES algorithm in CBC mode */
	RTE_CRYPTO_CIPHER_3DES_CTR,
	/**< Triple DES algorithm in CTR mode */
	RTE_CRYPTO_CIPHER_3DES_ECB,
	/**< Triple DES algorithm in ECB mode */

	RTE_CRYPTO_CIPHER_AES_CBC,
	/**< AES algorithm in CBC mode */
	RTE_CRYPTO_CIPHER_AES_CTR,
	/**< AES algorithm in Counter mode */
	RTE_CRYPTO_CIPHER_AES_ECB,
	/**< AES algorithm in ECB mode */
	RTE_CRYPTO_CIPHER_AES_F8,
	/**< AES algorithm in F8 mode */
	RTE_CRYPTO_CIPHER_AES_XTS,
	/**< AES algorithm in XTS mode */

	RTE_CRYPTO_CIPHER_ARC4,
	/**< (A)RC4 cipher algorithm */

	RTE_CRYPTO_CIPHER_KASUMI_F8,
	/**< KASUMI algorithm in F8 mode */

	RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
	/**< SNOW 3G algorithm in UEA2 mode */

	RTE_CRYPTO_CIPHER_ZUC_EEA3,
	/**< ZUC algorithm in EEA3 mode */

	RTE_CRYPTO_CIPHER_DES_CBC,
	/**< DES algorithm in CBC mode */

	RTE_CRYPTO_CIPHER_AES_DOCSISBPI,
	/**< AES algorithm using modes required by
	 * DOCSIS Baseline Privacy Plus Spec.
	 * Chained mbufs are not supported in this mode, i.e. rte_mbuf.next
	 * for m_src and m_dst in the rte_crypto_sym_op must be NULL.
	 */

	RTE_CRYPTO_CIPHER_DES_DOCSISBPI
	/**< DES algorithm using modes required by
	 * DOCSIS Baseline Privacy Plus Spec.
	 * Chained mbufs are not supported in this mode, i.e. rte_mbuf.next
	 * for m_src and m_dst in the rte_crypto_sym_op must be NULL.
	 */
};

/** Cipher algorithm name strings */
extern const char *
rte_crypto_cipher_algorithm_strings[];

/** Symmetric Cipher Direction */
enum rte_crypto_cipher_operation {
	RTE_CRYPTO_CIPHER_OP_ENCRYPT,
	/**< Encrypt cipher operation */
	RTE_CRYPTO_CIPHER_OP_DECRYPT
	/**< Decrypt cipher operation */
};

/** Cipher operation name strings */
extern const char *
rte_crypto_cipher_operation_strings[];

/**
 * Symmetric Cipher Setup Data.
 *
 * This structure contains data relating to Cipher (Encryption and Decryption)
 * used to create a session.
 */
struct rte_crypto_cipher_xform {
	enum rte_crypto_cipher_operation op;
	/**< This parameter determines if the cipher operation is an encrypt or
	 * a decrypt operation. For the RC4 algorithm and the F8/CTR modes,
	 * only encrypt operations are valid.
	 */
	enum rte_crypto_cipher_algorithm algo;
	/**< Cipher algorithm */

	RTE_STD_C11
	union { /* temporary anonymous union for ABI compatibility */

	struct {
		const uint8_t *data;	/**< pointer to key data */
		uint16_t length;	/**< key length in bytes */
	} key;
	/**< Cipher key
	 *
	 * In case the PMD supports RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY, the
	 * original key data provided may be wrapped (encrypted) using a key
	 * wrap algorithm such as AES key wrap (RFC 3394), and hence the length
	 * of the key may increase beyond the PMD's advertised supported key
	 * size. The PMD shall validate the key length and report an EMSGSIZE
	 * error while configuring the session, in which case the application
	 * can skip checking the capability key length.
	 *
	 * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.data will
	 * point to a concatenation of the AES encryption key followed by a
	 * keymask. As per RFC 3711, the keymask should be padded with trailing
	 * bytes to match the length of the encryption key used.
	 *
	 * Cipher key length is in bytes. For AES it can be 128 bits (16 bytes),
	 * 192 bits (24 bytes) or 256 bits (32 bytes).
	 *
	 * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.length
	 * should be set to the combined length of the encryption key and the
	 * keymask. Since the keymask and the encryption key are the same size,
	 * key.length should be set to 2 x the AES encryption key length.
	 *
	 * For the AES-XTS mode of operation:
	 *  - Two keys must be provided and key.length refers to the total
	 *    length of the two keys.
	 *  - key.data must point to the two keys concatenated together
	 *    (key1 || key2).
	 *  - Each key can be either 128 bits (16 bytes) or 256 bits (32 bytes).
	 *  - Both keys must have the same size.
	 */

	RTE_STD_C11
	struct { /* temporary anonymous struct for ABI compatibility */
		const uint8_t *_key_data; /* reserved for key.data union */
		uint16_t _key_length;     /* reserved for key.length union */
		/* next field can fill the padding hole */

	uint16_t dataunit_len;
	/**< When RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS is enabled,
	 * this is the data-unit length of the algorithm,
	 * otherwise or when the value is 0, use the operation length.
	 * The value should be in the range defined by the dataunit_set field
	 * in the cipher capability.
	 *
	 * - For AES-XTS it is the size of the data-unit, from IEEE Std
	 * 1619-2007. For each data-unit in the operation, the tweak (IV)
	 * value is assigned consecutively starting from the operation's
	 * assigned IV.
	 */

	}; }; /* temporary struct nested in union for ABI compatibility */

	struct {
		uint16_t offset;
		/**< Starting point for Initialisation Vector or Counter,
		 * specified as number of bytes from start of crypto
		 * operation (rte_crypto_op).
		 *
		 * - For block ciphers in CBC or F8 mode, or for KASUMI
		 * in F8 mode, or for SNOW 3G in UEA2 mode, this is the
		 * Initialisation Vector (IV) value.
		 *
		 * - For block ciphers in CTR mode, this is the counter.
		 *
		 * - For CCM mode, the first byte is reserved, and the
		 * nonce should be written starting at &iv[1] (to allow
		 * space for the implementation to write in the flags
		 * in the first byte). Note that a full 16 bytes should
		 * be allocated, even though the length field will
		 * have a value less than this. Note that the PMDs may
		 * modify the memory reserved (the first byte and the
		 * final padding).
		 *
		 * - For AES-XTS, this is the 128-bit tweak, i, from
		 * IEEE Std 1619-2007.
		 *
		 * For optimum performance, the data pointed to SHOULD
		 * be 8-byte aligned.
		 */
		uint16_t length;
		/**< Length of valid IV data.
		 *
		 * - For block ciphers in CBC or F8 mode, or for KASUMI
		 * in F8 mode, or for SNOW 3G in UEA2 mode, this is the
		 * length of the IV (which must be the same as the
		 * block length of the cipher).
		 *
		 * - For block ciphers in CTR mode, this is the length
		 * of the counter (which must be the same as the block
		 * length of the cipher).
		 *
		 * - For CCM mode, this is the length of the nonce,
		 * which can be in the range 7 to 13 inclusive.
		 */
	} iv;	/**< Initialisation vector parameters */
};
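
/*
 * Example (illustrative sketch): filling in a cipher transform for
 * AES-128-CBC encryption. The key buffer and the IV_OFFSET constant
 * (offset of the IV within the rte_crypto_op private data) are
 * hypothetical placeholders.
 *
 * @code{.c}
 * static const uint8_t aes_key[16]; // placeholder key material
 *
 * struct rte_crypto_cipher_xform cipher = {
 *	.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *	.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *	.key = {
 *		.data = aes_key,
 *		.length = sizeof(aes_key),
 *	},
 *	.iv = {
 *		.offset = IV_OFFSET,	// bytes from start of rte_crypto_op
 *		.length = 16,		// AES block size
 *	},
 * };
 * @endcode
 */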

/** Symmetric Authentication / Hash Algorithms
 *
 * Note, to avoid ABI breakage across releases
 * - LIST_END should not be added to this enum
 * - the order of enums should not be changed
 * - new algorithms should only be added to the end
 */
enum rte_crypto_auth_algorithm {
	RTE_CRYPTO_AUTH_NULL = 1,
	/**< NULL hash algorithm. */

	RTE_CRYPTO_AUTH_AES_CBC_MAC,
	/**< AES-CBC-MAC algorithm. Only 128-bit keys are supported. */
	RTE_CRYPTO_AUTH_AES_CMAC,
	/**< AES CMAC algorithm. */
	RTE_CRYPTO_AUTH_AES_GMAC,
	/**< AES GMAC algorithm. */
	RTE_CRYPTO_AUTH_AES_XCBC_MAC,
	/**< AES XCBC algorithm. */

	RTE_CRYPTO_AUTH_KASUMI_F9,
	/**< KASUMI algorithm in F9 mode. */

	RTE_CRYPTO_AUTH_MD5,
	/**< MD5 algorithm */
	RTE_CRYPTO_AUTH_MD5_HMAC,
	/**< HMAC using MD5 algorithm */

	RTE_CRYPTO_AUTH_SHA1,
	/**< 160 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA1_HMAC,
	/**< HMAC using 160 bit SHA algorithm.
	 * HMAC-SHA-1-96 can be generated by setting
	 * digest_length to 12 bytes in auth/aead xforms.
	 */
	RTE_CRYPTO_AUTH_SHA224,
	/**< 224 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA224_HMAC,
	/**< HMAC using 224 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA256,
	/**< 256 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA256_HMAC,
	/**< HMAC using 256 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA384,
	/**< 384 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA384_HMAC,
	/**< HMAC using 384 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA512,
	/**< 512 bit SHA algorithm. */
	RTE_CRYPTO_AUTH_SHA512_HMAC,
	/**< HMAC using 512 bit SHA algorithm. */

	RTE_CRYPTO_AUTH_SNOW3G_UIA2,
	/**< SNOW 3G algorithm in UIA2 mode. */

	RTE_CRYPTO_AUTH_ZUC_EIA3,
	/**< ZUC algorithm in EIA3 mode */

	RTE_CRYPTO_AUTH_SHA3_224,
	/**< 224 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_224_HMAC,
	/**< HMAC using 224 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_256,
	/**< 256 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_256_HMAC,
	/**< HMAC using 256 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_384,
	/**< 384 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_384_HMAC,
	/**< HMAC using 384 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_512,
	/**< 512 bit SHA3 algorithm. */
	RTE_CRYPTO_AUTH_SHA3_512_HMAC
	/**< HMAC using 512 bit SHA3 algorithm. */
};

/** Authentication algorithm name strings */
extern const char *
rte_crypto_auth_algorithm_strings[];

/** Symmetric Authentication / Hash Operations */
enum rte_crypto_auth_operation {
	RTE_CRYPTO_AUTH_OP_VERIFY,	/**< Verify authentication digest */
	RTE_CRYPTO_AUTH_OP_GENERATE	/**< Generate authentication digest */
};

/** Authentication operation name strings */
extern const char *
rte_crypto_auth_operation_strings[];

/**
 * Authentication / Hash transform data.
 *
 * This structure contains data relating to an authentication/hash crypto
 * transform. The fields op, algo and digest_length are common to all
 * authentication transforms and MUST be set.
 */
struct rte_crypto_auth_xform {
	enum rte_crypto_auth_operation op;
	/**< Authentication operation type */
	enum rte_crypto_auth_algorithm algo;
	/**< Authentication algorithm selection */

	struct {
		const uint8_t *data;	/**< pointer to key data */
		uint16_t length;	/**< key length in bytes */
	} key;
	/**< Authentication key data.
	 * The authentication key length MUST be less than or equal to the
	 * block size of the algorithm. It is the caller's responsibility to
	 * ensure that the key length is compliant with the standard being used
	 * (for example RFC 2104, FIPS 198a).
	 */

	struct {
		uint16_t offset;
		/**< Starting point for Initialisation Vector or Counter,
		 * specified as number of bytes from start of crypto
		 * operation (rte_crypto_op).
		 *
		 * - For SNOW 3G in UIA2 mode, for ZUC in EIA3 mode
		 *   this is the authentication Initialisation Vector
		 *   (IV) value. For AES-GMAC IV description please refer
		 *   to the field `length` in iv struct.
		 *
		 * - For KASUMI in F9 mode and other authentication
		 *   algorithms, this field is not used.
		 *
		 * For optimum performance, the data pointed to SHOULD
		 * be 8-byte aligned.
		 */
		uint16_t length;
		/**< Length of valid IV data.
		 *
		 * - For SNOW 3G in UIA2 mode, for ZUC in EIA3 mode and
		 *   for AES-GMAC, this is the length of the IV.
		 *
		 * - For KASUMI in F9 mode and other authentication
		 *   algorithms, this field is not used.
		 *
		 * - For GMAC mode, this is either:
		 * 1) A number greater than or equal to one, which means
		 *    that the IV is used and J0 will be computed
		 *    internally; a minimum of 16 bytes must be allocated.
		 * 2) Zero, in which case data points to J0. In this case
		 *    16 bytes of J0 should be passed where J0 is defined
		 *    by NIST SP800-38D.
		 */
	} iv;	/**< Initialisation vector parameters */

	uint16_t digest_length;
	/**< Length of the digest to be returned. If the verify option is set,
	 * this specifies the length of the digest to be compared for the
	 * session.
	 *
	 * It is the caller's responsibility to ensure that the
	 * digest length is compliant with the hash algorithm being used.
	 * If the value is less than the maximum length allowed by the hash,
	 * the result shall be truncated.
	 */
};
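
/*
 * Example (illustrative sketch): an HMAC-SHA-256 digest-generation
 * transform producing a full 32-byte digest. The hmac_key buffer and its
 * length are hypothetical placeholders.
 *
 * @code{.c}
 * static const uint8_t hmac_key[32]; // placeholder key material
 *
 * struct rte_crypto_auth_xform auth = {
 *	.op = RTE_CRYPTO_AUTH_OP_GENERATE,
 *	.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
 *	.key = {
 *		.data = hmac_key,
 *		.length = sizeof(hmac_key),
 *	},
 *	.iv = { .offset = 0, .length = 0 },	// IV not used for HMAC
 *	.digest_length = 32,			// full SHA-256 digest
 * };
 * @endcode
 */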


/** Symmetric AEAD Algorithms
 *
 * Note, to avoid ABI breakage across releases
 * - LIST_END should not be added to this enum
 * - the order of enums should not be changed
 * - new algorithms should only be added to the end
 */
enum rte_crypto_aead_algorithm {
	RTE_CRYPTO_AEAD_AES_CCM = 1,
	/**< AES algorithm in CCM mode. */
	RTE_CRYPTO_AEAD_AES_GCM,
	/**< AES algorithm in GCM mode. */
	RTE_CRYPTO_AEAD_CHACHA20_POLY1305
	/**< Chacha20 cipher with poly1305 authenticator */
};

/** AEAD algorithm name strings */
extern const char *
rte_crypto_aead_algorithm_strings[];

/** Symmetric AEAD Operations */
enum rte_crypto_aead_operation {
	RTE_CRYPTO_AEAD_OP_ENCRYPT,
	/**< Encrypt and generate digest */
	RTE_CRYPTO_AEAD_OP_DECRYPT
	/**< Verify digest and decrypt */
};

/** AEAD operation name strings */
extern const char *
rte_crypto_aead_operation_strings[];

/**
 * AEAD transform data.
 *
 * This structure contains data relating to an AEAD crypto transform.
 */
struct rte_crypto_aead_xform {
	enum rte_crypto_aead_operation op;
	/**< AEAD operation type */
	enum rte_crypto_aead_algorithm algo;
	/**< AEAD algorithm selection */

	struct {
		const uint8_t *data;	/**< pointer to key data */
		uint16_t length;	/**< key length in bytes */
	} key;
	/**< AEAD key data */

	struct {
		uint16_t offset;
		/**< Starting point for Initialisation Vector or Counter,
		 * specified as number of bytes from start of crypto
		 * operation (rte_crypto_op).
		 *
		 * - For CCM mode, the first byte is reserved, and the
		 * nonce should be written starting at &iv[1] (to allow
		 * space for the implementation to write in the flags
		 * in the first byte). Note that a full 16 bytes should
		 * be allocated, even though the length field will
		 * have a value less than this.
		 *
		 * - For Chacha20-Poly1305 it is a 96-bit nonce.
		 * The PMD sets the initial counter for the Poly1305 key
		 * generation part to 0 and for Chacha20 encryption to 1,
		 * as per RFC 8439 section 2.8 "AEAD construction".
		 *
		 * For optimum performance, the data pointed to SHOULD
		 * be 8-byte aligned.
		 */
		uint16_t length;
		/**< Length of valid IV data.
		 *
		 * - For GCM mode, this is either:
		 * 1) A number greater than or equal to one, which means
		 *    that the IV is used and J0 will be computed
		 *    internally; a minimum of 16 bytes must be allocated.
		 * 2) Zero, in which case data points to J0. In this case
		 *    16 bytes of J0 should be passed where J0 is defined
		 *    by NIST SP800-38D.
		 *
		 * - For CCM mode, this is the length of the nonce,
		 * which can be in the range 7 to 13 inclusive.
		 *
		 * - For Chacha20-Poly1305 this field is always 12.
		 */
	} iv;	/**< Initialisation vector parameters */

	uint16_t digest_length;
	/**< Length of the digest (authentication tag) to be returned or
	 * verified, in bytes.
	 */

	uint16_t aad_length;
	/**< The length of the additional authenticated data (AAD) in bytes.
	 * For CCM mode, this is the length of the actual AAD, even though
	 * 18 bytes must be reserved before the AAD and padding added after
	 * it, so that the total allocation is a multiple of 16 bytes.
	 */
};
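
/*
 * Example (illustrative sketch): an AES-128-GCM encrypt transform with a
 * 12-byte IV, a 16-byte tag and 16 bytes of AAD. The gcm_key buffer and
 * the IV_OFFSET constant are hypothetical placeholders.
 *
 * @code{.c}
 * static const uint8_t gcm_key[16]; // placeholder key material
 *
 * struct rte_crypto_aead_xform aead = {
 *	.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
 *	.algo = RTE_CRYPTO_AEAD_AES_GCM,
 *	.key = {
 *		.data = gcm_key,
 *		.length = sizeof(gcm_key),
 *	},
 *	.iv = {
 *		.offset = IV_OFFSET,	// bytes from start of rte_crypto_op
 *		.length = 12,		// 96-bit IV, J0 computed internally
 *	},
 *	.digest_length = 16,		// full GCM authentication tag
 *	.aad_length = 16,
 * };
 * @endcode
 */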

/** Crypto transformation types */
enum rte_crypto_sym_xform_type {
	RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED = 0,	/**< No xform specified */
	RTE_CRYPTO_SYM_XFORM_AUTH,		/**< Authentication xform */
	RTE_CRYPTO_SYM_XFORM_CIPHER,		/**< Cipher xform  */
	RTE_CRYPTO_SYM_XFORM_AEAD		/**< AEAD xform  */
};

/**
 * Symmetric crypto transform structure.
 *
 * This is used to specify the crypto transforms required. Multiple transforms
 * can be chained together to specify a chain of transforms such as
 * authentication then cipher, or cipher then authentication. Each transform
 * structure can hold a single transform; the type field is used to specify
 * which transform is contained within the union.
 */
struct rte_crypto_sym_xform {
	struct rte_crypto_sym_xform *next;
	/**< next xform in chain */
	enum rte_crypto_sym_xform_type type;
	/**< xform type */
	RTE_STD_C11
	union {
		struct rte_crypto_auth_xform auth;
		/**< Authentication / hash xform */
		struct rte_crypto_cipher_xform cipher;
		/**< Cipher xform */
		struct rte_crypto_aead_xform aead;
		/**< AEAD xform */
	};
};
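
/*
 * Example (illustrative sketch): chaining a cipher transform followed by
 * an authentication transform (cipher-then-authenticate). The embedded
 * cipher and auth members are assumed to be filled in as in the earlier
 * examples above.
 *
 * @code{.c}
 * struct rte_crypto_sym_xform auth_xform = {
 *	.next = NULL,
 *	.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *	.auth = auth,		// struct rte_crypto_auth_xform from above
 * };
 * struct rte_crypto_sym_xform cipher_xform = {
 *	.next = &auth_xform,	// cipher first, then authentication
 *	.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *	.cipher = cipher,	// struct rte_crypto_cipher_xform from above
 * };
 * @endcode
 */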

struct rte_cryptodev_sym_session;

/**
 * Symmetric Cryptographic Operation.
 *
 * This structure contains data relating to performing symmetric cryptographic
 * processing on a referenced mbuf data buffer.
 *
 * When a symmetric crypto operation is enqueued with the device for processing
 * it must have a valid *rte_mbuf* structure attached, via the m_src parameter,
 * which contains the source data on which the crypto operation is to be
 * performed.
 * While the mbuf is in use by a crypto operation no part of the mbuf should be
 * changed by the application as the device may read or write to any part of
 * the mbuf. In the case of hardware crypto devices some or all of the mbuf
 * may be DMAed in and out of the device, writing over the original data,
 * though only the part specified by the rte_crypto_sym_op for transformation
 * will be changed.
 * Out-of-place (OOP) operation, where the source mbuf is different to the
 * destination mbuf, is a special case. Data will be copied from m_src to
 * m_dst. The part copied includes all the parts of the source mbuf that will
 * be operated on, based on the cipher.data.offset+cipher.data.length and
 * auth.data.offset+auth.data.length values in the rte_crypto_sym_op. The part
 * indicated by the cipher parameters will be transformed; any extra data
 * around this indicated by the auth parameters will be copied unchanged from
 * source to destination mbuf.
 * Also in OOP operation the cipher.data.offset and auth.data.offset apply to
 * both source and destination mbufs. As these offsets are relative to the
 * data_off parameter in each mbuf this can result in the data written to the
 * destination buffer being at a different alignment, relative to buffer start,
 * to the data in the source buffer.
 */
struct rte_crypto_sym_op {
	struct rte_mbuf *m_src;	/**< source mbuf */
	struct rte_mbuf *m_dst;	/**< destination mbuf */

	RTE_STD_C11
	union {
		struct rte_cryptodev_sym_session *session;
		/**< Handle for the initialised session context */
		struct rte_crypto_sym_xform *xform;
		/**< Session-less API crypto operation parameters */
		struct rte_security_session *sec_session;
		/**< Handle for the initialised security session context */
	};

	RTE_STD_C11
	union {
		struct {
			struct {
				uint32_t offset;
				 /**< Starting point for AEAD processing, specified as
				  * number of bytes from start of packet in source
				  * buffer.
				  */
				uint32_t length;
				 /**< The message length, in bytes, of the source buffer
				  * on which the cryptographic operation will be
				  * computed. This must be a multiple of the block size.
				  */
			} data; /**< Data offsets and length for AEAD */
			struct {
				uint8_t *data;
				/**< This points to the location where the digest result
				 * should be inserted (in the case of digest generation)
				 * or where the purported digest exists (in the case of
				 * digest verification).
				 *
				 * At session creation time, the client specified the
				 * digest result length with the digest_length member
				 * of the @ref rte_crypto_auth_xform structure. For
				 * physical crypto devices the caller must allocate at
				 * least digest_length of physically contiguous memory
				 * at this location.
				 *
				 * For digest generation, the digest result will
				 * overwrite any data at this location.
				 *
				 * @note
				 * For GCM (@ref RTE_CRYPTO_AEAD_AES_GCM), for
				 * "digest result" read "authentication tag T".
				 */
				rte_iova_t phys_addr;
				/**< Physical address of digest */
			} digest; /**< Digest parameters */
			struct {
				uint8_t *data;
				/**< Pointer to Additional Authenticated Data (AAD)
				 * needed for authenticated cipher mechanisms (CCM and
				 * GCM)
				 *
				 * Specifically for CCM (@ref RTE_CRYPTO_AEAD_AES_CCM),
				 * the caller should set up this field as follows:
				 *
				 * - the additional authentication data itself should
				 * be written starting at an offset of 18 bytes into
				 * the array, leaving room for the first block (16 bytes)
				 * and the length encoding in the first two bytes of the
				 * second block.
				 *
				 * - the array should be big enough to hold the above
				 * fields, plus any padding to round this up to the
				 * nearest multiple of the block size (16 bytes).
				 * Padding will be added by the implementation.
				 *
				 * - Note that PMDs may modify the memory reserved
				 * (first 18 bytes and the final padding).
				 *
				 * Finally, for GCM (@ref RTE_CRYPTO_AEAD_AES_GCM), the
				 * caller should set up this field as follows:
				 *
				 * - the AAD is written in starting at byte 0
				 * - the array must be big enough to hold the AAD, plus
				 * any space to round this up to the nearest multiple
				 * of the block size (16 bytes).
				 *
				 */
				rte_iova_t phys_addr;	/**< physical address */
			} aad;
			/**< Additional authentication parameters */
		} aead;

		struct {
			struct {
				struct {
					uint32_t offset;
					 /**< Starting point for cipher processing,
					  * specified as number of bytes from start
					  * of data in the source buffer.
					  * The result of the cipher operation will be
					  * written back into the output buffer
					  * starting at this location.
					  *
					  * @note
					  * For SNOW 3G @ RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
					  * KASUMI @ RTE_CRYPTO_CIPHER_KASUMI_F8
					  * and ZUC @ RTE_CRYPTO_CIPHER_ZUC_EEA3,
					  * this field should be in bits. For
					  * digest-encrypted cases this must be
					  * an 8-bit multiple.
					  */
					uint32_t length;
					 /**< The message length, in bytes, of the
					  * source buffer on which the cryptographic
					  * operation will be computed.
					  * This is also the same as the result length.
					  * This must be a multiple of the block size
					  * or a multiple of the data-unit length
					  * as described in the xform.
					  *
					  * @note
					  * For SNOW 3G @ RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
					  * KASUMI @ RTE_CRYPTO_CIPHER_KASUMI_F8
					  * and ZUC @ RTE_CRYPTO_CIPHER_ZUC_EEA3,
					  * this field should be in bits. For
					  * digest-encrypted cases this must be
					  * an 8-bit multiple.
					  */
				} data; /**< Data offsets and length for ciphering */
			} cipher;

			struct {
				struct {
					uint32_t offset;
					 /**< Starting point for hash processing,
					  * specified as number of bytes from start of
					  * packet in source buffer.
					  *
					  * @note
					  * For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
					  * KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9
					  * and ZUC @ RTE_CRYPTO_AUTH_ZUC_EIA3,
					  * this field should be in bits. For
					  * digest-encrypted cases this must be
					  * an 8-bit multiple.
					  *
					  * @note
					  * For KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9,
					  * this offset should be such that
					  * data to authenticate starts at COUNT.
					  *
					  * @note
					  * For DOCSIS security protocol, this
					  * offset is the DOCSIS header length
					  * and, therefore, also the CRC offset
					  * i.e. the number of bytes into the
					  * packet at which CRC calculation
					  * should begin.
					  */
					uint32_t length;
					 /**< The message length, in bytes, of the source
					  * buffer that the hash will be computed on.
					  *
					  * @note
					  * For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
					  * KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9
					  * and ZUC @ RTE_CRYPTO_AUTH_ZUC_EIA3,
					  * this field should be in bits. For
					  * digest-encrypted cases this must be
					  * an 8-bit multiple.
					  *
					  * @note
					  * For KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9,
					  * the length should include the COUNT,
					  * FRESH, message, direction bit and padding
					  * (to be a multiple of 8 bits).
					  *
					  * @note
					  * For DOCSIS security protocol, this
					  * is the CRC length i.e. the number of
					  * bytes in the packet over which the
					  * CRC should be calculated.
					  */
				} data;
				/**< Data offsets and length for authentication */

				struct {
					uint8_t *data;
					/**< This points to the location where
					 * the digest result should be inserted
					 * (in the case of digest generation)
					 * or where the purported digest exists
					 * (in the case of digest verification).
					 *
					 * At session creation time, the client
					 * specified the digest result length with
					 * the digest_length member of the
					 * @ref rte_crypto_auth_xform structure.
					 * For physical crypto devices the caller
					 * must allocate at least digest_length of
					 * physically contiguous memory at this
					 * location.
					 *
					 * For digest generation, the digest result
					 * will overwrite any data at this location.
					 *
					 * @note
					 * Digest-encrypted case.
					 * Digest can be generated, appended to
					 * the end of raw data and encrypted
					 * together using chained digest
					 * generation
					 * (@ref RTE_CRYPTO_AUTH_OP_GENERATE)
					 * and encryption
					 * (@ref RTE_CRYPTO_CIPHER_OP_ENCRYPT)
					 * xforms. Similarly, authentication
					 * of the raw data against the appended,
					 * decrypted digest can be performed
					 * using decryption
					 * (@ref RTE_CRYPTO_CIPHER_OP_DECRYPT)
					 * and digest verification
					 * (@ref RTE_CRYPTO_AUTH_OP_VERIFY)
					 * chained xforms.
					 * To perform those operations, a few
					 * additional conditions must be met:
					 * - the caller must allocate at least
					 * digest_length of memory at the end of
					 * source and (in case of out-of-place
					 * operations) destination buffer; those
					 * buffers can be linear or split using
					 * scatter-gather lists,
					 * - the digest data pointer must point to
					 * the end of source or (in case of
					 * out-of-place operations) destination
					 * data, which is a pointer to the
					 * data buffer + auth.data.offset +
					 * auth.data.length,
					 * - cipher.data.offset +
					 * cipher.data.length must be greater
					 * than auth.data.offset +
					 * auth.data.length and is typically
					 * equal to auth.data.offset +
					 * auth.data.length + digest_length,
					 * - for wireless algorithms, i.e.
					 * SNOW 3G, KASUMI and ZUC, as the
					 * cipher.data.length,
					 * cipher.data.offset,
					 * auth.data.length and
					 * auth.data.offset are in bits, they
					 * must be 8-bit multiples.
					 *
					 * Note that, for security reasons, it
					 * is the PMDs' responsibility not to
					 * leave an unencrypted digest in any
					 * buffer after performing auth-cipher
					 * operations.
					 *
					 */
					rte_iova_t phys_addr;
					/**< Physical address of digest */
				} digest; /**< Digest parameters */
			} auth;
		};
	};
};
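
/*
 * Example (illustrative sketch): setting up a symmetric operation for a
 * cipher/auth chain over a single mbuf, authenticating the whole payload
 * and ciphering everything past a 16-byte header. The op, sess, m and
 * payload_len variables are hypothetical placeholders; op->sym and
 * rte_crypto_op_attach_sym_session() come from rte_crypto.h.
 *
 * @code{.c}
 * struct rte_crypto_sym_op *sym_op = op->sym; // op: struct rte_crypto_op *
 *
 * sym_op->m_src = m;	// in-place operation: m_dst left NULL
 * rte_crypto_op_attach_sym_session(op, sess);
 *
 * sym_op->auth.data.offset = 0;
 * sym_op->auth.data.length = payload_len;
 * sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
 *						      payload_len);
 * sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m, payload_len);
 *
 * sym_op->cipher.data.offset = 16;	// skip a 16-byte header
 * sym_op->cipher.data.length = payload_len - 16;
 * @endcode
 */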


/**
 * Reset the fields of a symmetric operation to their default values.
 *
 * @param	op	The crypto operation to be reset.
 */
static inline void
__rte_crypto_sym_op_reset(struct rte_crypto_sym_op *op)
{
	memset(op, 0, sizeof(*op));
}


/**
 * Allocate space for symmetric crypto xforms in the private data space of the
 * crypto operation. This also defaults the crypto xform type to
 * RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED and configures the chaining of the xforms
 * in the crypto operation.
 *
 * @return
 * - On success returns a pointer to the first crypto xform in the crypto
 *   operation's chain
 * - On failure returns NULL
 */
static inline struct rte_crypto_sym_xform *
__rte_crypto_sym_op_sym_xforms_alloc(struct rte_crypto_sym_op *sym_op,
		void *priv_data, uint8_t nb_xforms)
{
	struct rte_crypto_sym_xform *xform;

	sym_op->xform = xform = (struct rte_crypto_sym_xform *)priv_data;

	do {
		xform->type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED;
		xform = xform->next = --nb_xforms > 0 ? xform + 1 : NULL;
	} while (xform);

	return sym_op->xform;
}
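
/*
 * Example (illustrative sketch): carving a two-xform chain out of a
 * suitably sized buffer, then typing each xform. The buf array stands in
 * for the crypto operation's private data area; applications would
 * normally reach this helper via rte_crypto_op_sym_xforms_alloc() in
 * rte_crypto.h.
 *
 * @code{.c}
 * struct rte_crypto_sym_xform buf[2]; // stands in for op private data
 * struct rte_crypto_sym_op sym_op;
 *
 * __rte_crypto_sym_op_reset(&sym_op);
 * struct rte_crypto_sym_xform *xf =
 *	__rte_crypto_sym_op_sym_xforms_alloc(&sym_op, buf, 2);
 * if (xf != NULL) {
 *	xf->type = RTE_CRYPTO_SYM_XFORM_CIPHER;	// first xform in chain
 *	xf->next->type = RTE_CRYPTO_SYM_XFORM_AUTH; // second, already linked
 * }
 * @endcode
 */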


/**
 * Attach a session to a symmetric crypto operation
 *
 * @param	sym_op	crypto operation
 * @param	sess	cryptodev session
 */
static inline int
__rte_crypto_sym_op_attach_sym_session(struct rte_crypto_sym_op *sym_op,
		struct rte_cryptodev_sym_session *sess)
{
	sym_op->session = sess;

	return 0;
}

/**
 * Converts a portion of mbuf data into a vector representation.
 * Each segment will be represented as a separate entry in the *vec* array.
 * Expects the provided *ofs* + *len* not to exceed the mbuf's *pkt_len*.
 * @param mb
 *   Pointer to the *rte_mbuf* object.
 * @param ofs
 *   Offset within mbuf data to start with.
 * @param len
 *   Length of data to represent.
 * @param vec
 *   Pointer to an output array of IO vectors.
 * @param num
 *   Size of the output array.
 * @return
 *   - number of successfully filled entries in the *vec* array.
 *   - on failure, the negated number of elements required in the *vec* array.
 */
__rte_experimental
static inline int
rte_crypto_mbuf_to_vec(const struct rte_mbuf *mb, uint32_t ofs, uint32_t len,
	struct rte_crypto_vec vec[], uint32_t num)
{
	uint32_t i;
	struct rte_mbuf *nseg;
	uint32_t left;
	uint32_t seglen;

	/* assuming that requested data starts in the first segment */
	RTE_ASSERT(mb->data_len > ofs);

	if (mb->nb_segs > num)
		return -mb->nb_segs;

	vec[0].base = rte_pktmbuf_mtod_offset(mb, void *, ofs);
	vec[0].iova = rte_pktmbuf_iova_offset(mb, ofs);

	/* whole data lies in the first segment */
	seglen = mb->data_len - ofs;
	if (len <= seglen) {
		vec[0].len = len;
		return 1;
	}

	/* data spread across segments */
	vec[0].len = seglen;
	left = len - seglen;
	for (i = 1, nseg = mb->next; nseg != NULL; nseg = nseg->next, i++) {

		vec[i].base = rte_pktmbuf_mtod(nseg, void *);
		vec[i].iova = rte_pktmbuf_iova(nseg);

		seglen = nseg->data_len;
		if (left <= seglen) {
			/* whole requested data is completed */
			vec[i].len = left;
			left = 0;
			break;
		}

		/* use whole segment */
		vec[i].len = seglen;
		left -= seglen;
	}

	RTE_ASSERT(left == 0);
	return i + 1;
}
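
/*
 * Example (illustrative sketch): gathering a crypto payload from a
 * possibly multi-segment mbuf. The mb, data_ofs and data_len variables
 * are hypothetical placeholders; 8 is an arbitrary array size.
 *
 * @code{.c}
 * struct rte_crypto_vec vec[8];
 * int n = rte_crypto_mbuf_to_vec(mb, data_ofs, data_len,
 *				  vec, RTE_DIM(vec));
 * if (n < 0) {
 *	// vec[] is too small: -n entries would be required
 * } else {
 *	// vec[0..n-1] now describe the payload segment by segment
 * }
 * @endcode
 */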


#ifdef __cplusplus
}
#endif

#endif /* _RTE_CRYPTO_SYM_H_ */