/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#ifndef _CRYPTO_H_
#define _CRYPTO_H_

/**
 * @file crypto.h
 * Contains crypto-specific functions/structures/macros used internally
 * by the ipsec library.
 */

/*
 * AES-CTR counter block format.
 */

struct __rte_packed_begin aesctr_cnt_blk {
	uint32_t nonce;
	uint64_t iv;
	uint32_t cnt;
} __rte_packed_end;

/*
 * CHACHA20-POLY1305 devices have some specific requirements
 * for IV and AAD formats.
 * Ideally that should be done by the driver itself.
 */

struct __rte_packed_begin aead_chacha20_poly1305_iv {
	uint32_t salt;
	uint64_t iv;
	uint32_t cnt;
} __rte_packed_end;

struct __rte_packed_begin aead_chacha20_poly1305_aad {
	uint32_t spi;
	/*
	 * RFC 4106, section 5:
	 * Two formats of the AAD are defined:
	 * one for 32-bit sequence numbers, and one for 64-bit ESN.
	 */
	union {
		uint32_t u32[2];
		uint64_t u64;
	} sqn;
	uint32_t align0; /* align to 16B boundary */
} __rte_packed_end;

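/* ESP header followed directly by the 64-bit IV, as laid out in the packet. */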
struct __rte_packed_begin chacha20_poly1305_esph_iv {
	struct rte_esp_hdr esph;
	uint64_t iv;
} __rte_packed_end;

/*
 * AES-GCM devices have some specific requirements for IV and AAD formats.
 * Ideally that should be done by the driver itself.
 */

struct __rte_packed_begin aead_gcm_iv {
	uint32_t salt;
	uint64_t iv;
	uint32_t cnt;
} __rte_packed_end;

struct __rte_packed_begin aead_gcm_aad {
	uint32_t spi;
	/*
	 * RFC 4106, section 5:
	 * Two formats of the AAD are defined:
	 * one for 32-bit sequence numbers, and one for 64-bit ESN.
	 */
	union {
		uint32_t u32[2];
		uint64_t u64;
	} sqn;
	uint32_t align0; /* align to 16B boundary */
} __rte_packed_end;

struct __rte_packed_begin gcm_esph_iv {
	struct rte_esp_hdr esph;
	uint64_t iv;
} __rte_packed_end;

/*
 * AES-CCM devices have some specific requirements for IV and AAD formats.
 * Ideally that should be done by the driver itself.
 */
union __rte_packed_begin aead_ccm_salt {
	uint32_t salt;
	struct inner {
		uint8_t salt8[3];
		uint8_t ccm_flags;
	} inner;
} __rte_packed_end;


struct __rte_packed_begin aead_ccm_iv {
	uint8_t ccm_flags;
	uint8_t salt[3];
	uint64_t iv;
	uint32_t cnt;
} __rte_packed_end;

struct __rte_packed_begin aead_ccm_aad {
	uint8_t padding[18];
	uint32_t spi;
	/*
	 * RFC 4309, section 5:
	 * Two formats of the AAD are defined:
	 * one for 32-bit sequence numbers, and one for 64-bit ESN.
	 */
	union {
		uint32_t u32[2];
		uint64_t u64;
	} sqn;
	uint32_t align0; /* align to 16B boundary */
} __rte_packed_end;

struct __rte_packed_begin ccm_esph_iv {
	struct rte_esp_hdr esph;
	uint64_t iv;
} __rte_packed_end;


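/*
 * Fill the AES-CTR counter block: nonce (salt), 64-bit IV and
 * the block counter initialized to 1 (see RFC 3686).
 */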
static inline void
aes_ctr_cnt_blk_fill(struct aesctr_cnt_blk *ctr, uint64_t iv, uint32_t nonce)
{
	ctr->nonce = nonce;
	ctr->iv = iv;
	ctr->cnt = rte_cpu_to_be_32(1);
}

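/*
 * Fill the ChaCha20-Poly1305 IV structure: 4-byte salt followed by
 * the 64-bit per-packet IV, with the initial counter set to 1.
 */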
static inline void
aead_chacha20_poly1305_iv_fill(struct aead_chacha20_poly1305_iv
			       *chacha20_poly1305,
			       uint64_t iv, uint32_t salt)
{
	chacha20_poly1305->salt = salt;
	chacha20_poly1305->iv = iv;
	chacha20_poly1305->cnt = rte_cpu_to_be_32(1);
}

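/*
 * Fill the AES-GCM IV structure: 4-byte salt followed by the 64-bit
 * per-packet IV, with the initial counter set to 1 (RFC 4106).
 */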
static inline void
aead_gcm_iv_fill(struct aead_gcm_iv *gcm, uint64_t iv, uint32_t salt)
{
	gcm->salt = salt;
	gcm->iv = iv;
	gcm->cnt = rte_cpu_to_be_32(1);
}

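/*
 * Fill the AES-CCM IV structure: the CCM flags byte and 3-byte salt are
 * extracted from the 32-bit salt value, followed by the 64-bit
 * per-packet IV and the initial counter set to 1.
 */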
static inline void
aead_ccm_iv_fill(struct aead_ccm_iv *ccm, uint64_t iv, uint32_t salt)
{
	union aead_ccm_salt tsalt;

	tsalt.salt = salt;
	ccm->ccm_flags = tsalt.inner.ccm_flags;
	ccm->salt[0] = tsalt.inner.salt8[0];
	ccm->salt[1] = tsalt.inner.salt8[1];
	ccm->salt[2] = tsalt.inner.salt8[2];
	ccm->iv = iv;
	ccm->cnt = rte_cpu_to_be_32(1);
}


/*
 * RFC 4106, 5 AAD Construction
 * spi and sqn should already be converted into network byte order.
 * Make sure that unused bytes are zeroed.
 */
static inline void
aead_gcm_aad_fill(struct aead_gcm_aad *aad, rte_be32_t spi, rte_be64_t sqn,
	int esn)
{
	aad->spi = spi;
	if (esn)
		aad->sqn.u64 = sqn;
	else {
		aad->sqn.u32[0] = sqn_low32(sqn);
		aad->sqn.u32[1] = 0;
	}
	aad->align0 = 0;
}

/*
 * RFC 4309, 5 AAD Construction
 * spi and sqn should already be converted into network byte order.
 * Make sure that unused bytes are zeroed.
 */
static inline void
aead_ccm_aad_fill(struct aead_ccm_aad *aad, rte_be32_t spi, rte_be64_t sqn,
	int esn)
{
	aad->spi = spi;
	if (esn)
		aad->sqn.u64 = sqn;
	else {
		aad->sqn.u32[0] = sqn_low32(sqn);
		aad->sqn.u32[1] = 0;
	}
	aad->align0 = 0;
}

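/*
 * Generate the per-packet IV from the 64-bit sequence number;
 * the unused second qword is zeroed.
 */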
static inline void
gen_iv(uint64_t iv[IPSEC_MAX_IV_QWORD], rte_be64_t sqn)
{
	iv[0] = sqn;
	iv[1] = 0;
}


/*
 * RFC 7634, 2.1 AAD Construction
 * spi and sqn should already be converted into network byte order.
 * Make sure that unused bytes are zeroed.
 */
static inline void
aead_chacha20_poly1305_aad_fill(struct aead_chacha20_poly1305_aad *aad,
					rte_be32_t spi, rte_be64_t sqn,
					int esn)
{
	aad->spi = spi;
	if (esn)
		aad->sqn.u64 = sqn;
	else {
		aad->sqn.u32[0] = sqn_low32(sqn);
		aad->sqn.u32[1] = 0;
	}
	aad->align0 = 0;
}

/*
 * Helper routine to copy IV.
 * Right now we support only algorithms with IV length equal to 0/8/16 bytes.
 */
static inline void
copy_iv(uint64_t dst[IPSEC_MAX_IV_QWORD],
	const uint64_t src[IPSEC_MAX_IV_QWORD], uint32_t len)
{
	RTE_BUILD_BUG_ON(IPSEC_MAX_IV_SIZE != 2 * sizeof(uint64_t));

	switch (len) {
	case IPSEC_MAX_IV_SIZE:
		dst[1] = src[1];
		/* fallthrough */
	case sizeof(uint64_t):
		dst[0] = src[0];
		/* fallthrough */
	case 0:
		break;
	default:
		/* should never happen */
		RTE_ASSERT(NULL);
	}
}

/*
 * from RFC 4303 3.3.2.1.4:
 * If the ESN option is enabled for the SA, the high-order 32
 * bits of the sequence number are appended after the Next Header field
 * for purposes of this computation, but are not transmitted.
 */
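/*
 * Resulting layout covered by the ICV computation when ESN is enabled:
 *
 *   ... | padding | pad len | next header | SQN.hibits (4B) | ICV
 *
 * SQN.hibits is authenticated but not transmitted.
 */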

/*
 * Helper function that moves the ICV 4B down (towards the packet end)
 * and inserts SQN.hibits in front of it.
 * icv parameter points to the new start of the ICV.
 */
static inline void
insert_sqh(uint32_t sqh, void *picv, uint32_t icv_len)
{
	uint32_t *icv;
	int32_t i;

	RTE_ASSERT(icv_len % sizeof(uint32_t) == 0);

	icv = picv;
	icv_len = icv_len / sizeof(uint32_t);
	for (i = icv_len; i-- != 0; icv[i] = icv[i - 1])
		;

	icv[i] = sqh;
}

/*
 * Helper function that moves the ICV 4B up (towards the packet start)
 * and removes SQN.hibits.
 * icv parameter points to the new start of the ICV.
 */
static inline void
remove_sqh(void *picv, uint32_t icv_len)
{
	uint32_t i, *icv;

	RTE_ASSERT(icv_len % sizeof(uint32_t) == 0);

	icv = picv;
	icv_len = icv_len / sizeof(uint32_t);
	for (i = 0; i != icv_len; i++)
		icv[i] = icv[i + 1];
}

/*
 * Set up a crypto op for LOOKASIDE_NONE (pure crypto) type of devices.
 */
static inline void
lksd_none_cop_prepare(struct rte_crypto_op *cop,
	struct rte_cryptodev_sym_session *cs, struct rte_mbuf *mb)
{
	struct rte_crypto_sym_op *sop;

	sop = cop->sym;
	cop->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
	cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	cop->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
	sop->m_src = mb;
	__rte_crypto_sym_op_attach_sym_session(sop, cs);
}

#endif /* _CRYPTO_H_ */