xref: /dpdk/examples/ipsec-secgw/esp.c (revision 923b5f27656627248979bd15b5aa07eff4932f85)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <stdint.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <fcntl.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_random.h>

#include "ipsec.h"
#include "esp.h"
#include "ipip.h"

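/*
 * Prepare an inbound ESP packet for lookaside crypto processing:
 * parse the outer IP header, sanity-check the payload length against
 * the cipher block size and fill in the symmetric crypto op (data
 * offsets, IV/counter block, AAD and digest). Inline-crypto SAs are
 * handled entirely in hardware, so nothing is done for them here.
 */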
int
esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	struct ip *ip4;
	struct rte_crypto_sym_op *sym_cop;
	int32_t payload_len, ip_hdr_len;

	RTE_ASSERT(sa != NULL);
	if (ipsec_get_action_type(sa) ==
			RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)
		return 0;

	RTE_ASSERT(m != NULL);
	RTE_ASSERT(cop != NULL);

	ip4 = rte_pktmbuf_mtod(m, struct ip *);
	if (likely(ip4->ip_v == IPVERSION))
		ip_hdr_len = ip4->ip_hl * 4;
	else if (ip4->ip_v == IP6_VERSION)
		/* XXX No option headers supported */
		ip_hdr_len = sizeof(struct ip6_hdr);
	else {
		RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n",
				ip4->ip_v);
		return -EINVAL;
	}

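	/*
	 * Encrypted payload = total length minus outer IP header, ESP
	 * header, IV and trailing ICV; it must be a positive multiple
	 * of the cipher block size.
	 */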
	payload_len = rte_pktmbuf_pkt_len(m) - ip_hdr_len -
		sizeof(struct rte_esp_hdr) - sa->iv_len - sa->digest_len;

	if ((payload_len & (sa->block_size - 1)) || (payload_len <= 0)) {
		RTE_LOG_DP(DEBUG, IPSEC_ESP, "payload %d not multiple of %u\n",
				payload_len, sa->block_size);
		return -EINVAL;
	}

	sym_cop = get_sym_cop(cop);
	sym_cop->m_src = m;

	if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
		sym_cop->aead.data.offset =
			ip_hdr_len + sizeof(struct rte_esp_hdr) + sa->iv_len;
		sym_cop->aead.data.length = payload_len;

		struct cnt_blk *icb;
		uint8_t *aad;
		uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len +
					sizeof(struct rte_esp_hdr));

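		/*
		 * Build the counter block: SA salt, the 8-byte IV taken
		 * from the packet and a 32-bit block counter starting at 1.
		 */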
		icb = get_cnt_blk(m);
		icb->salt = sa->salt;
		memcpy(&icb->iv, iv, 8);
		icb->cnt = rte_cpu_to_be_32(1);

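		/*
		 * AAD is the 8-byte SPI + sequence number, i.e. the ESP
		 * header that sits immediately before the IV.
		 */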
		aad = get_aad(m);
		memcpy(aad, iv - sizeof(struct rte_esp_hdr), 8);
		sym_cop->aead.aad.data = aad;
		sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m,
				aad - rte_pktmbuf_mtod(m, uint8_t *));

		sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, void*,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
		sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
	} else {
		sym_cop->cipher.data.offset = ip_hdr_len +
			sizeof(struct rte_esp_hdr) +
			sa->iv_len;
		sym_cop->cipher.data.length = payload_len;

		struct cnt_blk *icb;
		uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len +
					sizeof(struct rte_esp_hdr));
		uint8_t *iv_ptr = rte_crypto_op_ctod_offset(cop,
					uint8_t *, IV_OFFSET);

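		/*
		 * CBC-style ciphers use the packet IV verbatim; AES-CTR
		 * instead needs a salt||IV||counter block.
		 */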
		switch (sa->cipher_algo) {
		case RTE_CRYPTO_CIPHER_NULL:
		case RTE_CRYPTO_CIPHER_DES_CBC:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CBC:
			/* Copy IV at the end of crypto operation */
			rte_memcpy(iv_ptr, iv, sa->iv_len);
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			icb = get_cnt_blk(m);
			icb->salt = sa->salt;
			memcpy(&icb->iv, iv, 8);
			icb->cnt = rte_cpu_to_be_32(1);
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n",
					sa->cipher_algo);
			return -EINVAL;
		}

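		/*
		 * The authenticated region covers the ESP header, IV and
		 * payload, but not the outer IP header or the ICV itself.
		 */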
		switch (sa->auth_algo) {
		case RTE_CRYPTO_AUTH_NULL:
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
		case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
			sym_cop->auth.data.offset = ip_hdr_len;
			sym_cop->auth.data.length = sizeof(struct rte_esp_hdr) +
				sa->iv_len + payload_len;
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n",
					sa->auth_algo);
			return -EINVAL;
		}

		sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, void*,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
		sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
	}

	return 0;
}

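/*
 * Finish inbound processing after decryption: derive the crypto status
 * for inline sessions from the RX offload flags, validate and strip the
 * ESP trailer (padding, pad length, ICV), then rebuild the inner IP
 * header for transport mode or drop the tunnel header.
 */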
int
esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	struct ip *ip4, *ip;
	struct ip6_hdr *ip6;
	uint8_t *nexthdr, *pad_len;
	uint8_t *padding;
	uint16_t i;
	struct rte_ipsec_session *ips;

	RTE_ASSERT(m != NULL);
	RTE_ASSERT(sa != NULL);
	RTE_ASSERT(cop != NULL);

	ips = ipsec_get_primary_session(sa);

	if ((ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) ||
			(ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)) {
		if (m->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
			if (m->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)
				cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
			else
				cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		} else
			cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	}

	if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
		RTE_LOG(ERR, IPSEC_ESP, "%s() failed crypto op\n", __func__);
		return -1;
	}

	if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO &&
		ips->security.ol_flags & RTE_SECURITY_RX_HW_TRAILER_OFFLOAD) {
		nexthdr = &m->inner_esp_next_proto;
	} else {
		nexthdr = rte_pktmbuf_mtod_offset(m, uint8_t*,
				rte_pktmbuf_pkt_len(m) - sa->digest_len - 1);
		pad_len = nexthdr - 1;

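		/*
		 * RFC 4303 mandates monotonically increasing pad bytes
		 * 1, 2, 3, ...; reject anything else.
		 */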
		padding = pad_len - *pad_len;
		for (i = 0; i < *pad_len; i++) {
			if (padding[i] != i + 1) {
				RTE_LOG(ERR, IPSEC_ESP, "invalid padding\n");
				return -EINVAL;
			}
		}

		if (rte_pktmbuf_trim(m, *pad_len + 2 + sa->digest_len)) {
			RTE_LOG(ERR, IPSEC_ESP,
					"failed to remove pad_len + digest\n");
			return -EINVAL;
		}
	}

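	/*
	 * Transport mode: slide the original IP header forward over the
	 * ESP header and IV, then patch its protocol and length fields.
	 * Tunnel mode: strip the outer IP header entirely.
	 */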
	if (unlikely(IS_TRANSPORT(sa->flags))) {
		ip = rte_pktmbuf_mtod(m, struct ip *);
		ip4 = (struct ip *)rte_pktmbuf_adj(m,
				sizeof(struct rte_esp_hdr) + sa->iv_len);
		if (likely(ip->ip_v == IPVERSION)) {
			memmove(ip4, ip, ip->ip_hl * 4);
			ip4->ip_p = *nexthdr;
			ip4->ip_len = htons(rte_pktmbuf_data_len(m));
		} else {
			ip6 = (struct ip6_hdr *)ip4;
			/* XXX No option headers supported */
			memmove(ip6, ip, sizeof(struct ip6_hdr));
			ip6->ip6_nxt = *nexthdr;
			ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
					      sizeof(struct ip6_hdr));
		}
	} else
		ipip_inbound(m, sizeof(struct rte_esp_hdr) + sa->iv_len);

	return 0;
}

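/*
 * Build an outbound ESP packet: pick the next-layer protocol, compute
 * block-aligned trailer padding, reserve room for the trailer and ICV,
 * prepend the tunnel header (or shift the transport header), fill in
 * the ESP header and IV, and set up the crypto op unless an inline
 * session handles the crypto in hardware.
 */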
int
esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	struct ip *ip4;
	struct ip6_hdr *ip6;
	struct rte_esp_hdr *esp = NULL;
	uint8_t *padding = NULL, *new_ip, nlp;
	struct rte_crypto_sym_op *sym_cop;
	int32_t i;
	uint16_t pad_payload_len, pad_len, ip_hdr_len;
	struct rte_ipsec_session *ips;

	RTE_ASSERT(m != NULL);
	RTE_ASSERT(sa != NULL);

	ips = ipsec_get_primary_session(sa);
	ip_hdr_len = 0;

	ip4 = rte_pktmbuf_mtod(m, struct ip *);
	if (likely(ip4->ip_v == IPVERSION)) {
		if (unlikely(IS_TRANSPORT(sa->flags))) {
			ip_hdr_len = ip4->ip_hl * 4;
			nlp = ip4->ip_p;
		} else
			nlp = IPPROTO_IPIP;
	} else if (ip4->ip_v == IP6_VERSION) {
		if (unlikely(IS_TRANSPORT(sa->flags))) {
			/* XXX No option headers supported */
			ip_hdr_len = sizeof(struct ip6_hdr);
			ip6 = (struct ip6_hdr *)ip4;
			nlp = ip6->ip6_nxt;
		} else
			nlp = IPPROTO_IPV6;
	} else {
		RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n",
				ip4->ip_v);
		return -EINVAL;
	}

	/*
	 * Pad the payload, plus the 2-byte pad-length/next-header
	 * trailer, up to a multiple of the cipher block size.
	 */
	pad_payload_len = RTE_ALIGN_CEIL(rte_pktmbuf_pkt_len(m) -
			ip_hdr_len + 2, sa->block_size);
	pad_len = pad_payload_len + ip_hdr_len - rte_pktmbuf_pkt_len(m);

	RTE_ASSERT(IS_TUNNEL(sa->flags) || IS_TRANSPORT(sa->flags));

	if (likely(IS_IP4_TUNNEL(sa->flags)))
		ip_hdr_len = sizeof(struct ip);
	else if (IS_IP6_TUNNEL(sa->flags))
		ip_hdr_len = sizeof(struct ip6_hdr);
	else if (!IS_TRANSPORT(sa->flags)) {
		RTE_LOG(ERR, IPSEC_ESP, "Unsupported SA flags: 0x%x\n",
				sa->flags);
		return -EINVAL;
	}

	/* Check maximum packet size */
	if (unlikely(ip_hdr_len + sizeof(struct rte_esp_hdr) + sa->iv_len +
			pad_payload_len + sa->digest_len > IP_MAXPACKET)) {
		RTE_LOG(ERR, IPSEC_ESP, "ipsec packet is too big\n");
		return -EINVAL;
	}

	/* Add trailer padding if it is not constructed by HW */
	if (ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
			!(ips->security.ol_flags &
				RTE_SECURITY_TX_HW_TRAILER_OFFLOAD)) {
		padding = (uint8_t *)rte_pktmbuf_append(m, pad_len +
							sa->digest_len);
		if (unlikely(padding == NULL)) {
			RTE_LOG(ERR, IPSEC_ESP,
					"not enough mbuf trailing space\n");
			return -ENOSPC;
		}
		rte_prefetch0(padding);
	}

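	/*
	 * Prepend the outer tunnel header (or shift the existing
	 * transport-mode header) and locate the ESP header behind it.
	 */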
	switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
	case IP4_TUNNEL:
		ip4 = ip4ip_outbound(m, sizeof(struct rte_esp_hdr) + sa->iv_len,
				&sa->src, &sa->dst);
		esp = (struct rte_esp_hdr *)(ip4 + 1);
		break;
	case IP6_TUNNEL:
		ip6 = ip6ip_outbound(m, sizeof(struct rte_esp_hdr) + sa->iv_len,
				&sa->src, &sa->dst);
		esp = (struct rte_esp_hdr *)(ip6 + 1);
		break;
	case TRANSPORT:
		new_ip = (uint8_t *)rte_pktmbuf_prepend(m,
				sizeof(struct rte_esp_hdr) + sa->iv_len);
		memmove(new_ip, ip4, ip_hdr_len);
		esp = (struct rte_esp_hdr *)(new_ip + ip_hdr_len);
		ip4 = (struct ip *)new_ip;
		if (likely(ip4->ip_v == IPVERSION)) {
			ip4->ip_p = IPPROTO_ESP;
			ip4->ip_len = htons(rte_pktmbuf_data_len(m));
		} else {
			ip6 = (struct ip6_hdr *)new_ip;
			ip6->ip6_nxt = IPPROTO_ESP;
			ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
					      sizeof(struct ip6_hdr));
		}
	}

	sa->seq++;
	esp->spi = rte_cpu_to_be_32(sa->spi);
	esp->seq = rte_cpu_to_be_32((uint32_t)sa->seq);

	/*
	 * Set the per-packet IV: GCM and CTR derive it from the sequence
	 * number; the CBC modes use a zero IV here.
	 */
	uint64_t *iv = (uint64_t *)(esp + 1);
	if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
		*iv = rte_cpu_to_be_64(sa->seq);
	} else {
		switch (sa->cipher_algo) {
		case RTE_CRYPTO_CIPHER_NULL:
		case RTE_CRYPTO_CIPHER_DES_CBC:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CBC:
			memset(iv, 0, sa->iv_len);
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			*iv = rte_cpu_to_be_64(sa->seq);
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP,
				"unsupported cipher algorithm %u\n",
				sa->cipher_algo);
			return -EINVAL;
		}
	}

	if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
		if (ips->security.ol_flags &
				RTE_SECURITY_TX_HW_TRAILER_OFFLOAD) {
			/* Set the inner esp next protocol for HW trailer */
			m->inner_esp_next_proto = nlp;
			m->packet_type |= RTE_PTYPE_TUNNEL_ESP;
		} else {
			padding[pad_len - 2] = pad_len - 2;
			padding[pad_len - 1] = nlp;
		}
		goto done;
	}

	RTE_ASSERT(cop != NULL);
	sym_cop = get_sym_cop(cop);
	sym_cop->m_src = m;

	if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
		uint8_t *aad;

		sym_cop->aead.data.offset = ip_hdr_len +
			sizeof(struct rte_esp_hdr) + sa->iv_len;
		sym_cop->aead.data.length = pad_payload_len;

		/* Fill pad_len using default sequential scheme */
		for (i = 0; i < pad_len - 2; i++)
			padding[i] = i + 1;
		padding[pad_len - 2] = pad_len - 2;
		padding[pad_len - 1] = nlp;

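		/*
		 * Counter block: SA salt, sequence-number-derived IV and
		 * a 32-bit block counter starting at 1.
		 */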
		struct cnt_blk *icb = get_cnt_blk(m);
		icb->salt = sa->salt;
		icb->iv = rte_cpu_to_be_64(sa->seq);
		icb->cnt = rte_cpu_to_be_32(1);

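		/* AAD is the 8-byte SPI + sequence number from the ESP header. */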
		aad = get_aad(m);
		memcpy(aad, esp, 8);
		sym_cop->aead.aad.data = aad;
		sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m,
				aad - rte_pktmbuf_mtod(m, uint8_t *));

		sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
			rte_pktmbuf_pkt_len(m) - sa->digest_len);
		sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
			rte_pktmbuf_pkt_len(m) - sa->digest_len);
	} else {
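		/*
		 * CBC modes cipher the IV field together with the padded
		 * payload; CTR modes skip the IV and cipher the payload only.
		 */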
		switch (sa->cipher_algo) {
		case RTE_CRYPTO_CIPHER_NULL:
		case RTE_CRYPTO_CIPHER_DES_CBC:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CBC:
			sym_cop->cipher.data.offset = ip_hdr_len +
				sizeof(struct rte_esp_hdr);
			sym_cop->cipher.data.length = pad_payload_len + sa->iv_len;
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			sym_cop->cipher.data.offset = ip_hdr_len +
				sizeof(struct rte_esp_hdr) + sa->iv_len;
			sym_cop->cipher.data.length = pad_payload_len;
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n",
					sa->cipher_algo);
			return -EINVAL;
		}

		/* Fill pad_len using default sequential scheme */
		for (i = 0; i < pad_len - 2; i++)
			padding[i] = i + 1;
		padding[pad_len - 2] = pad_len - 2;
		padding[pad_len - 1] = nlp;

		struct cnt_blk *icb = get_cnt_blk(m);
		icb->salt = sa->salt;
		icb->iv = rte_cpu_to_be_64(sa->seq);
		icb->cnt = rte_cpu_to_be_32(1);

		switch (sa->auth_algo) {
		case RTE_CRYPTO_AUTH_NULL:
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
		case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
			sym_cop->auth.data.offset = ip_hdr_len;
			sym_cop->auth.data.length = sizeof(struct rte_esp_hdr) +
				sa->iv_len + pad_payload_len;
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n",
					sa->auth_algo);
			return -EINVAL;
		}

		sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
		sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
	}

done:
	return 0;
}

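/*
 * Final outbound step: mark inline-offloaded packets for TX security
 * processing; for lookaside sessions just verify that the crypto op
 * completed successfully.
 */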
int
esp_outbound_post(struct rte_mbuf *m,
		  struct ipsec_sa *sa,
		  struct rte_crypto_op *cop)
{
	enum rte_security_session_action_type type;
	RTE_ASSERT(m != NULL);
	RTE_ASSERT(sa != NULL);

	type = ipsec_get_action_type(sa);

	if ((type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) ||
			(type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)) {
		m->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
	} else {
		RTE_ASSERT(cop != NULL);
		if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
			RTE_LOG(ERR, IPSEC_ESP, "%s() failed crypto op\n",
				__func__);
			return -1;
		}
	}

	return 0;
}