xref: /dpdk/examples/ipsec-secgw/esp.c (revision 68a03efeed657e6e05f281479b33b51102797e15)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <stdint.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <fcntl.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_random.h>

#include "ipsec.h"
#include "esp.h"
#include "ipip.h"

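/*
 * Prepare an inbound ESP packet for decryption: locate the payload behind
 * the outer IP header, ESP header and IV, and fill in the symmetric crypto
 * op (cipher/auth or AEAD regions, counter block, AAD and digest pointers)
 * for the lookaside crypto device. Inline-crypto sessions need none of
 * this, as the NIC has already decrypted the packet.
 */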
int
esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	struct ip *ip4;
	struct rte_crypto_sym_op *sym_cop;
	int32_t payload_len, ip_hdr_len;

	RTE_ASSERT(sa != NULL);
	if (ipsec_get_action_type(sa) ==
			RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)
		return 0;

	RTE_ASSERT(m != NULL);
	RTE_ASSERT(cop != NULL);

	ip4 = rte_pktmbuf_mtod(m, struct ip *);
	if (likely(ip4->ip_v == IPVERSION))
		ip_hdr_len = ip4->ip_hl * 4;
	else if (ip4->ip_v == IP6_VERSION)
		/* XXX No option headers supported */
		ip_hdr_len = sizeof(struct ip6_hdr);
	else {
		RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n",
				ip4->ip_v);
		return -EINVAL;
	}

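	/*
	 * The encrypted payload is everything between the IV and the ICV:
	 * total packet length minus outer IP header, ESP header, IV and
	 * digest. It must be a positive multiple of the cipher block size.
	 */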
	payload_len = rte_pktmbuf_pkt_len(m) - ip_hdr_len -
		sizeof(struct rte_esp_hdr) - sa->iv_len - sa->digest_len;

	if ((payload_len & (sa->block_size - 1)) || (payload_len <= 0)) {
		RTE_LOG_DP(DEBUG, IPSEC_ESP, "payload %d not multiple of %u\n",
				payload_len, sa->block_size);
		return -EINVAL;
	}

	sym_cop = get_sym_cop(cop);
	sym_cop->m_src = m;

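	/*
	 * AES-GCM (AEAD): the ciphered region starts after the IV; the AAD
	 * is the 8-byte ESP header (SPI + sequence number) and the counter
	 * block is salt || IV || 1, per RFC 4106.
	 */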
	if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
		sym_cop->aead.data.offset =
			ip_hdr_len + sizeof(struct rte_esp_hdr) + sa->iv_len;
		sym_cop->aead.data.length = payload_len;

		struct cnt_blk *icb;
		uint8_t *aad;
		uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len +
					sizeof(struct rte_esp_hdr));

		icb = get_cnt_blk(m);
		icb->salt = sa->salt;
		memcpy(&icb->iv, iv, 8);
		icb->cnt = rte_cpu_to_be_32(1);

		aad = get_aad(m);
		memcpy(aad, iv - sizeof(struct rte_esp_hdr), 8);
		sym_cop->aead.aad.data = aad;
		sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m,
				aad - rte_pktmbuf_mtod(m, uint8_t *));

		sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, void*,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
		sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
	} else {
		sym_cop->cipher.data.offset = ip_hdr_len +
			sizeof(struct rte_esp_hdr) +
			sa->iv_len;
		sym_cop->cipher.data.length = payload_len;

		struct cnt_blk *icb;
		uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len +
					sizeof(struct rte_esp_hdr));
		uint8_t *iv_ptr = rte_crypto_op_ctod_offset(cop,
					uint8_t *, IV_OFFSET);

		switch (sa->cipher_algo) {
		case RTE_CRYPTO_CIPHER_NULL:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CBC:
			/* Copy IV at the end of crypto operation */
			rte_memcpy(iv_ptr, iv, sa->iv_len);
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			icb = get_cnt_blk(m);
			icb->salt = sa->salt;
			memcpy(&icb->iv, iv, 8);
			icb->cnt = rte_cpu_to_be_32(1);
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n",
					sa->cipher_algo);
			return -EINVAL;
		}

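		/*
		 * The ICV covers the ESP header, IV and payload, so the
		 * auth region starts right after the outer IP header.
		 */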
		switch (sa->auth_algo) {
		case RTE_CRYPTO_AUTH_NULL:
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
			sym_cop->auth.data.offset = ip_hdr_len;
			sym_cop->auth.data.length = sizeof(struct rte_esp_hdr) +
				sa->iv_len + payload_len;
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n",
					sa->auth_algo);
			return -EINVAL;
		}

		sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, void*,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
		sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
	}

	return 0;
}

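/*
 * Post-process a decrypted inbound ESP packet: check the crypto (or inline
 * offload) status, validate and strip the ESP trailer, then restore the
 * inner packet (shift the IP header forward in transport mode, drop the
 * outer header in tunnel mode).
 */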
int
esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	struct ip *ip4, *ip;
	struct ip6_hdr *ip6;
	uint8_t *nexthdr, *pad_len;
	uint8_t *padding;
	uint16_t i;
	struct rte_ipsec_session *ips;

	RTE_ASSERT(m != NULL);
	RTE_ASSERT(sa != NULL);
	RTE_ASSERT(cop != NULL);

	ips = ipsec_get_primary_session(sa);

	if ((ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) ||
			(ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)) {
		if (m->ol_flags & PKT_RX_SEC_OFFLOAD) {
			if (m->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)
				cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
			else
				cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		} else
			cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	}

	if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
		RTE_LOG(ERR, IPSEC_ESP, "%s() failed crypto op\n", __func__);
		return -1;
	}

	if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO &&
		ips->security.ol_flags & RTE_SECURITY_RX_HW_TRAILER_OFFLOAD) {
		nexthdr = &m->inner_esp_next_proto;
	} else {
		nexthdr = rte_pktmbuf_mtod_offset(m, uint8_t*,
				rte_pktmbuf_pkt_len(m) - sa->digest_len - 1);
		pad_len = nexthdr - 1;

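		/*
		 * RFC 4303 mandates the monotonic padding bytes 1, 2, 3, ...;
		 * reject the packet if the trailer does not follow that scheme.
		 */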
		padding = pad_len - *pad_len;
		for (i = 0; i < *pad_len; i++) {
			if (padding[i] != i + 1) {
				RTE_LOG(ERR, IPSEC_ESP, "invalid padding\n");
				return -EINVAL;
			}
		}

		if (rte_pktmbuf_trim(m, *pad_len + 2 + sa->digest_len)) {
			RTE_LOG(ERR, IPSEC_ESP,
					"failed to remove pad_len + digest\n");
			return -EINVAL;
		}
	}

	if (unlikely(IS_TRANSPORT(sa->flags))) {
		ip = rte_pktmbuf_mtod(m, struct ip *);
		ip4 = (struct ip *)rte_pktmbuf_adj(m,
				sizeof(struct rte_esp_hdr) + sa->iv_len);
		if (likely(ip->ip_v == IPVERSION)) {
			memmove(ip4, ip, ip->ip_hl * 4);
			ip4->ip_p = *nexthdr;
			ip4->ip_len = htons(rte_pktmbuf_data_len(m));
		} else {
			ip6 = (struct ip6_hdr *)ip4;
			/* XXX No option headers supported */
			memmove(ip6, ip, sizeof(struct ip6_hdr));
			ip6->ip6_nxt = *nexthdr;
			ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
					      sizeof(struct ip6_hdr));
		}
	} else
		ipip_inbound(m, sizeof(struct rte_esp_hdr) + sa->iv_len);

	return 0;
}

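/*
 * Prepare an outbound packet for ESP encryption: append the RFC 4303
 * trailer padding, add the tunnel or transport encapsulation with a fresh
 * sequence number and IV, and fill in the crypto op unless an inline-crypto
 * device performs the cipher on transmit.
 */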
int
esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	struct ip *ip4;
	struct ip6_hdr *ip6;
	struct rte_esp_hdr *esp = NULL;
	uint8_t *padding = NULL, *new_ip, nlp;
	struct rte_crypto_sym_op *sym_cop;
	int32_t i;
	uint16_t pad_payload_len, pad_len, ip_hdr_len;
	struct rte_ipsec_session *ips;

	RTE_ASSERT(m != NULL);
	RTE_ASSERT(sa != NULL);

	ips = ipsec_get_primary_session(sa);
	ip_hdr_len = 0;

	ip4 = rte_pktmbuf_mtod(m, struct ip *);
	if (likely(ip4->ip_v == IPVERSION)) {
		if (unlikely(IS_TRANSPORT(sa->flags))) {
			ip_hdr_len = ip4->ip_hl * 4;
			nlp = ip4->ip_p;
		} else
			nlp = IPPROTO_IPIP;
	} else if (ip4->ip_v == IP6_VERSION) {
		if (unlikely(IS_TRANSPORT(sa->flags))) {
			/* XXX No option headers supported */
			ip_hdr_len = sizeof(struct ip6_hdr);
			ip6 = (struct ip6_hdr *)ip4;
			nlp = ip6->ip6_nxt;
		} else
			nlp = IPPROTO_IPV6;
	} else {
		RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n",
				ip4->ip_v);
		return -EINVAL;
	}

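	/*
	 * Padded payload length: the plaintext payload plus the 2-byte
	 * pad-length/next-header trailer, rounded up to the cipher block
	 * size; pad_len is the number of bytes to append (padding plus
	 * those 2 trailer bytes).
	 */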
	pad_payload_len = RTE_ALIGN_CEIL(rte_pktmbuf_pkt_len(m) -
			ip_hdr_len + 2, sa->block_size);
	pad_len = pad_payload_len + ip_hdr_len - rte_pktmbuf_pkt_len(m);

	RTE_ASSERT(IS_TUNNEL(sa->flags) || IS_TRANSPORT(sa->flags));

	if (likely(IS_IP4_TUNNEL(sa->flags)))
		ip_hdr_len = sizeof(struct ip);
	else if (IS_IP6_TUNNEL(sa->flags))
		ip_hdr_len = sizeof(struct ip6_hdr);
	else if (!IS_TRANSPORT(sa->flags)) {
		RTE_LOG(ERR, IPSEC_ESP, "Unsupported SA flags: 0x%x\n",
				sa->flags);
		return -EINVAL;
	}

	/* Check maximum packet size */
	if (unlikely(ip_hdr_len + sizeof(struct rte_esp_hdr) + sa->iv_len +
			pad_payload_len + sa->digest_len > IP_MAXPACKET)) {
		RTE_LOG(ERR, IPSEC_ESP, "ipsec packet is too big\n");
		return -EINVAL;
	}

	/* Add trailer padding if it is not constructed by HW */
	if (ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
			!(ips->security.ol_flags &
				RTE_SECURITY_TX_HW_TRAILER_OFFLOAD)) {
		padding = (uint8_t *)rte_pktmbuf_append(m, pad_len +
							sa->digest_len);
		if (unlikely(padding == NULL)) {
			RTE_LOG(ERR, IPSEC_ESP,
					"not enough mbuf trailing space\n");
			return -ENOSPC;
		}
		rte_prefetch0(padding);
	}

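	/*
	 * Encapsulate: tunnel modes prepend a new outer IP header followed
	 * by the ESP header and IV; transport mode shifts the original IP
	 * header forward to make room for them.
	 */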
	switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
	case IP4_TUNNEL:
		ip4 = ip4ip_outbound(m, sizeof(struct rte_esp_hdr) + sa->iv_len,
				&sa->src, &sa->dst);
		esp = (struct rte_esp_hdr *)(ip4 + 1);
		break;
	case IP6_TUNNEL:
		ip6 = ip6ip_outbound(m, sizeof(struct rte_esp_hdr) + sa->iv_len,
				&sa->src, &sa->dst);
		esp = (struct rte_esp_hdr *)(ip6 + 1);
		break;
	case TRANSPORT:
		new_ip = (uint8_t *)rte_pktmbuf_prepend(m,
				sizeof(struct rte_esp_hdr) + sa->iv_len);
		if (unlikely(new_ip == NULL)) {
			RTE_LOG(ERR, IPSEC_ESP,
					"not enough mbuf head room\n");
			return -ENOSPC;
		}
		memmove(new_ip, ip4, ip_hdr_len);
		esp = (struct rte_esp_hdr *)(new_ip + ip_hdr_len);
		ip4 = (struct ip *)new_ip;
		if (likely(ip4->ip_v == IPVERSION)) {
			ip4->ip_p = IPPROTO_ESP;
			ip4->ip_len = htons(rte_pktmbuf_data_len(m));
		} else {
			ip6 = (struct ip6_hdr *)new_ip;
			ip6->ip6_nxt = IPPROTO_ESP;
			ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
					      sizeof(struct ip6_hdr));
		}
		break;
	}

	sa->seq++;
	esp->spi = rte_cpu_to_be_32(sa->spi);
	esp->seq = rte_cpu_to_be_32((uint32_t)sa->seq);

	/* Set the per-packet IV (the sequence number for AES-GCM/CTR) */
	uint64_t *iv = (uint64_t *)(esp + 1);
	if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
		*iv = rte_cpu_to_be_64(sa->seq);
	} else {
		switch (sa->cipher_algo) {
		case RTE_CRYPTO_CIPHER_NULL:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CBC:
			memset(iv, 0, sa->iv_len);
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			*iv = rte_cpu_to_be_64(sa->seq);
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP,
				"unsupported cipher algorithm %u\n",
				sa->cipher_algo);
			return -EINVAL;
		}
	}

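	/*
	 * With inline crypto the NIC encrypts on transmit; only the trailer
	 * (when not built by HW) needs to be filled in here, and no
	 * lookaside crypto op is prepared.
	 */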
	if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
		if (ips->security.ol_flags &
				RTE_SECURITY_TX_HW_TRAILER_OFFLOAD) {
			/* Set the inner esp next protocol for HW trailer */
			m->inner_esp_next_proto = nlp;
			m->packet_type |= RTE_PTYPE_TUNNEL_ESP;
		} else {
			padding[pad_len - 2] = pad_len - 2;
			padding[pad_len - 1] = nlp;
		}
		goto done;
	}

	RTE_ASSERT(cop != NULL);
	sym_cop = get_sym_cop(cop);
	sym_cop->m_src = m;

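	/*
	 * AES-GCM (AEAD): cipher the padded payload only; the 8-byte ESP
	 * header (SPI + sequence number) goes in as AAD and the counter
	 * block is salt || IV || 1, mirroring the inbound setup.
	 */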
	if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
		uint8_t *aad;

		sym_cop->aead.data.offset = ip_hdr_len +
			sizeof(struct rte_esp_hdr) + sa->iv_len;
		sym_cop->aead.data.length = pad_payload_len;

		/* Fill pad_len using default sequential scheme */
		for (i = 0; i < pad_len - 2; i++)
			padding[i] = i + 1;
		padding[pad_len - 2] = pad_len - 2;
		padding[pad_len - 1] = nlp;

		struct cnt_blk *icb = get_cnt_blk(m);
		icb->salt = sa->salt;
		icb->iv = rte_cpu_to_be_64(sa->seq);
		icb->cnt = rte_cpu_to_be_32(1);

		aad = get_aad(m);
		memcpy(aad, esp, 8);
		sym_cop->aead.aad.data = aad;
		sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m,
				aad - rte_pktmbuf_mtod(m, uint8_t *));

		sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
			rte_pktmbuf_pkt_len(m) - sa->digest_len);
		sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
			rte_pktmbuf_pkt_len(m) - sa->digest_len);
	} else {
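		/*
		 * For CBC ciphers the ciphered region starts at the (zeroed)
		 * IV field; for AES-CTR it starts after the IV, which
		 * instead seeds the counter block built below.
		 */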
		switch (sa->cipher_algo) {
		case RTE_CRYPTO_CIPHER_NULL:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CBC:
			sym_cop->cipher.data.offset = ip_hdr_len +
				sizeof(struct rte_esp_hdr);
			sym_cop->cipher.data.length = pad_payload_len + sa->iv_len;
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			sym_cop->cipher.data.offset = ip_hdr_len +
				sizeof(struct rte_esp_hdr) + sa->iv_len;
			sym_cop->cipher.data.length = pad_payload_len;
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n",
					sa->cipher_algo);
			return -EINVAL;
		}

		/* Fill pad_len using default sequential scheme */
		for (i = 0; i < pad_len - 2; i++)
			padding[i] = i + 1;
		padding[pad_len - 2] = pad_len - 2;
		padding[pad_len - 1] = nlp;

		struct cnt_blk *icb = get_cnt_blk(m);
		icb->salt = sa->salt;
		icb->iv = rte_cpu_to_be_64(sa->seq);
		icb->cnt = rte_cpu_to_be_32(1);

		switch (sa->auth_algo) {
		case RTE_CRYPTO_AUTH_NULL:
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
			sym_cop->auth.data.offset = ip_hdr_len;
			sym_cop->auth.data.length = sizeof(struct rte_esp_hdr) +
				sa->iv_len + pad_payload_len;
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n",
					sa->auth_algo);
			return -EINVAL;
		}

		sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
		sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
	}

done:
	return 0;
}

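/*
 * Post-process an outbound packet: for inline sessions just mark the mbuf
 * for security offload on TX; for lookaside sessions verify that the
 * crypto operation succeeded.
 */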
int
esp_outbound_post(struct rte_mbuf *m,
		  struct ipsec_sa *sa,
		  struct rte_crypto_op *cop)
{
	enum rte_security_session_action_type type;

	RTE_ASSERT(m != NULL);
	RTE_ASSERT(sa != NULL);

	type = ipsec_get_action_type(sa);

	if ((type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) ||
			(type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)) {
		m->ol_flags |= PKT_TX_SEC_OFFLOAD;
	} else {
		RTE_ASSERT(cop != NULL);
		if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
			RTE_LOG(ERR, IPSEC_ESP, "%s() failed crypto op\n",
				__func__);
			return -1;
		}
	}

	return 0;
}
479