/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */
#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <rte_branch_prediction.h>
#include <rte_event_crypto_adapter.h>
#include <rte_log.h>
#include <rte_crypto.h>
#include <rte_security.h>
#include <rte_cryptodev.h>
#include <rte_ipsec.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_hash.h>

#include "ipsec.h"
#include "esp.h"

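/*
 * Fill the security IPsec transform with the SA's tunnel endpoints
 * and per-SA options (replay window, ESN, UDP encapsulation and
 * inline IP reassembly).
 */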
static inline void
set_ipsec_conf(struct ipsec_sa *sa, struct rte_security_ipsec_xform *ipsec)
{
	if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
		struct rte_security_ipsec_tunnel_param *tunnel =
				&ipsec->tunnel;
		if (IS_IP4_TUNNEL(sa->flags)) {
			tunnel->type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV4;
			tunnel->ipv4.ttl = IPDEFTTL;

			memcpy((uint8_t *)&tunnel->ipv4.src_ip,
				(uint8_t *)&sa->src.ip.ip4, 4);

			memcpy((uint8_t *)&tunnel->ipv4.dst_ip,
				(uint8_t *)&sa->dst.ip.ip4, 4);
		} else if (IS_IP6_TUNNEL(sa->flags)) {
			tunnel->type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV6;
			tunnel->ipv6.hlimit = IPDEFTTL;
			tunnel->ipv6.dscp = 0;
			tunnel->ipv6.flabel = 0;

			memcpy((uint8_t *)&tunnel->ipv6.src_addr,
				(uint8_t *)&sa->src.ip.ip6.ip6_b, 16);

			memcpy((uint8_t *)&tunnel->ipv6.dst_addr,
				(uint8_t *)&sa->dst.ip.ip6.ip6_b, 16);
		}
		/* TODO support for Transport */
	}
	ipsec->replay_win_sz = app_sa_prm.window_size;
	ipsec->options.esn = app_sa_prm.enable_esn;
	ipsec->options.udp_encap = sa->udp_encap;
	if (IS_HW_REASSEMBLY_EN(sa->flags))
		ipsec->options.ip_reassembly_en = 1;
}

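/*
 * Walk the cryptodev capability array and check that the device
 * supports the given symmetric transform (AEAD, cipher or auth)
 * with the requested key, digest, AAD and IV sizes.
 */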
static inline int
verify_crypto_xform(const struct rte_cryptodev_capabilities *capabilities,
		struct rte_crypto_sym_xform *crypto_xform)
{
	const struct rte_cryptodev_capabilities *crypto_cap;
	int j = 0;

	while ((crypto_cap = &capabilities[j++])->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (crypto_cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
				crypto_cap->sym.xform_type == crypto_xform->type) {
			if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
					crypto_cap->sym.aead.algo == crypto_xform->aead.algo) {
				if (rte_cryptodev_sym_capability_check_aead(&crypto_cap->sym,
						crypto_xform->aead.key.length,
						crypto_xform->aead.digest_length,
						crypto_xform->aead.aad_length,
						crypto_xform->aead.iv.length) == 0)
					return 0;
			}
			if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
					crypto_cap->sym.cipher.algo == crypto_xform->cipher.algo) {
				if (rte_cryptodev_sym_capability_check_cipher(&crypto_cap->sym,
						crypto_xform->cipher.key.length,
						crypto_xform->cipher.iv.length) == 0)
					return 0;
			}
			if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
					crypto_cap->sym.auth.algo == crypto_xform->auth.algo) {
				if (rte_cryptodev_sym_capability_check_auth(&crypto_cap->sym,
						crypto_xform->auth.key.length,
						crypto_xform->auth.digest_length,
						crypto_xform->auth.iv.length) == 0)
					return 0;
			}
		}
	}

	return -ENOTSUP;
}

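/*
 * Verify a transform chain against the device capabilities; the
 * chain holds at most two transforms (e.g. cipher and auth).
 */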
static inline int
verify_crypto_capabilities(const struct rte_cryptodev_capabilities *capabilities,
		struct rte_crypto_sym_xform *crypto_xform)
{
	if (crypto_xform->next != NULL)
		return (verify_crypto_xform(capabilities, crypto_xform) ||
		    verify_crypto_xform(capabilities, crypto_xform->next));
	else
		return verify_crypto_xform(capabilities, crypto_xform);
}

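/*
 * Check each IPsec option requested for the SA against the options
 * advertised in the device security capability, returning -ENOTSUP
 * on the first unsupported option.
 */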
static inline int
verify_ipsec_capabilities(struct rte_security_ipsec_xform *ipsec_xform,
		const struct rte_security_capability *sec_cap)
{
	if (ipsec_xform->options.esn == 1 && sec_cap->ipsec.options.esn == 0) {
		RTE_LOG(INFO, USER1, "ESN is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.udp_encap == 1 &&
	    sec_cap->ipsec.options.udp_encap == 0) {
		RTE_LOG(INFO, USER1, "UDP encapsulation is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.udp_ports_verify == 1 &&
	    sec_cap->ipsec.options.udp_ports_verify == 0) {
		RTE_LOG(DEBUG, USER1,
			"UDP encapsulation ports verification is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.copy_dscp == 1 &&
	    sec_cap->ipsec.options.copy_dscp == 0) {
		RTE_LOG(DEBUG, USER1, "Copy DSCP is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.copy_flabel == 1 &&
	    sec_cap->ipsec.options.copy_flabel == 0) {
		RTE_LOG(DEBUG, USER1, "Copy Flow Label is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.copy_df == 1 &&
	    sec_cap->ipsec.options.copy_df == 0) {
		RTE_LOG(DEBUG, USER1, "Copy DF bit is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.dec_ttl == 1 &&
	    sec_cap->ipsec.options.dec_ttl == 0) {
		RTE_LOG(DEBUG, USER1, "Decrement TTL is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.ecn == 1 && sec_cap->ipsec.options.ecn == 0) {
		RTE_LOG(DEBUG, USER1, "ECN is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.stats == 1 &&
	    sec_cap->ipsec.options.stats == 0) {
		RTE_LOG(DEBUG, USER1, "Stats are not supported\n");
		return -ENOTSUP;
	}

	if ((ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) &&
	    (ipsec_xform->options.iv_gen_disable == 1) &&
	    (sec_cap->ipsec.options.iv_gen_disable != 1)) {
		RTE_LOG(DEBUG, USER1, "Application-provided IV is not supported\n");
		return -ENOTSUP;
	}

	if ((ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
	    (ipsec_xform->options.tunnel_hdr_verify >
	    sec_cap->ipsec.options.tunnel_hdr_verify)) {
		RTE_LOG(DEBUG, USER1, "Tunnel header verification is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.ip_csum_enable == 1 &&
	    sec_cap->ipsec.options.ip_csum_enable == 0) {
		RTE_LOG(DEBUG, USER1, "Inner IP checksum is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.l4_csum_enable == 1 &&
	    sec_cap->ipsec.options.l4_csum_enable == 0) {
		RTE_LOG(DEBUG, USER1, "Inner L4 checksum is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		if (ipsec_xform->replay_win_sz > sec_cap->ipsec.replay_win_sz_max) {
			RTE_LOG(DEBUG, USER1, "Replay window size is not supported\n");
			return -ENOTSUP;
		}
	}

	return 0;
}

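/*
 * Look up the security capability matching the session configuration
 * and verify both its crypto and IPsec parts. On success, optionally
 * report the device offload flags through ol_flags.
 */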
static inline int
verify_security_capabilities(struct rte_security_ctx *ctx,
		struct rte_security_session_conf *sess_conf,
		uint32_t *ol_flags)
{
	struct rte_security_capability_idx sec_cap_idx;
	const struct rte_security_capability *sec_cap;

	sec_cap_idx.action = sess_conf->action_type;
	sec_cap_idx.protocol = sess_conf->protocol;
	sec_cap_idx.ipsec.proto = sess_conf->ipsec.proto;
	sec_cap_idx.ipsec.mode = sess_conf->ipsec.mode;
	sec_cap_idx.ipsec.direction = sess_conf->ipsec.direction;

	sec_cap = rte_security_capability_get(ctx, &sec_cap_idx);
	if (sec_cap == NULL)
		return -ENOTSUP;

	if (verify_crypto_capabilities(sec_cap->crypto_capabilities,
				sess_conf->crypto_xform))
		return -ENOTSUP;

	if (verify_ipsec_capabilities(&sess_conf->ipsec, sec_cap))
		return -ENOTSUP;

	if (ol_flags != NULL)
		*ol_flags = sec_cap->ol_flags;

	return 0;
}

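/*
 * Create a lookaside session (crypto or security) for the SA on the
 * cryptodev selected by the per-lcore cdev mapping and, when the
 * event crypto adapter is enabled, attach its metadata to the session.
 */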
int
create_lookaside_session(struct ipsec_ctx *ipsec_ctx_lcore[],
	struct socket_ctx *skt_ctx, const struct eventmode_conf *em_conf,
	struct ipsec_sa *sa, struct rte_ipsec_session *ips)
{
	uint16_t cdev_id = RTE_CRYPTO_MAX_DEVS;
	enum rte_crypto_op_sess_type sess_type;
	enum rte_crypto_op_type op_type;
	unsigned long cdev_id_qp = 0;
	struct ipsec_ctx *ipsec_ctx;
	struct cdev_key key = { 0 };
	void *sess = NULL;
	uint32_t lcore_id;
	int32_t ret = 0;

	RTE_LCORE_FOREACH(lcore_id) {
		ipsec_ctx = ipsec_ctx_lcore[lcore_id];

		/* Core is not bound to any cryptodev, skip it */
		if (ipsec_ctx->cdev_map == NULL)
			continue;

		/* Look for a cryptodev that can handle this SA */
		key.lcore_id = (uint8_t)lcore_id;
		key.cipher_algo = (uint8_t)sa->cipher_algo;
		key.auth_algo = (uint8_t)sa->auth_algo;
		key.aead_algo = (uint8_t)sa->aead_algo;

		ret = rte_hash_lookup_data(ipsec_ctx->cdev_map, &key,
				(void **)&cdev_id_qp);
		if (ret == -ENOENT)
			continue;
		if (ret < 0) {
			RTE_LOG(ERR, IPSEC,
					"No cryptodev: core %u, cipher_algo %u, "
					"auth_algo %u, aead_algo %u\n",
					key.lcore_id,
					key.cipher_algo,
					key.auth_algo,
					key.aead_algo);
			return ret;
		}

		/* Verify that all cores use the same cryptodev for the
		 * algorithm combination required by this SA.
		 * The current cryptodev mapping process maps an SA to the
		 * first cryptodev that matches its requirements, so this is
		 * a double check, not an additional restriction.
		 */
		if (cdev_id == RTE_CRYPTO_MAX_DEVS)
			cdev_id = ipsec_ctx->tbl[cdev_id_qp].id;
		else if (cdev_id != ipsec_ctx->tbl[cdev_id_qp].id) {
			RTE_LOG(ERR, IPSEC,
					"SA mapping to multiple cryptodevs is "
					"not supported!\n");
			return -EINVAL;
		}

		/* Store per core queue pair information */
		sa->cqp[lcore_id] = &ipsec_ctx->tbl[cdev_id_qp];
	}
	if (cdev_id == RTE_CRYPTO_MAX_DEVS) {
		RTE_LOG(WARNING, IPSEC, "No cores found to handle SA\n");
		return 0;
	}

	RTE_LOG(DEBUG, IPSEC, "Create session for SA spi %u on cryptodev "
			"%u\n", sa->spi, cdev_id);

	if (ips->type != RTE_SECURITY_ACTION_TYPE_NONE &&
		ips->type != RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) {
		struct rte_security_session_conf sess_conf = {
			.action_type = ips->type,
			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
			{.ipsec = {
				.spi = sa->spi,
				.salt = sa->salt,
				.options = { 0 },
				.replay_win_sz = 0,
				.direction = sa->direction,
				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
				.mode = (IS_TUNNEL(sa->flags)) ?
					RTE_SECURITY_IPSEC_SA_MODE_TUNNEL :
					RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
			} },
			.crypto_xform = sa->xforms,
			.userdata = NULL,
		};

		if (ips->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
			struct rte_security_ctx *ctx = (struct rte_security_ctx *)
							rte_cryptodev_get_sec_ctx(
							cdev_id);

			/* Set IPsec parameters in conf */
			set_ipsec_conf(sa, &(sess_conf.ipsec));

			if (verify_security_capabilities(ctx, &sess_conf, NULL)) {
				RTE_LOG(ERR, IPSEC,
					"Requested security session config not supported\n");
				return -1;
			}

			ips->security.ses = rte_security_session_create(ctx,
					&sess_conf, skt_ctx->session_pool);
			if (ips->security.ses == NULL) {
				RTE_LOG(ERR, IPSEC,
					"SEC Session init failed\n");
				return -1;
			}
			ips->security.ctx = ctx;

			sess = ips->security.ses;
			op_type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
			sess_type = RTE_CRYPTO_OP_SECURITY_SESSION;
		} else {
			RTE_LOG(ERR, IPSEC, "Inline not supported\n");
			return -1;
		}
	} else {
		struct rte_cryptodev_info info;

		rte_cryptodev_info_get(cdev_id, &info);

		if (ips->type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) {
			if (!(info.feature_flags &
				RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO))
				return -ENOTSUP;
		}

		if (verify_crypto_capabilities(info.capabilities, sa->xforms)) {
			RTE_LOG(ERR, IPSEC,
				"Requested crypto session config not supported\n");
			return -1;
		}

		ips->crypto.dev_id = cdev_id;
		ips->crypto.ses = rte_cryptodev_sym_session_create(cdev_id,
				sa->xforms, skt_ctx->session_pool);
		if (ips->crypto.ses == NULL) {
			RTE_LOG(ERR, IPSEC, "SYM Session init failed\n");
			return -1;
		}
	}

	/* Set up the metadata required by the event crypto adapter */
	if (em_conf->enable_event_crypto_adapter && sess != NULL) {
		union rte_event_crypto_metadata m_data;
		const struct eventdev_params *eventdev_conf;

		eventdev_conf = &(em_conf->eventdev_config[0]);
		memset(&m_data, 0, sizeof(m_data));

		/* Fill in response information */
		m_data.response_info.sched_type = em_conf->ext_params.sched_type;
		m_data.response_info.op = RTE_EVENT_OP_NEW;
		m_data.response_info.queue_id = eventdev_conf->ev_cpt_queue_id;

		/* Fill in request information */
		m_data.request_info.cdev_id = cdev_id;
		m_data.request_info.queue_pair_id = 0;

		/* Attach meta info to session */
		rte_cryptodev_session_event_mdata_set(cdev_id, sess, op_type,
				sess_type, &m_data, sizeof(m_data));
	}

	return 0;
}

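/*
 * Create an inline (crypto or protocol) security session on the
 * ethernet port bound to the SA. For inline crypto, also install
 * the rte_flow rule that steers the SA's ESP traffic into the
 * security session.
 */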
int
create_inline_session(struct socket_ctx *skt_ctx, struct ipsec_sa *sa,
		struct rte_ipsec_session *ips)
{
	struct rte_security_ctx *sec_ctx;
	struct rte_security_session_conf sess_conf = {
		.action_type = ips->type,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		{.ipsec = {
			.spi = sa->spi,
			.salt = sa->salt,
			.options = { 0 },
			.replay_win_sz = 0,
			.direction = sa->direction,
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP
		} },
		.crypto_xform = sa->xforms,
		.userdata = NULL,
	};

	if (IS_TRANSPORT(sa->flags)) {
		sess_conf.ipsec.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT;
		if (IS_IP4(sa->flags)) {
			sess_conf.ipsec.tunnel.type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV4;

			sess_conf.ipsec.tunnel.ipv4.src_ip.s_addr =
				sa->src.ip.ip4;
			sess_conf.ipsec.tunnel.ipv4.dst_ip.s_addr =
				sa->dst.ip.ip4;
		} else if (IS_IP6(sa->flags)) {
			sess_conf.ipsec.tunnel.type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV6;

			memcpy(sess_conf.ipsec.tunnel.ipv6.src_addr.s6_addr,
				sa->src.ip.ip6.ip6_b, 16);
			memcpy(sess_conf.ipsec.tunnel.ipv6.dst_addr.s6_addr,
				sa->dst.ip.ip6.ip6_b, 16);
		}
	} else if (IS_TUNNEL(sa->flags)) {
		sess_conf.ipsec.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;

		if (IS_IP4(sa->flags)) {
			sess_conf.ipsec.tunnel.type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV4;

			sess_conf.ipsec.tunnel.ipv4.src_ip.s_addr =
				sa->src.ip.ip4;
			sess_conf.ipsec.tunnel.ipv4.dst_ip.s_addr =
				sa->dst.ip.ip4;
		} else if (IS_IP6(sa->flags)) {
			sess_conf.ipsec.tunnel.type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV6;

			memcpy(sess_conf.ipsec.tunnel.ipv6.src_addr.s6_addr,
				sa->src.ip.ip6.ip6_b, 16);
			memcpy(sess_conf.ipsec.tunnel.ipv6.dst_addr.s6_addr,
				sa->dst.ip.ip6.ip6_b, 16);
		} else {
			RTE_LOG(ERR, IPSEC, "invalid tunnel type\n");
			return -1;
		}
	}

	if (sa->udp_encap) {
		sess_conf.ipsec.options.udp_encap = 1;
		sess_conf.ipsec.udp.sport = htons(sa->udp.sport);
		sess_conf.ipsec.udp.dport = htons(sa->udp.dport);
	}

	if (sa->esn > 0) {
		sess_conf.ipsec.options.esn = 1;
		sess_conf.ipsec.esn.value = sa->esn;
	}

	RTE_LOG_DP(DEBUG, IPSEC, "Create session for SA spi %u on port %u\n",
		sa->spi, sa->portid);

	if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
		struct rte_flow_error err;
		int ret = 0;

		sec_ctx = (struct rte_security_ctx *)
					rte_eth_dev_get_sec_ctx(
					sa->portid);
		if (sec_ctx == NULL) {
			RTE_LOG(ERR, IPSEC,
				"rte_eth_dev_get_sec_ctx failed\n");
			return -1;
		}

		if (verify_security_capabilities(sec_ctx, &sess_conf,
					&ips->security.ol_flags)) {
			RTE_LOG(ERR, IPSEC,
				"Requested security session config not supported\n");
			return -1;
		}

		ips->security.ses = rte_security_session_create(sec_ctx,
				&sess_conf, skt_ctx->session_pool);
		if (ips->security.ses == NULL) {
			RTE_LOG(ERR, IPSEC,
				"SEC Session init failed\n");
			return -1;
		}

		ips->security.ctx = sec_ctx;
		sa->pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;

		if (IS_IP6(sa->flags)) {
			sa->pattern[1].mask = &rte_flow_item_ipv6_mask;
			sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV6;
			sa->pattern[1].spec = &sa->ipv6_spec;

			memcpy(sa->ipv6_spec.hdr.dst_addr,
				sa->dst.ip.ip6.ip6_b, 16);
			memcpy(sa->ipv6_spec.hdr.src_addr,
			       sa->src.ip.ip6.ip6_b, 16);
		} else if (IS_IP4(sa->flags)) {
			sa->pattern[1].mask = &rte_flow_item_ipv4_mask;
			sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
			sa->pattern[1].spec = &sa->ipv4_spec;

			sa->ipv4_spec.hdr.dst_addr = sa->dst.ip.ip4;
			sa->ipv4_spec.hdr.src_addr = sa->src.ip.ip4;
		}

		sa->esp_spec.hdr.spi = rte_cpu_to_be_32(sa->spi);

		if (sa->udp_encap) {
			sa->udp_spec.hdr.dst_port =
					rte_cpu_to_be_16(sa->udp.dport);
			sa->udp_spec.hdr.src_port =
					rte_cpu_to_be_16(sa->udp.sport);

			sa->pattern[2].mask = &rte_flow_item_udp_mask;
			sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_UDP;
			sa->pattern[2].spec = &sa->udp_spec;

			sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_ESP;
			sa->pattern[3].spec = &sa->esp_spec;
			sa->pattern[3].mask = &rte_flow_item_esp_mask;

			sa->pattern[4].type = RTE_FLOW_ITEM_TYPE_END;
		} else {
			sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
			sa->pattern[2].spec = &sa->esp_spec;
			sa->pattern[2].mask = &rte_flow_item_esp_mask;

			sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
		}

		sa->action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY;
		sa->action[0].conf = ips->security.ses;

		sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;

		sa->attr.egress = (sa->direction ==
				RTE_SECURITY_IPSEC_SA_DIR_EGRESS);
		sa->attr.ingress = (sa->direction ==
				RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
		if (sa->attr.ingress) {
			uint8_t rss_key[64];
			struct rte_eth_rss_conf rss_conf = {
				.rss_key = rss_key,
				.rss_key_len = sizeof(rss_key),
			};
			struct rte_eth_dev_info dev_info;
			uint16_t queue[RTE_MAX_QUEUES_PER_PORT];
			struct rte_flow_action_rss action_rss;
			unsigned int i;
			unsigned int j;

			/* Don't create a flow if the default flow was
			 * already created.
			 */
			if (flow_info_tbl[sa->portid].rx_def_flow)
				return 0;

			ret = rte_eth_dev_info_get(sa->portid, &dev_info);
			if (ret != 0) {
				RTE_LOG(ERR, IPSEC,
					"Error during getting device (port %u) info: %s\n",
					sa->portid, strerror(-ret));
				return ret;
			}

			sa->action[2].type = RTE_FLOW_ACTION_TYPE_END;
			/* Try RSS. */
			sa->action[1].type = RTE_FLOW_ACTION_TYPE_RSS;
			sa->action[1].conf = &action_rss;
			ret = rte_eth_dev_rss_hash_conf_get(sa->portid,
					&rss_conf);
			if (ret != 0) {
				RTE_LOG(ERR, IPSEC,
					"rte_eth_dev_rss_hash_conf_get:ret=%d\n",
					ret);
				return -1;
			}
			for (i = 0, j = 0; i < dev_info.nb_rx_queues; ++i)
				queue[j++] = i;

			action_rss = (struct rte_flow_action_rss){
					.types = rss_conf.rss_hf,
					.key_len = rss_conf.rss_key_len,
					.queue_num = j,
					.key = rss_key,
					.queue = queue,
			};
			ret = rte_flow_validate(sa->portid, &sa->attr,
						sa->pattern, sa->action,
						&err);
			if (!ret)
				goto flow_create;
			/* Try Queue. */
			sa->action[1].type = RTE_FLOW_ACTION_TYPE_QUEUE;
			sa->action[1].conf =
				&(struct rte_flow_action_queue){
				.index = 0,
			};
			ret = rte_flow_validate(sa->portid, &sa->attr,
						sa->pattern, sa->action,
						&err);
			/* Try End. */
			sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;
			sa->action[1].conf = NULL;
			ret = rte_flow_validate(sa->portid, &sa->attr,
						sa->pattern, sa->action,
						&err);
			if (ret)
				goto flow_create_failure;
		} else if (sa->attr.egress &&
				(ips->security.ol_flags &
					RTE_SECURITY_TX_HW_TRAILER_OFFLOAD)) {
			sa->action[1].type =
					RTE_FLOW_ACTION_TYPE_PASSTHRU;
			sa->action[2].type =
					RTE_FLOW_ACTION_TYPE_END;
		}
flow_create:
		sa->flow = rte_flow_create(sa->portid,
				&sa->attr, sa->pattern, sa->action, &err);
		if (sa->flow == NULL) {
flow_create_failure:
			RTE_LOG(ERR, IPSEC,
				"Failed to create ipsec flow msg: %s\n",
				err.message);
			return -1;
		}
	} else if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
		sec_ctx = (struct rte_security_ctx *)
				rte_eth_dev_get_sec_ctx(sa->portid);

		if (sec_ctx == NULL) {
			RTE_LOG(ERR, IPSEC,
				"Ethernet device doesn't have security features registered\n");
			return -1;
		}

		/* Set IPsec parameters in conf */
		set_ipsec_conf(sa, &(sess_conf.ipsec));

		/* Save the SA as userdata for the security session. When
		 * a packet is received, this userdata is retrieved using
		 * the metadata from the packet.
		 *
		 * The PMD is expected to set similar metadata for other
		 * operations, such as rte_eth_event, which are tied to the
		 * security session. In those cases the userdata can be used
		 * to uniquely identify the security parameters involved.
		 */
		sess_conf.userdata = (void *) sa;

		if (verify_security_capabilities(sec_ctx, &sess_conf,
					&ips->security.ol_flags)) {
			RTE_LOG(ERR, IPSEC,
				"Requested security session config not supported\n");
			return -1;
		}

		ips->security.ses = rte_security_session_create(sec_ctx,
					&sess_conf, skt_ctx->session_pool);
		if (ips->security.ses == NULL) {
			RTE_LOG(ERR, IPSEC,
				"SEC Session init failed\n");
			return -1;
		}

		ips->security.ctx = sec_ctx;
	}

	return 0;
}

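/*
 * Install a Flow Director rule steering ingress ESP packets of a
 * tunnel mode SA to the RX queue configured for that SA.
 */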
int
create_ipsec_esp_flow(struct ipsec_sa *sa)
{
	int ret = 0;
	struct rte_flow_error err = {};

	if (sa->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		RTE_LOG(ERR, IPSEC,
			"No Flow Director rule for egress traffic\n");
		return -1;
	}
	if (sa->flags == TRANSPORT) {
		RTE_LOG(ERR, IPSEC,
			"No Flow Director rule for transport mode\n");
		return -1;
	}
	sa->action[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
	sa->pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
	sa->action[0].conf = &(struct rte_flow_action_queue) {
				.index = sa->fdir_qid,
	};
	sa->attr.egress = 0;
	sa->attr.ingress = 1;
	if (IS_IP6(sa->flags)) {
		sa->pattern[1].mask = &rte_flow_item_ipv6_mask;
		sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV6;
		sa->pattern[1].spec = &sa->ipv6_spec;
		memcpy(sa->ipv6_spec.hdr.dst_addr,
			sa->dst.ip.ip6.ip6_b, sizeof(sa->dst.ip.ip6.ip6_b));
		memcpy(sa->ipv6_spec.hdr.src_addr,
			sa->src.ip.ip6.ip6_b, sizeof(sa->src.ip.ip6.ip6_b));
		sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
		sa->pattern[2].spec = &sa->esp_spec;
		sa->pattern[2].mask = &rte_flow_item_esp_mask;
		sa->esp_spec.hdr.spi = rte_cpu_to_be_32(sa->spi);
		sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
	} else if (IS_IP4(sa->flags)) {
		sa->pattern[1].mask = &rte_flow_item_ipv4_mask;
		sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
		sa->pattern[1].spec = &sa->ipv4_spec;
		sa->ipv4_spec.hdr.dst_addr = sa->dst.ip.ip4;
		sa->ipv4_spec.hdr.src_addr = sa->src.ip.ip4;
		sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
		sa->pattern[2].spec = &sa->esp_spec;
		sa->pattern[2].mask = &rte_flow_item_esp_mask;
		sa->esp_spec.hdr.spi = rte_cpu_to_be_32(sa->spi);
		sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
	}
	sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;

	ret = rte_flow_validate(sa->portid, &sa->attr, sa->pattern, sa->action,
				&err);
	if (ret < 0) {
		RTE_LOG(ERR, IPSEC, "Flow validation failed %s\n", err.message);
		return ret;
	}

	sa->flow = rte_flow_create(sa->portid, &sa->attr, sa->pattern,
					sa->action, &err);
	if (!sa->flow) {
		RTE_LOG(ERR, IPSEC, "Flow creation failed %s\n", err.message);
		return -1;
	}

	return 0;
}

/*
 * Enqueue buffered crypto ops to the cryptodev queue pair, dropping
 * the packets of any ops that could not be enqueued.
 */
void
enqueue_cop_burst(struct cdev_qp *cqp)
{
	uint32_t i, len, ret;

	len = cqp->len;
	ret = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp, cqp->buf, len);
	if (ret < len) {
		RTE_LOG_DP(DEBUG, IPSEC, "Cryptodev %u queue %u:"
			" enqueued %u crypto ops out of %u\n",
			cqp->id, cqp->qp, ret, len);
		/* drop packets that we fail to enqueue */
		for (i = ret; i < len; i++)
			free_pkts(&cqp->buf[i]->sym->m_src, 1);
	}
	cqp->in_flight += ret;
	cqp->len = 0;
}

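/*
 * Buffer a crypto op on the queue pair and flush the buffer once a
 * full burst has been collected.
 */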
static inline void
enqueue_cop(struct cdev_qp *cqp, struct rte_crypto_op *cop)
{
	cqp->buf[cqp->len++] = cop;

	if (cqp->len == MAX_PKT_BURST)
		enqueue_cop_burst(cqp);
}

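/*
 * Prepare a crypto op for each packet according to its SA's action
 * type and enqueue it to the lcore's crypto queue pair; packets on
 * inline SAs bypass the cryptodev and are staged in ol_pkts instead.
 */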
static inline void
ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
		struct rte_mbuf *pkts[], void *sas[],
		uint16_t nb_pkts)
{
	int32_t ret = 0, i;
	struct ipsec_mbuf_metadata *priv;
	struct rte_crypto_sym_op *sym_cop;
	struct ipsec_sa *sa;
	struct rte_ipsec_session *ips;

	for (i = 0; i < nb_pkts; i++) {
		if (unlikely(sas[i] == NULL)) {
			free_pkts(&pkts[i], 1);
			continue;
		}

		rte_prefetch0(sas[i]);
		rte_prefetch0(pkts[i]);

		priv = get_priv(pkts[i]);
		sa = ipsec_mask_saptr(sas[i]);
		priv->sa = sa;
		ips = ipsec_get_primary_session(sa);

		switch (ips->type) {
		case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
			priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
			priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

			rte_prefetch0(&priv->sym_cop);

			if (unlikely(ips->security.ses == NULL)) {
				free_pkts(&pkts[i], 1);
				continue;
			}

			/* Drop ESP-in-UDP packets when the SA has no UDP
			 * encapsulation configured.
			 */
			if (unlikely((pkts[i]->packet_type &
					(RTE_PTYPE_TUNNEL_MASK |
					RTE_PTYPE_L4_MASK)) ==
					MBUF_PTYPE_TUNNEL_ESP_IN_UDP &&
					sa->udp_encap != 1)) {
				free_pkts(&pkts[i], 1);
				continue;
			}

			sym_cop = get_sym_cop(&priv->cop);
			sym_cop->m_src = pkts[i];

			rte_security_attach_session(&priv->cop,
				ips->security.ses);
			break;

		case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
			RTE_LOG(ERR, IPSEC,
				"CPU crypto is not supported in legacy mode\n");
			free_pkts(&pkts[i], 1);
			continue;

		case RTE_SECURITY_ACTION_TYPE_NONE:
			priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
			priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

			rte_prefetch0(&priv->sym_cop);

			if (unlikely(ips->crypto.ses == NULL)) {
				free_pkts(&pkts[i], 1);
				continue;
			}

			rte_crypto_op_attach_sym_session(&priv->cop,
					ips->crypto.ses);

			ret = xform_func(pkts[i], sa, &priv->cop);
			if (unlikely(ret)) {
				free_pkts(&pkts[i], 1);
				continue;
			}
			break;
		case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
			RTE_ASSERT(ips->security.ses != NULL);
			ipsec_ctx->ol_pkts[ipsec_ctx->ol_pkts_cnt++] = pkts[i];
			if (ips->security.ol_flags &
				RTE_SECURITY_TX_OLOAD_NEED_MDATA)
				rte_security_set_pkt_metadata(
					ips->security.ctx, ips->security.ses,
					pkts[i], NULL);
			continue;
		case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
			RTE_ASSERT(ips->security.ses != NULL);
			priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
			priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

			rte_prefetch0(&priv->sym_cop);
			rte_security_attach_session(&priv->cop,
					ips->security.ses);

			ret = xform_func(pkts[i], sa, &priv->cop);
			if (unlikely(ret)) {
				free_pkts(&pkts[i], 1);
				continue;
			}

			ipsec_ctx->ol_pkts[ipsec_ctx->ol_pkts_cnt++] = pkts[i];
			if (ips->security.ol_flags &
				RTE_SECURITY_TX_OLOAD_NEED_MDATA)
				rte_security_set_pkt_metadata(
					ips->security.ctx, ips->security.ses,
					pkts[i], NULL);
			continue;
		}

		enqueue_cop(sa->cqp[ipsec_ctx->lcore_id], &priv->cop);
	}
}

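/*
 * Drain packets staged by inline processing (ol_pkts), applying the
 * post-crypto transform to each packet before handing it back.
 */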
static inline int32_t
ipsec_inline_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
	      struct rte_mbuf *pkts[], uint16_t max_pkts)
{
	int32_t nb_pkts, ret;
	struct ipsec_mbuf_metadata *priv;
	struct ipsec_sa *sa;
	struct rte_mbuf *pkt;

	nb_pkts = 0;
	while (ipsec_ctx->ol_pkts_cnt > 0 && nb_pkts < max_pkts) {
		pkt = ipsec_ctx->ol_pkts[--ipsec_ctx->ol_pkts_cnt];
		rte_prefetch0(pkt);
		priv = get_priv(pkt);
		sa = priv->sa;
		ret = xform_func(pkt, sa, &priv->cop);
		if (unlikely(ret)) {
			free_pkts(&pkt, 1);
			continue;
		}
		pkts[nb_pkts++] = pkt;
	}

	return nb_pkts;
}

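/*
 * Poll the lcore's crypto queue pairs in round-robin order, apply
 * the post-crypto transform to completed ops and collect the
 * resulting packets.
 */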
static inline int
ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
	      struct rte_mbuf *pkts[], uint16_t max_pkts)
{
	int32_t nb_pkts = 0, ret = 0, i, j, nb_cops;
	struct ipsec_mbuf_metadata *priv;
	struct rte_crypto_op *cops[max_pkts];
	struct ipsec_sa *sa;
	struct rte_mbuf *pkt;

	for (i = 0; i < ipsec_ctx->nb_qps && nb_pkts < max_pkts; i++) {
		struct cdev_qp *cqp;

		cqp = &ipsec_ctx->tbl[ipsec_ctx->last_qp++];
		if (ipsec_ctx->last_qp == ipsec_ctx->nb_qps)
			ipsec_ctx->last_qp %= ipsec_ctx->nb_qps;

		if (cqp->in_flight == 0)
			continue;

		nb_cops = rte_cryptodev_dequeue_burst(cqp->id, cqp->qp,
				cops, max_pkts - nb_pkts);

		cqp->in_flight -= nb_cops;

		for (j = 0; j < nb_cops; j++) {
			pkt = cops[j]->sym->m_src;
			rte_prefetch0(pkt);

			priv = get_priv(pkt);
			sa = priv->sa;

			RTE_ASSERT(sa != NULL);

			if (ipsec_get_action_type(sa) ==
				RTE_SECURITY_ACTION_TYPE_NONE) {
				ret = xform_func(pkt, sa, cops[j]);
				if (unlikely(ret)) {
					free_pkts(&pkt, 1);
					continue;
				}
			} else if (ipsec_get_action_type(sa) ==
				RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
				if (cops[j]->status) {
					free_pkts(&pkt, 1);
					continue;
				}
			}
			pkts[nb_pkts++] = pkt;
		}
	}

	/* return packets */
	return nb_pkts;
}

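/*
 * Inbound data path: look up the SA for each packet, enqueue the
 * packets for decryption and return those already completed by
 * inline processing. Lookaside completions are collected separately;
 * a typical call pattern from a worker loop (illustrative sketch,
 * variable names assumed):
 *
 *	nb = ipsec_inbound(ctx, pkts, nb_rx, MAX_PKT_BURST);
 *	nb += ipsec_inbound_cqp_dequeue(ctx, pkts + nb,
 *			MAX_PKT_BURST - nb);
 */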
uint16_t
ipsec_inbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
		uint16_t nb_pkts, uint16_t len)
{
	void *sas[nb_pkts];

	inbound_sa_lookup(ctx->sa_ctx, pkts, sas, nb_pkts);

	ipsec_enqueue(esp_inbound, ctx, pkts, sas, nb_pkts);

	return ipsec_inline_dequeue(esp_inbound_post, ctx, pkts, len);
}

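/*
 * Collect inbound packets whose crypto ops have completed on the
 * lcore's crypto queue pairs.
 */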
uint16_t
ipsec_inbound_cqp_dequeue(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
		uint16_t len)
{
	return ipsec_dequeue(esp_inbound_post, ctx, pkts, len);
}

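/*
 * Outbound data path: look up each packet's SA by index, enqueue the
 * packets for encryption and return those already completed by
 * inline processing.
 */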
uint16_t
ipsec_outbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
		uint32_t sa_idx[], uint16_t nb_pkts, uint16_t len)
{
	void *sas[nb_pkts];

	outbound_sa_lookup(ctx->sa_ctx, sa_idx, sas, nb_pkts);

	ipsec_enqueue(esp_outbound, ctx, pkts, sas, nb_pkts);

	return ipsec_inline_dequeue(esp_outbound_post, ctx, pkts, len);
}

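/*
 * Collect outbound packets whose crypto ops have completed on the
 * lcore's crypto queue pairs.
 */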
uint16_t
ipsec_outbound_cqp_dequeue(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
		uint16_t len)
{
	return ipsec_dequeue(esp_outbound_post, ctx, pkts, len);
}