/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */
#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <rte_branch_prediction.h>
#include <rte_event_crypto_adapter.h>
#include <rte_log.h>
#include <rte_crypto.h>
#include <rte_security.h>
#include <rte_cryptodev.h>
#include <rte_ipsec.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_hash.h>

#include "ipsec.h"
#include "esp.h"

static inline void
set_ipsec_conf(struct ipsec_sa *sa, struct rte_security_ipsec_xform *ipsec)
{
	if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
		struct rte_security_ipsec_tunnel_param *tunnel =
				&ipsec->tunnel;
		if (IS_IP4_TUNNEL(sa->flags)) {
			tunnel->type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV4;
			tunnel->ipv4.ttl = IPDEFTTL;

			memcpy((uint8_t *)&tunnel->ipv4.src_ip,
				(uint8_t *)&sa->src.ip.ip4, 4);

			memcpy((uint8_t *)&tunnel->ipv4.dst_ip,
				(uint8_t *)&sa->dst.ip.ip4, 4);
		} else if (IS_IP6_TUNNEL(sa->flags)) {
			tunnel->type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV6;
			tunnel->ipv6.hlimit = IPDEFTTL;
			tunnel->ipv6.dscp = 0;
			tunnel->ipv6.flabel = 0;

			memcpy((uint8_t *)&tunnel->ipv6.src_addr,
				(uint8_t *)&sa->src.ip.ip6.ip6_b, 16);

			memcpy((uint8_t *)&tunnel->ipv6.dst_addr,
				(uint8_t *)&sa->dst.ip.ip6.ip6_b, 16);
		}
		/* TODO support for Transport */
	}
	ipsec->replay_win_sz = app_sa_prm.window_size;
	ipsec->options.esn = app_sa_prm.enable_esn;
	ipsec->options.udp_encap = sa->udp_encap;
	if (IS_HW_REASSEMBLY_EN(sa->flags))
		ipsec->options.ip_reassembly_en = 1;
}
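
/*
 * Illustrative sketch (not part of the application): for a hypothetical
 * IPv4 tunnel SA "sa", a caller that has already selected tunnel mode
 * would use set_ipsec_conf() roughly as follows:
 *
 *	struct rte_security_ipsec_xform x = {
 *		.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
 *	};
 *	set_ipsec_conf(sa, &x);
 *	// x.tunnel.type is now RTE_SECURITY_IPSEC_TUNNEL_IPV4, and
 *	// x.tunnel.ipv4 holds the SA endpoints with ttl = IPDEFTTL;
 *	// replay_win_sz/esn/udp_encap are filled from app_sa_prm and sa.
 */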

static inline int
verify_crypto_xform(const struct rte_cryptodev_capabilities *capabilities,
		struct rte_crypto_sym_xform *crypto_xform)
{
	const struct rte_cryptodev_capabilities *crypto_cap;
	int j = 0;

	while ((crypto_cap = &capabilities[j++])->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (crypto_cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
				crypto_cap->sym.xform_type == crypto_xform->type) {
			if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
					crypto_cap->sym.aead.algo == crypto_xform->aead.algo) {
				if (rte_cryptodev_sym_capability_check_aead(&crypto_cap->sym,
						crypto_xform->aead.key.length,
						crypto_xform->aead.digest_length,
						crypto_xform->aead.aad_length,
						crypto_xform->aead.iv.length) == 0)
					return 0;
			}
			if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
					crypto_cap->sym.cipher.algo == crypto_xform->cipher.algo) {
				if (rte_cryptodev_sym_capability_check_cipher(&crypto_cap->sym,
						crypto_xform->cipher.key.length,
						crypto_xform->cipher.iv.length) == 0)
					return 0;
			}
			if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
					crypto_cap->sym.auth.algo == crypto_xform->auth.algo) {
				if (rte_cryptodev_sym_capability_check_auth(&crypto_cap->sym,
						crypto_xform->auth.key.length,
						crypto_xform->auth.digest_length,
						crypto_xform->auth.iv.length) == 0)
					return 0;
			}
		}
	}

	return -ENOTSUP;
}
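
/*
 * Note (illustrative, not compiled): capability arrays reported by
 * cryptodevs are terminated by a sentinel entry, which is what bounds
 * the while() walk above without an explicit length. A minimal array
 * would look like:
 *
 *	static const struct rte_cryptodev_capabilities caps[] = {
 *		{ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, .sym = { ... } },
 *		{ .op = RTE_CRYPTO_OP_TYPE_UNDEFINED }	// end marker
 *	};
 */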

static inline int
verify_crypto_capabilities(const struct rte_cryptodev_capabilities *capabilities,
		struct rte_crypto_sym_xform *crypto_xform)
{
	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
		return verify_crypto_xform(capabilities, crypto_xform);
	else if (crypto_xform->next != NULL)
		return (verify_crypto_xform(capabilities, crypto_xform) ||
		    verify_crypto_xform(capabilities, crypto_xform->next));
	else
		return -ENOTSUP;
}
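
/*
 * Sketch (assumptions noted inline): a non-AEAD SA carries a chained
 * cipher+auth transform, which is what the crypto_xform->next branch
 * above expects, e.g.:
 *
 *	struct rte_crypto_sym_xform auth = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *		// algo/key/digest as configured for the SA
 *	};
 *	struct rte_crypto_sym_xform cipher = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.next = &auth,
 *	};
 *	// verify_crypto_capabilities(caps, &cipher) checks both links.
 */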

static inline int
verify_ipsec_capabilities(struct rte_security_ipsec_xform *ipsec_xform,
		const struct rte_security_capability *sec_cap)
{
	/* Verify security capabilities */

	if (ipsec_xform->options.esn == 1 && sec_cap->ipsec.options.esn == 0) {
		RTE_LOG(INFO, USER1, "ESN is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.udp_encap == 1 &&
	    sec_cap->ipsec.options.udp_encap == 0) {
		RTE_LOG(INFO, USER1, "UDP encapsulation is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.udp_ports_verify == 1 &&
	    sec_cap->ipsec.options.udp_ports_verify == 0) {
		RTE_LOG(DEBUG, USER1,
			"UDP encapsulation ports verification is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.copy_dscp == 1 &&
	    sec_cap->ipsec.options.copy_dscp == 0) {
		RTE_LOG(DEBUG, USER1, "Copy DSCP is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.copy_flabel == 1 &&
	    sec_cap->ipsec.options.copy_flabel == 0) {
		RTE_LOG(DEBUG, USER1, "Copy Flow Label is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.copy_df == 1 &&
	    sec_cap->ipsec.options.copy_df == 0) {
		RTE_LOG(DEBUG, USER1, "Copy DF bit is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.dec_ttl == 1 &&
	    sec_cap->ipsec.options.dec_ttl == 0) {
		RTE_LOG(DEBUG, USER1, "Decrement TTL is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.ecn == 1 && sec_cap->ipsec.options.ecn == 0) {
		RTE_LOG(DEBUG, USER1, "ECN is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.stats == 1 &&
	    sec_cap->ipsec.options.stats == 0) {
		RTE_LOG(DEBUG, USER1, "Stats are not supported\n");
		return -ENOTSUP;
	}

	if ((ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) &&
	    (ipsec_xform->options.iv_gen_disable == 1) &&
	    (sec_cap->ipsec.options.iv_gen_disable != 1)) {
		RTE_LOG(DEBUG, USER1, "Application provided IV is not supported\n");
		return -ENOTSUP;
	}

	if ((ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
	    (ipsec_xform->options.tunnel_hdr_verify >
	    sec_cap->ipsec.options.tunnel_hdr_verify)) {
		RTE_LOG(DEBUG, USER1, "Tunnel header verify is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.ip_csum_enable == 1 &&
	    sec_cap->ipsec.options.ip_csum_enable == 0) {
		RTE_LOG(DEBUG, USER1, "Inner IP checksum is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.l4_csum_enable == 1 &&
	    sec_cap->ipsec.options.l4_csum_enable == 0) {
		RTE_LOG(DEBUG, USER1, "Inner L4 checksum is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		if (ipsec_xform->replay_win_sz > sec_cap->ipsec.replay_win_sz_max) {
			RTE_LOG(DEBUG, USER1, "Replay window size is not supported\n");
			return -ENOTSUP;
		}
	}

	return 0;
}

static inline int
verify_security_capabilities(struct rte_security_ctx *ctx,
		struct rte_security_session_conf *sess_conf,
		uint32_t *ol_flags)
{
	struct rte_security_capability_idx sec_cap_idx;
	const struct rte_security_capability *sec_cap;

	sec_cap_idx.action = sess_conf->action_type;
	sec_cap_idx.protocol = sess_conf->protocol;
	sec_cap_idx.ipsec.proto = sess_conf->ipsec.proto;
	sec_cap_idx.ipsec.mode = sess_conf->ipsec.mode;
	sec_cap_idx.ipsec.direction = sess_conf->ipsec.direction;

	sec_cap = rte_security_capability_get(ctx, &sec_cap_idx);
	if (sec_cap == NULL)
		return -ENOTSUP;

	if (verify_crypto_capabilities(sec_cap->crypto_capabilities,
				sess_conf->crypto_xform))
		return -ENOTSUP;

	if (verify_ipsec_capabilities(&sess_conf->ipsec, sec_cap))
		return -ENOTSUP;

	if (ol_flags != NULL)
		*ol_flags = sec_cap->ol_flags;

	return 0;
}
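
/*
 * Sketch (assumption: an initialized inline-capable ethdev "portid"):
 * the same helper can be used to probe a port's security support before
 * creating a session, e.g.:
 *
 *	struct rte_security_ctx *ctx = rte_eth_dev_get_sec_ctx(portid);
 *	uint32_t ol_flags;
 *
 *	if (ctx == NULL ||
 *	    verify_security_capabilities(ctx, &sess_conf, &ol_flags))
 *		return -ENOTSUP;	// port cannot handle this SA
 */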

int
create_lookaside_session(struct ipsec_ctx *ipsec_ctx_lcore[],
	struct socket_ctx *skt_ctx, const struct eventmode_conf *em_conf,
	struct ipsec_sa *sa, struct rte_ipsec_session *ips)
{
	uint16_t cdev_id = RTE_CRYPTO_MAX_DEVS;
	enum rte_crypto_op_sess_type sess_type;
	struct rte_cryptodev_info cdev_info;
	enum rte_crypto_op_type op_type;
	unsigned long cdev_id_qp = 0;
	struct ipsec_ctx *ipsec_ctx;
	struct cdev_key key = { 0 };
	void *sess = NULL;
	uint32_t lcore_id;
	int32_t ret = 0;

	RTE_LCORE_FOREACH(lcore_id) {
		ipsec_ctx = ipsec_ctx_lcore[lcore_id];

		/* Core is not bound to any cryptodev, skip it */
		if (ipsec_ctx->cdev_map == NULL)
			continue;

		/* Look for a cryptodev that can handle this SA */
		key.lcore_id = (uint8_t)lcore_id;
		key.cipher_algo = (uint8_t)sa->cipher_algo;
		key.auth_algo = (uint8_t)sa->auth_algo;
		key.aead_algo = (uint8_t)sa->aead_algo;

		ret = rte_hash_lookup_data(ipsec_ctx->cdev_map, &key,
				(void **)&cdev_id_qp);
		if (ret == -ENOENT)
			continue;
		if (ret < 0) {
			RTE_LOG(ERR, IPSEC,
					"No cryptodev: core %u, cipher_algo %u, "
					"auth_algo %u, aead_algo %u\n",
					key.lcore_id,
					key.cipher_algo,
					key.auth_algo,
					key.aead_algo);
			return ret;
		}

		/* Verify that all cores use the same cryptodev for the
		 * algorithm combination required by this SA.
		 * The cryptodev mapping process maps an SA to the first
		 * cryptodev that matches its requirements, so this is a
		 * double check, not an additional restriction.
		 */
		if (cdev_id == RTE_CRYPTO_MAX_DEVS)
			cdev_id = ipsec_ctx->tbl[cdev_id_qp].id;
		else if (cdev_id != ipsec_ctx->tbl[cdev_id_qp].id) {
			RTE_LOG(ERR, IPSEC,
					"SA mapping to multiple cryptodevs is "
					"not supported!\n");
			return -EINVAL;
		}

		/* Store per core queue pair information */
		sa->cqp[lcore_id] = &ipsec_ctx->tbl[cdev_id_qp];
	}
	if (cdev_id == RTE_CRYPTO_MAX_DEVS) {
		RTE_LOG(WARNING, IPSEC, "No cores found to handle SA\n");
		return 0;
	}

	RTE_LOG(DEBUG, IPSEC, "Create session for SA spi %u on cryptodev "
			"%u\n", sa->spi, cdev_id);

	if (ips->type != RTE_SECURITY_ACTION_TYPE_NONE &&
		ips->type != RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) {
		struct rte_security_session_conf sess_conf = {
			.action_type = ips->type,
			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
			{.ipsec = {
				.spi = sa->spi,
				.salt = sa->salt,
				.options = { 0 },
				.replay_win_sz = 0,
				.direction = sa->direction,
				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
				.mode = (IS_TUNNEL(sa->flags)) ?
					RTE_SECURITY_IPSEC_SA_MODE_TUNNEL :
					RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
			} },
			.crypto_xform = sa->xforms,
			.userdata = NULL,
		};

		if (ips->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
			struct rte_security_ctx *ctx = (struct rte_security_ctx *)
							rte_cryptodev_get_sec_ctx(
							cdev_id);

			/* Set IPsec parameters in conf */
			set_ipsec_conf(sa, &(sess_conf.ipsec));

			if (verify_security_capabilities(ctx, &sess_conf, NULL)) {
				RTE_LOG(ERR, IPSEC,
					"Requested security session config not supported\n");
				return -1;
			}

			ips->security.ses = rte_security_session_create(ctx,
					&sess_conf, skt_ctx->session_pool);
			if (ips->security.ses == NULL) {
				RTE_LOG(ERR, IPSEC,
					"SEC Session init failed: err: %d\n", ret);
				return -1;
			}
			ips->security.ctx = ctx;

			sess = ips->security.ses;
			op_type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
			sess_type = RTE_CRYPTO_OP_SECURITY_SESSION;
		} else {
			RTE_LOG(ERR, IPSEC, "Inline not supported\n");
			return -1;
		}
	} else {
		struct rte_cryptodev_info info;

		rte_cryptodev_info_get(cdev_id, &info);

		if (ips->type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) {
			if (!(info.feature_flags &
				RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO))
				return -ENOTSUP;
		}

		if (verify_crypto_capabilities(info.capabilities, sa->xforms)) {
			RTE_LOG(ERR, IPSEC,
				"Requested crypto session config not supported\n");
			return -1;
		}

		ips->crypto.dev_id = cdev_id;
		ips->crypto.ses = rte_cryptodev_sym_session_create(cdev_id,
				sa->xforms, skt_ctx->session_pool);

		rte_cryptodev_info_get(cdev_id, &cdev_info);
	}

	/* Set up metadata required by the event crypto adapter */
	if (em_conf->enable_event_crypto_adapter && sess != NULL) {
		union rte_event_crypto_metadata m_data;
		const struct eventdev_params *eventdev_conf;

		eventdev_conf = &(em_conf->eventdev_config[0]);
		memset(&m_data, 0, sizeof(m_data));

		/* Fill in response information */
		m_data.response_info.sched_type = em_conf->ext_params.sched_type;
		m_data.response_info.op = RTE_EVENT_OP_NEW;
		m_data.response_info.queue_id = eventdev_conf->ev_cpt_queue_id;

		/* Fill in request information */
		m_data.request_info.cdev_id = cdev_id;
		m_data.request_info.queue_pair_id = 0;

		/* Attach meta info to session */
		rte_cryptodev_session_event_mdata_set(cdev_id, sess, op_type,
				sess_type, &m_data, sizeof(m_data));
	}

	return 0;
}
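
/*
 * Sketch (not compiled here): with the metadata attached above, a worker
 * can submit crypto ops through the event crypto adapter instead of
 * calling rte_cryptodev_enqueue_burst() directly, e.g.:
 *
 *	// assumptions: "op" is a prepared rte_crypto_op with the session
 *	// attached; evdev_id/port_id come from the eventmode config
 *	struct rte_event ev = { .event_ptr = op };
 *
 *	rte_event_crypto_adapter_enqueue(evdev_id, port_id, &ev, 1);
 *
 * The completed op then comes back as an event on ev_cpt_queue_id, as
 * programmed in m_data.response_info above.
 */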

int
create_inline_session(struct socket_ctx *skt_ctx, struct ipsec_sa *sa,
		struct rte_ipsec_session *ips)
{
	int32_t ret = 0;
	struct rte_security_ctx *sec_ctx;
	struct rte_security_session_conf sess_conf = {
		.action_type = ips->type,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		{.ipsec = {
			.spi = sa->spi,
			.salt = sa->salt,
			.options = { 0 },
			.replay_win_sz = 0,
			.direction = sa->direction,
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP
		} },
		.crypto_xform = sa->xforms,
		.userdata = NULL,
	};

	if (IS_TRANSPORT(sa->flags)) {
		sess_conf.ipsec.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT;
		if (IS_IP4(sa->flags)) {
			sess_conf.ipsec.tunnel.type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV4;

			sess_conf.ipsec.tunnel.ipv4.src_ip.s_addr =
				sa->src.ip.ip4;
			sess_conf.ipsec.tunnel.ipv4.dst_ip.s_addr =
				sa->dst.ip.ip4;
		} else if (IS_IP6(sa->flags)) {
			sess_conf.ipsec.tunnel.type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV6;

			memcpy(sess_conf.ipsec.tunnel.ipv6.src_addr.s6_addr,
				sa->src.ip.ip6.ip6_b, 16);
			memcpy(sess_conf.ipsec.tunnel.ipv6.dst_addr.s6_addr,
				sa->dst.ip.ip6.ip6_b, 16);
		}
	} else if (IS_TUNNEL(sa->flags)) {
		sess_conf.ipsec.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;

		if (IS_IP4(sa->flags)) {
			sess_conf.ipsec.tunnel.type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV4;

			sess_conf.ipsec.tunnel.ipv4.src_ip.s_addr =
				sa->src.ip.ip4;
			sess_conf.ipsec.tunnel.ipv4.dst_ip.s_addr =
				sa->dst.ip.ip4;
		} else if (IS_IP6(sa->flags)) {
			sess_conf.ipsec.tunnel.type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV6;

			memcpy(sess_conf.ipsec.tunnel.ipv6.src_addr.s6_addr,
				sa->src.ip.ip6.ip6_b, 16);
			memcpy(sess_conf.ipsec.tunnel.ipv6.dst_addr.s6_addr,
				sa->dst.ip.ip6.ip6_b, 16);
		} else {
			RTE_LOG(ERR, IPSEC, "invalid tunnel type\n");
			return -1;
		}
	}

	if (sa->udp_encap) {
		sess_conf.ipsec.options.udp_encap = 1;
		sess_conf.ipsec.udp.sport = htons(sa->udp.sport);
		sess_conf.ipsec.udp.dport = htons(sa->udp.dport);
	}

	if (sa->esn > 0) {
		sess_conf.ipsec.options.esn = 1;
		sess_conf.ipsec.esn.value = sa->esn;
	}

	RTE_LOG_DP(DEBUG, IPSEC, "Create session for SA spi %u on port %u\n",
		sa->spi, sa->portid);

	if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
		struct rte_flow_error err;
		int ret = 0;

		sec_ctx = (struct rte_security_ctx *)
					rte_eth_dev_get_sec_ctx(
					sa->portid);
		if (sec_ctx == NULL) {
			RTE_LOG(ERR, IPSEC,
				"rte_eth_dev_get_sec_ctx failed\n");
			return -1;
		}

		if (verify_security_capabilities(sec_ctx, &sess_conf,
					&ips->security.ol_flags)) {
			RTE_LOG(ERR, IPSEC,
				"Requested security session config not supported\n");
			return -1;
		}

		ips->security.ses = rte_security_session_create(sec_ctx,
				&sess_conf, skt_ctx->session_pool);
		if (ips->security.ses == NULL) {
			RTE_LOG(ERR, IPSEC,
				"SEC Session init failed: err: %d\n", ret);
			return -1;
		}

		ips->security.ctx = sec_ctx;
		sa->pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;

		if (IS_IP6(sa->flags)) {
			sa->pattern[1].mask = &rte_flow_item_ipv6_mask;
			sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV6;
			sa->pattern[1].spec = &sa->ipv6_spec;

			memcpy(sa->ipv6_spec.hdr.dst_addr,
				sa->dst.ip.ip6.ip6_b, 16);
			memcpy(sa->ipv6_spec.hdr.src_addr,
			       sa->src.ip.ip6.ip6_b, 16);
		} else if (IS_IP4(sa->flags)) {
			sa->pattern[1].mask = &rte_flow_item_ipv4_mask;
			sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
			sa->pattern[1].spec = &sa->ipv4_spec;

			sa->ipv4_spec.hdr.dst_addr = sa->dst.ip.ip4;
			sa->ipv4_spec.hdr.src_addr = sa->src.ip.ip4;
		}

		sa->esp_spec.hdr.spi = rte_cpu_to_be_32(sa->spi);

		if (sa->udp_encap) {
			sa->udp_spec.hdr.dst_port =
					rte_cpu_to_be_16(sa->udp.dport);
			sa->udp_spec.hdr.src_port =
					rte_cpu_to_be_16(sa->udp.sport);

			sa->pattern[2].mask = &rte_flow_item_udp_mask;
			sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_UDP;
			sa->pattern[2].spec = &sa->udp_spec;

			sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_ESP;
			sa->pattern[3].spec = &sa->esp_spec;
			sa->pattern[3].mask = &rte_flow_item_esp_mask;

			sa->pattern[4].type = RTE_FLOW_ITEM_TYPE_END;
		} else {
			sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
			sa->pattern[2].spec = &sa->esp_spec;
			sa->pattern[2].mask = &rte_flow_item_esp_mask;

			sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
		}

		sa->action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY;
		sa->action[0].conf = ips->security.ses;

		sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;

		sa->attr.egress = (sa->direction ==
				RTE_SECURITY_IPSEC_SA_DIR_EGRESS);
		sa->attr.ingress = (sa->direction ==
				RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
		if (sa->attr.ingress) {
			uint8_t rss_key[64];
			struct rte_eth_rss_conf rss_conf = {
				.rss_key = rss_key,
				.rss_key_len = sizeof(rss_key),
			};
			struct rte_eth_dev_info dev_info;
			uint16_t queue[RTE_MAX_QUEUES_PER_PORT];
			struct rte_flow_action_rss action_rss;
			unsigned int i;
			unsigned int j;

			/* Don't create a flow if the default flow was already created */
			if (flow_info_tbl[sa->portid].rx_def_flow)
				return 0;

			ret = rte_eth_dev_info_get(sa->portid, &dev_info);
			if (ret != 0) {
				RTE_LOG(ERR, IPSEC,
					"Error during getting device (port %u) info: %s\n",
					sa->portid, strerror(-ret));
				return ret;
			}

			sa->action[2].type = RTE_FLOW_ACTION_TYPE_END;
			/* Try RSS. */
			sa->action[1].type = RTE_FLOW_ACTION_TYPE_RSS;
			sa->action[1].conf = &action_rss;
			ret = rte_eth_dev_rss_hash_conf_get(sa->portid,
					&rss_conf);
			if (ret != 0) {
				RTE_LOG(ERR, IPSEC,
					"rte_eth_dev_rss_hash_conf_get:ret=%d\n",
					ret);
				return -1;
			}
			for (i = 0, j = 0; i < dev_info.nb_rx_queues; ++i)
				queue[j++] = i;

			action_rss = (struct rte_flow_action_rss){
					.types = rss_conf.rss_hf,
					.key_len = rss_conf.rss_key_len,
					.queue_num = j,
					.key = rss_key,
					.queue = queue,
			};
			ret = rte_flow_validate(sa->portid, &sa->attr,
						sa->pattern, sa->action,
						&err);
			if (!ret)
				goto flow_create;
			/* Try Queue. */
			sa->action[1].type = RTE_FLOW_ACTION_TYPE_QUEUE;
			sa->action[1].conf =
				&(struct rte_flow_action_queue){
				.index = 0,
			};
			ret = rte_flow_validate(sa->portid, &sa->attr,
						sa->pattern, sa->action,
						&err);
			if (!ret)
				goto flow_create;
			/* Try End. */
			sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;
			sa->action[1].conf = NULL;
			ret = rte_flow_validate(sa->portid, &sa->attr,
						sa->pattern, sa->action,
						&err);
			if (ret)
				goto flow_create_failure;
		} else if (sa->attr.egress &&
				(ips->security.ol_flags &
					RTE_SECURITY_TX_HW_TRAILER_OFFLOAD)) {
			sa->action[1].type =
					RTE_FLOW_ACTION_TYPE_PASSTHRU;
			sa->action[2].type =
					RTE_FLOW_ACTION_TYPE_END;
		}
flow_create:
		sa->flow = rte_flow_create(sa->portid,
				&sa->attr, sa->pattern, sa->action, &err);
		if (sa->flow == NULL) {
flow_create_failure:
			RTE_LOG(ERR, IPSEC,
				"Failed to create ipsec flow msg: %s\n",
				err.message);
			return -1;
		}
	} else if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
		sec_ctx = (struct rte_security_ctx *)
				rte_eth_dev_get_sec_ctx(sa->portid);

		if (sec_ctx == NULL) {
			RTE_LOG(ERR, IPSEC,
				"Ethernet device doesn't have security features registered\n");
			return -1;
		}

		/* Set IPsec parameters in conf */
		set_ipsec_conf(sa, &(sess_conf.ipsec));

		/* Save the SA as userdata for the security session. When
		 * a packet is received, this userdata is retrieved using
		 * the metadata from the packet.
		 *
		 * The PMD is expected to set similar metadata for other
		 * operations, like rte_eth_event, which are tied to the
		 * security session. In such cases, the userdata can be
		 * used to uniquely identify the security parameters
		 * involved.
		 */
		sess_conf.userdata = (void *) sa;

		if (verify_security_capabilities(sec_ctx, &sess_conf,
					&ips->security.ol_flags)) {
			RTE_LOG(ERR, IPSEC,
				"Requested security session config not supported\n");
			return -1;
		}

		ips->security.ses = rte_security_session_create(sec_ctx,
					&sess_conf, skt_ctx->session_pool);
		if (ips->security.ses == NULL) {
			RTE_LOG(ERR, IPSEC,
				"SEC Session init failed: err: %d\n", ret);
			return -1;
		}

		ips->security.ctx = sec_ctx;
	}

	return 0;
}
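
/*
 * Illustrative note (not compiled): for an ingress inline-crypto SA the
 * code above ends up requesting a flow equivalent to this testpmd-style
 * rule (SPI and addresses are per-SA):
 *
 *	flow create <port> ingress
 *		pattern eth / ipv4 dst is <sa->dst> src is <sa->src> /
 *			esp spi is <sa->spi> / end
 *		actions security / rss / end
 *
 * with queue or plain end actions as fallbacks when RSS validation fails.
 */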

int
create_ipsec_esp_flow(struct ipsec_sa *sa)
{
	int ret = 0;
	struct rte_flow_error err = {};

	if (sa->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		RTE_LOG(ERR, IPSEC,
			"No Flow Director rule for egress traffic\n");
		return -1;
	}
	if (sa->flags == TRANSPORT) {
		RTE_LOG(ERR, IPSEC,
			"No Flow Director rule for transport mode\n");
		return -1;
	}
	sa->action[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
	sa->pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
	sa->action[0].conf = &(struct rte_flow_action_queue) {
				.index = sa->fdir_qid,
	};
	sa->attr.egress = 0;
	sa->attr.ingress = 1;
	if (IS_IP6(sa->flags)) {
		sa->pattern[1].mask = &rte_flow_item_ipv6_mask;
		sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV6;
		sa->pattern[1].spec = &sa->ipv6_spec;
		memcpy(sa->ipv6_spec.hdr.dst_addr,
			sa->dst.ip.ip6.ip6_b, sizeof(sa->dst.ip.ip6.ip6_b));
		memcpy(sa->ipv6_spec.hdr.src_addr,
			sa->src.ip.ip6.ip6_b, sizeof(sa->src.ip.ip6.ip6_b));
		sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
		sa->pattern[2].spec = &sa->esp_spec;
		sa->pattern[2].mask = &rte_flow_item_esp_mask;
		sa->esp_spec.hdr.spi = rte_cpu_to_be_32(sa->spi);
		sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
	} else if (IS_IP4(sa->flags)) {
		sa->pattern[1].mask = &rte_flow_item_ipv4_mask;
		sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
		sa->pattern[1].spec = &sa->ipv4_spec;
		sa->ipv4_spec.hdr.dst_addr = sa->dst.ip.ip4;
		sa->ipv4_spec.hdr.src_addr = sa->src.ip.ip4;
		sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
		sa->pattern[2].spec = &sa->esp_spec;
		sa->pattern[2].mask = &rte_flow_item_esp_mask;
		sa->esp_spec.hdr.spi = rte_cpu_to_be_32(sa->spi);
		sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
	}
	sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;

	ret = rte_flow_validate(sa->portid, &sa->attr, sa->pattern, sa->action,
				&err);
	if (ret < 0) {
		RTE_LOG(ERR, IPSEC, "Flow validation failed %s\n", err.message);
		return ret;
	}

	sa->flow = rte_flow_create(sa->portid, &sa->attr, sa->pattern,
					sa->action, &err);
	if (!sa->flow) {
		RTE_LOG(ERR, IPSEC, "Flow creation failed %s\n", err.message);
		return -1;
	}

	return 0;
}
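
/*
 * Sketch (hypothetical values): steering an inbound SA to a dedicated
 * queue only requires fdir_qid to be set before this call, e.g. in the
 * SA configuration path:
 *
 *	sa->fdir_qid = 1;	// hypothetical queue reserved for IPsec
 *	if (create_ipsec_esp_flow(sa) < 0)
 *		RTE_LOG(ERR, IPSEC, "FDIR steering unavailable\n");
 *
 * Packets matching the ETH/IP/ESP(spi) pattern then bypass RSS and
 * arrive on that queue only.
 */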

/*
 * Enqueue a burst of pending crypto ops into the cryptodev PMD queue.
 */
void
enqueue_cop_burst(struct cdev_qp *cqp)
{
	uint32_t i, len, ret;

	len = cqp->len;
	ret = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp, cqp->buf, len);
	if (ret < len) {
		RTE_LOG_DP(DEBUG, IPSEC, "Cryptodev %u queue %u:"
			" enqueued %u crypto ops out of %u\n",
			cqp->id, cqp->qp, ret, len);
		/* drop packets that we fail to enqueue */
		for (i = ret; i < len; i++)
			free_pkts(&cqp->buf[i]->sym->m_src, 1);
	}
	cqp->in_flight += ret;
	cqp->len = 0;
}
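
/*
 * Usage sketch (assumption: "cqp" is already bound to a configured queue
 * pair): callers batch ops via enqueue_cop() below, and the burst is
 * flushed automatically at MAX_PKT_BURST; flushing a partial batch by
 * hand is just:
 *
 *	if (cqp->len > 0)
 *		enqueue_cop_burst(cqp);
 *
 * Ops rejected by the PMD are dropped here rather than retried, so
 * in_flight only counts ops actually accepted by the device.
 */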

static inline void
enqueue_cop(struct cdev_qp *cqp, struct rte_crypto_op *cop)
{
	cqp->buf[cqp->len++] = cop;

	if (cqp->len == MAX_PKT_BURST)
		enqueue_cop_burst(cqp);
}

static inline void
ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
		struct rte_mbuf *pkts[], void *sas[],
		uint16_t nb_pkts)
{
	int32_t ret = 0, i;
	struct ipsec_mbuf_metadata *priv;
	struct rte_crypto_sym_op *sym_cop;
	struct ipsec_sa *sa;
	struct rte_ipsec_session *ips;

	for (i = 0; i < nb_pkts; i++) {
		if (unlikely(sas[i] == NULL)) {
			free_pkts(&pkts[i], 1);
			continue;
		}

		rte_prefetch0(sas[i]);
		rte_prefetch0(pkts[i]);

		priv = get_priv(pkts[i]);
		sa = ipsec_mask_saptr(sas[i]);
		priv->sa = sa;
		ips = ipsec_get_primary_session(sa);

		switch (ips->type) {
		case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
			priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
			priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

			rte_prefetch0(&priv->sym_cop);

			if (unlikely(ips->security.ses == NULL)) {
				free_pkts(&pkts[i], 1);
				continue;
			}

			if (unlikely((pkts[i]->packet_type &
					(RTE_PTYPE_TUNNEL_MASK |
					RTE_PTYPE_L4_MASK)) ==
					MBUF_PTYPE_TUNNEL_ESP_IN_UDP &&
					sa->udp_encap != 1)) {
				free_pkts(&pkts[i], 1);
				continue;
			}

			sym_cop = get_sym_cop(&priv->cop);
			sym_cop->m_src = pkts[i];

			rte_security_attach_session(&priv->cop,
				ips->security.ses);
			break;

		case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
			RTE_LOG(ERR, IPSEC, "CPU crypto is not supported in"
					" legacy mode\n");
			free_pkts(&pkts[i], 1);
			continue;

		case RTE_SECURITY_ACTION_TYPE_NONE:
			priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
			priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

			rte_prefetch0(&priv->sym_cop);

			if (unlikely(ips->crypto.ses == NULL)) {
				free_pkts(&pkts[i], 1);
				continue;
			}

			rte_crypto_op_attach_sym_session(&priv->cop,
					ips->crypto.ses);

			ret = xform_func(pkts[i], sa, &priv->cop);
			if (unlikely(ret)) {
				free_pkts(&pkts[i], 1);
				continue;
			}
			break;
		case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
			RTE_ASSERT(ips->security.ses != NULL);
			ipsec_ctx->ol_pkts[ipsec_ctx->ol_pkts_cnt++] = pkts[i];
			if (ips->security.ol_flags &
				RTE_SECURITY_TX_OLOAD_NEED_MDATA)
				rte_security_set_pkt_metadata(
					ips->security.ctx, ips->security.ses,
					pkts[i], NULL);
			continue;
		case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
			RTE_ASSERT(ips->security.ses != NULL);
			priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
			priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

			rte_prefetch0(&priv->sym_cop);
			rte_security_attach_session(&priv->cop,
					ips->security.ses);

			ret = xform_func(pkts[i], sa, &priv->cop);
			if (unlikely(ret)) {
				free_pkts(&pkts[i], 1);
				continue;
			}

			ipsec_ctx->ol_pkts[ipsec_ctx->ol_pkts_cnt++] = pkts[i];
			if (ips->security.ol_flags &
				RTE_SECURITY_TX_OLOAD_NEED_MDATA)
				rte_security_set_pkt_metadata(
					ips->security.ctx, ips->security.ses,
					pkts[i], NULL);
			continue;
		}

		enqueue_cop(sa->cqp[ipsec_ctx->lcore_id], &priv->cop);
	}
}
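
/*
 * Note (illustrative): for lookaside action types the ops staged above
 * are flushed to the cryptodev by enqueue_cop(); inline action types
 * skip the cryptodev entirely and are parked in ol_pkts[] for
 * ipsec_inline_dequeue() below to hand back to the caller.
 */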

static inline int32_t
ipsec_inline_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
	      struct rte_mbuf *pkts[], uint16_t max_pkts)
{
	int32_t nb_pkts, ret;
	struct ipsec_mbuf_metadata *priv;
	struct ipsec_sa *sa;
	struct rte_mbuf *pkt;

	nb_pkts = 0;
	while (ipsec_ctx->ol_pkts_cnt > 0 && nb_pkts < max_pkts) {
		pkt = ipsec_ctx->ol_pkts[--ipsec_ctx->ol_pkts_cnt];
		rte_prefetch0(pkt);
		priv = get_priv(pkt);
		sa = priv->sa;
		ret = xform_func(pkt, sa, &priv->cop);
		if (unlikely(ret)) {
			free_pkts(&pkt, 1);
			continue;
		}
		pkts[nb_pkts++] = pkt;
	}

	return nb_pkts;
}

static inline int
ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
	      struct rte_mbuf *pkts[], uint16_t max_pkts)
{
	int32_t nb_pkts = 0, ret = 0, i, j, nb_cops;
	struct ipsec_mbuf_metadata *priv;
	struct rte_crypto_op *cops[max_pkts];
	struct ipsec_sa *sa;
	struct rte_mbuf *pkt;

	for (i = 0; i < ipsec_ctx->nb_qps && nb_pkts < max_pkts; i++) {
		struct cdev_qp *cqp;

		cqp = &ipsec_ctx->tbl[ipsec_ctx->last_qp++];
		if (ipsec_ctx->last_qp == ipsec_ctx->nb_qps)
			ipsec_ctx->last_qp %= ipsec_ctx->nb_qps;

		if (cqp->in_flight == 0)
			continue;

		nb_cops = rte_cryptodev_dequeue_burst(cqp->id, cqp->qp,
				cops, max_pkts - nb_pkts);

		cqp->in_flight -= nb_cops;

		for (j = 0; j < nb_cops; j++) {
			pkt = cops[j]->sym->m_src;
			rte_prefetch0(pkt);

			priv = get_priv(pkt);
			sa = priv->sa;

			RTE_ASSERT(sa != NULL);

			if (ipsec_get_action_type(sa) ==
				RTE_SECURITY_ACTION_TYPE_NONE) {
				ret = xform_func(pkt, sa, cops[j]);
				if (unlikely(ret)) {
					free_pkts(&pkt, 1);
					continue;
				}
			} else if (ipsec_get_action_type(sa) ==
				RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
				if (cops[j]->status) {
					free_pkts(&pkt, 1);
					continue;
				}
			}
			pkts[nb_pkts++] = pkt;
		}
	}

	/* return packets */
	return nb_pkts;
}
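
/*
 * Note (illustrative): ipsec_dequeue() polls the configured crypto queue
 * pairs round-robin, resuming from last_qp on each call, so a small
 * max_pkts does not starve the later queue pairs. A caller collecting
 * everything that has completed so far could loop, e.g.:
 *
 *	uint16_t n;
 *
 *	do {
 *		n = ipsec_inbound_cqp_dequeue(ctx, pkts, RTE_DIM(pkts));
 *		// hypothetical: hand off n packets here
 *	} while (n > 0);
 */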

uint16_t
ipsec_inbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
		uint16_t nb_pkts, uint16_t len)
{
	void *sas[nb_pkts];

	inbound_sa_lookup(ctx->sa_ctx, pkts, sas, nb_pkts);

	ipsec_enqueue(esp_inbound, ctx, pkts, sas, nb_pkts);

	return ipsec_inline_dequeue(esp_inbound_post, ctx, pkts, len);
}

uint16_t
ipsec_inbound_cqp_dequeue(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
		uint16_t len)
{
	return ipsec_dequeue(esp_inbound_post, ctx, pkts, len);
}

uint16_t
ipsec_outbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
		uint32_t sa_idx[], uint16_t nb_pkts, uint16_t len)
{
	void *sas[nb_pkts];

	outbound_sa_lookup(ctx->sa_ctx, sa_idx, sas, nb_pkts);

	ipsec_enqueue(esp_outbound, ctx, pkts, sas, nb_pkts);

	return ipsec_inline_dequeue(esp_outbound_post, ctx, pkts, len);
}

uint16_t
ipsec_outbound_cqp_dequeue(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
		uint16_t len)
{
	return ipsec_dequeue(esp_outbound_post, ctx, pkts, len);
}
1043