/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */
#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <rte_branch_prediction.h>
#include <rte_event_crypto_adapter.h>
#include <rte_log.h>
#include <rte_crypto.h>
#include <rte_security.h>
#include <rte_cryptodev.h>
#include <rte_ipsec.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_hash.h>

#include "ipsec.h"
#include "esp.h"

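/*
 * Populate the rte_security IPsec transform from the SA: tunnel
 * endpoint addresses for IPv4/IPv6 tunnel mode, plus the replay
 * window, ESN, UDP encapsulation and HW reassembly options.
 */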
static inline void
set_ipsec_conf(struct ipsec_sa *sa, struct rte_security_ipsec_xform *ipsec)
{
	if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
		struct rte_security_ipsec_tunnel_param *tunnel =
				&ipsec->tunnel;
		if (IS_IP4_TUNNEL(sa->flags)) {
			tunnel->type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV4;
			tunnel->ipv4.ttl = IPDEFTTL;

			memcpy((uint8_t *)&tunnel->ipv4.src_ip,
				(uint8_t *)&sa->src.ip.ip4, 4);

			memcpy((uint8_t *)&tunnel->ipv4.dst_ip,
				(uint8_t *)&sa->dst.ip.ip4, 4);
		} else if (IS_IP6_TUNNEL(sa->flags)) {
			tunnel->type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV6;
			tunnel->ipv6.hlimit = IPDEFTTL;
			tunnel->ipv6.dscp = 0;
			tunnel->ipv6.flabel = 0;

			memcpy((uint8_t *)&tunnel->ipv6.src_addr,
				(uint8_t *)&sa->src.ip.ip6.ip6_b, 16);

			memcpy((uint8_t *)&tunnel->ipv6.dst_addr,
				(uint8_t *)&sa->dst.ip.ip6.ip6_b, 16);
		}
		/* TODO support for Transport */
	}
	ipsec->replay_win_sz = app_sa_prm.window_size;
	ipsec->options.esn = app_sa_prm.enable_esn;
	ipsec->options.udp_encap = sa->udp_encap;
	if (IS_HW_REASSEMBLY_EN(sa->flags))
		ipsec->options.ip_reassembly_en = 1;
}

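/*
 * Create a lookaside session for the SA. Walk all lcores to find the
 * cryptodev queue pair mapped to this SA's algorithm combination,
 * then create either a rte_security session (lookaside protocol) or
 * a plain cryptodev symmetric session, and, when the event crypto
 * adapter is enabled, attach the event metadata to the session.
 */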
int
create_lookaside_session(struct ipsec_ctx *ipsec_ctx_lcore[],
	struct socket_ctx *skt_ctx, const struct eventmode_conf *em_conf,
	struct ipsec_sa *sa, struct rte_ipsec_session *ips)
{
	uint16_t cdev_id = RTE_CRYPTO_MAX_DEVS;
	enum rte_crypto_op_sess_type sess_type;
	struct rte_cryptodev_info cdev_info;
	enum rte_crypto_op_type op_type;
	unsigned long cdev_id_qp = 0;
	struct ipsec_ctx *ipsec_ctx;
	struct cdev_key key = { 0 };
	void *sess = NULL;
	uint32_t lcore_id;
	int32_t ret = 0;

	RTE_LCORE_FOREACH(lcore_id) {
		ipsec_ctx = ipsec_ctx_lcore[lcore_id];

		/* Core is not bound to any cryptodev, skip it */
		if (ipsec_ctx->cdev_map == NULL)
			continue;

		/* Look for a cryptodev that can handle this SA */
		key.lcore_id = (uint8_t)lcore_id;
		key.cipher_algo = (uint8_t)sa->cipher_algo;
		key.auth_algo = (uint8_t)sa->auth_algo;
		key.aead_algo = (uint8_t)sa->aead_algo;

		ret = rte_hash_lookup_data(ipsec_ctx->cdev_map, &key,
				(void **)&cdev_id_qp);
		if (ret == -ENOENT)
			continue;
		if (ret < 0) {
			RTE_LOG(ERR, IPSEC,
					"No cryptodev: core %u, cipher_algo %u, "
					"auth_algo %u, aead_algo %u\n",
					key.lcore_id,
					key.cipher_algo,
					key.auth_algo,
					key.aead_algo);
			return ret;
		}

		/* Verify that all cores use the same cryptodev for the
		 * algorithm combination required by this SA.
		 * The current cryptodev mapping process maps the SA to the
		 * first cryptodev that matches the requirements, so this is
		 * a double check, not an additional restriction.
		 */
		if (cdev_id == RTE_CRYPTO_MAX_DEVS)
			cdev_id = ipsec_ctx->tbl[cdev_id_qp].id;
		else if (cdev_id != ipsec_ctx->tbl[cdev_id_qp].id) {
			RTE_LOG(ERR, IPSEC,
					"SA mapping to multiple cryptodevs is "
					"not supported!\n");
			return -EINVAL;
		}

		/* Store per core queue pair information */
		sa->cqp[lcore_id] = &ipsec_ctx->tbl[cdev_id_qp];
	}
	if (cdev_id == RTE_CRYPTO_MAX_DEVS) {
		RTE_LOG(WARNING, IPSEC, "No cores found to handle SA\n");
		return 0;
	}

	RTE_LOG(DEBUG, IPSEC, "Create session for SA spi %u on cryptodev "
			"%u\n", sa->spi, cdev_id);

	if (ips->type != RTE_SECURITY_ACTION_TYPE_NONE &&
		ips->type != RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) {
		struct rte_security_session_conf sess_conf = {
			.action_type = ips->type,
			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
			{.ipsec = {
				.spi = sa->spi,
				.salt = sa->salt,
				.options = { 0 },
				.replay_win_sz = 0,
				.direction = sa->direction,
				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
				.mode = (IS_TUNNEL(sa->flags)) ?
					RTE_SECURITY_IPSEC_SA_MODE_TUNNEL :
					RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
			} },
			.crypto_xform = sa->xforms,
			.userdata = NULL,
		};

		if (ips->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
			struct rte_security_ctx *ctx = (struct rte_security_ctx *)
							rte_cryptodev_get_sec_ctx(
							cdev_id);

			/* Set IPsec parameters in conf */
			set_ipsec_conf(sa, &(sess_conf.ipsec));

			ips->security.ses = rte_security_session_create(ctx,
					&sess_conf, skt_ctx->session_pool);
			if (ips->security.ses == NULL) {
				RTE_LOG(ERR, IPSEC,
					"SEC Session init failed\n");
				return -1;
			}
			ips->security.ctx = ctx;

			sess = ips->security.ses;
			op_type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
			sess_type = RTE_CRYPTO_OP_SECURITY_SESSION;
		} else {
			RTE_LOG(ERR, IPSEC, "Inline not supported\n");
			return -1;
		}
	} else {
		if (ips->type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) {
			struct rte_cryptodev_info info;

			rte_cryptodev_info_get(cdev_id, &info);
			if (!(info.feature_flags &
				RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO))
				return -ENOTSUP;
		}
		ips->crypto.dev_id = cdev_id;
		ips->crypto.ses = rte_cryptodev_sym_session_create(cdev_id,
				sa->xforms, skt_ctx->session_pool);
		if (ips->crypto.ses == NULL) {
			RTE_LOG(ERR, IPSEC, "SYM Session init failed\n");
			return -1;
		}

		rte_cryptodev_info_get(cdev_id, &cdev_info);
	}

	/* Set up the metadata required by the event crypto adapter */
	if (em_conf->enable_event_crypto_adapter && sess != NULL) {
		union rte_event_crypto_metadata m_data;
		const struct eventdev_params *eventdev_conf;

		eventdev_conf = &(em_conf->eventdev_config[0]);
		memset(&m_data, 0, sizeof(m_data));

		/* Fill in response information */
		m_data.response_info.sched_type = em_conf->ext_params.sched_type;
		m_data.response_info.op = RTE_EVENT_OP_NEW;
		m_data.response_info.queue_id = eventdev_conf->ev_cpt_queue_id;

		/* Fill in request information */
		m_data.request_info.cdev_id = cdev_id;
		m_data.request_info.queue_pair_id = 0;

		/* Attach meta info to session */
		rte_cryptodev_session_event_mdata_set(cdev_id, sess, op_type,
				sess_type, &m_data, sizeof(m_data));
	}

	return 0;
}

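/*
 * Create an inline (crypto or protocol) session on the ethdev that
 * carries the SA's traffic. For inline crypto this also installs an
 * rte_flow rule steering the SA's ESP packets to the security action,
 * roughly:
 *   pattern ETH / IPV4|IPV6 [/ UDP] / ESP spi=<sa spi> / END
 *   actions SECURITY [/ RSS|QUEUE|PASSTHRU] / END
 */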
int
create_inline_session(struct socket_ctx *skt_ctx, struct ipsec_sa *sa,
		struct rte_ipsec_session *ips)
{
	struct rte_security_ctx *sec_ctx;
	struct rte_security_session_conf sess_conf = {
		.action_type = ips->type,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		{.ipsec = {
			.spi = sa->spi,
			.salt = sa->salt,
			.options = { 0 },
			.replay_win_sz = 0,
			.direction = sa->direction,
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP
		} },
		.crypto_xform = sa->xforms,
		.userdata = NULL,
	};

	if (IS_TRANSPORT(sa->flags)) {
		sess_conf.ipsec.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT;
		if (IS_IP4(sa->flags)) {
			sess_conf.ipsec.tunnel.type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV4;

			sess_conf.ipsec.tunnel.ipv4.src_ip.s_addr =
				sa->src.ip.ip4;
			sess_conf.ipsec.tunnel.ipv4.dst_ip.s_addr =
				sa->dst.ip.ip4;
		} else if (IS_IP6(sa->flags)) {
			sess_conf.ipsec.tunnel.type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV6;

			memcpy(sess_conf.ipsec.tunnel.ipv6.src_addr.s6_addr,
				sa->src.ip.ip6.ip6_b, 16);
			memcpy(sess_conf.ipsec.tunnel.ipv6.dst_addr.s6_addr,
				sa->dst.ip.ip6.ip6_b, 16);
		}
	} else if (IS_TUNNEL(sa->flags)) {
		sess_conf.ipsec.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;

		if (IS_IP4(sa->flags)) {
			sess_conf.ipsec.tunnel.type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV4;

			sess_conf.ipsec.tunnel.ipv4.src_ip.s_addr =
				sa->src.ip.ip4;
			sess_conf.ipsec.tunnel.ipv4.dst_ip.s_addr =
				sa->dst.ip.ip4;
		} else if (IS_IP6(sa->flags)) {
			sess_conf.ipsec.tunnel.type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV6;

			memcpy(sess_conf.ipsec.tunnel.ipv6.src_addr.s6_addr,
				sa->src.ip.ip6.ip6_b, 16);
			memcpy(sess_conf.ipsec.tunnel.ipv6.dst_addr.s6_addr,
				sa->dst.ip.ip6.ip6_b, 16);
		} else {
			RTE_LOG(ERR, IPSEC, "invalid tunnel type\n");
			return -1;
		}
	}

	if (sa->udp_encap) {
		sess_conf.ipsec.options.udp_encap = 1;
		sess_conf.ipsec.udp.sport = rte_cpu_to_be_16(sa->udp.sport);
		sess_conf.ipsec.udp.dport = rte_cpu_to_be_16(sa->udp.dport);
	}

	if (sa->esn > 0) {
		sess_conf.ipsec.options.esn = 1;
		sess_conf.ipsec.esn.value = sa->esn;
	}

	RTE_LOG_DP(DEBUG, IPSEC, "Create session for SA spi %u on port %u\n",
		sa->spi, sa->portid);

	if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
		struct rte_flow_error err;
		const struct rte_security_capability *sec_cap;
		int ret = 0;

		sec_ctx = (struct rte_security_ctx *)
					rte_eth_dev_get_sec_ctx(
					sa->portid);
		if (sec_ctx == NULL) {
			RTE_LOG(ERR, IPSEC,
				"rte_eth_dev_get_sec_ctx failed\n");
			return -1;
		}

		ips->security.ses = rte_security_session_create(sec_ctx,
				&sess_conf, skt_ctx->session_pool);
		if (ips->security.ses == NULL) {
			RTE_LOG(ERR, IPSEC,
				"SEC Session init failed\n");
			return -1;
		}

		sec_cap = rte_security_capabilities_get(sec_ctx);

		/* Iterate until the matching ESP tunnel capability is found */
		while (sec_cap->action != RTE_SECURITY_ACTION_TYPE_NONE) {
			if (sec_cap->action == ips->type &&
			    sec_cap->protocol ==
				RTE_SECURITY_PROTOCOL_IPSEC &&
			    sec_cap->ipsec.mode ==
				RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
			    sec_cap->ipsec.direction == sa->direction)
				break;
			sec_cap++;
		}

		if (sec_cap->action == RTE_SECURITY_ACTION_TYPE_NONE) {
			RTE_LOG(ERR, IPSEC,
				"No suitable security capability found\n");
			return -1;
		}

		ips->security.ol_flags = sec_cap->ol_flags;
		ips->security.ctx = sec_ctx;
		sa->pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;

		if (IS_IP6(sa->flags)) {
			sa->pattern[1].mask = &rte_flow_item_ipv6_mask;
			sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV6;
			sa->pattern[1].spec = &sa->ipv6_spec;

			memcpy(sa->ipv6_spec.hdr.dst_addr,
				sa->dst.ip.ip6.ip6_b, 16);
			memcpy(sa->ipv6_spec.hdr.src_addr,
			       sa->src.ip.ip6.ip6_b, 16);
		} else if (IS_IP4(sa->flags)) {
			sa->pattern[1].mask = &rte_flow_item_ipv4_mask;
			sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
			sa->pattern[1].spec = &sa->ipv4_spec;

			sa->ipv4_spec.hdr.dst_addr = sa->dst.ip.ip4;
			sa->ipv4_spec.hdr.src_addr = sa->src.ip.ip4;
		}

		sa->esp_spec.hdr.spi = rte_cpu_to_be_32(sa->spi);

		if (sa->udp_encap) {
			sa->udp_spec.hdr.dst_port =
					rte_cpu_to_be_16(sa->udp.dport);
			sa->udp_spec.hdr.src_port =
					rte_cpu_to_be_16(sa->udp.sport);

			sa->pattern[2].mask = &rte_flow_item_udp_mask;
			sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_UDP;
			sa->pattern[2].spec = &sa->udp_spec;

			sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_ESP;
			sa->pattern[3].spec = &sa->esp_spec;
			sa->pattern[3].mask = &rte_flow_item_esp_mask;

			sa->pattern[4].type = RTE_FLOW_ITEM_TYPE_END;
		} else {
			sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
			sa->pattern[2].spec = &sa->esp_spec;
			sa->pattern[2].mask = &rte_flow_item_esp_mask;

			sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
		}

		sa->action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY;
		sa->action[0].conf = ips->security.ses;

		sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;

		sa->attr.egress = (sa->direction ==
				RTE_SECURITY_IPSEC_SA_DIR_EGRESS);
		sa->attr.ingress = (sa->direction ==
				RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
		if (sa->attr.ingress) {
			uint8_t rss_key[64];
			struct rte_eth_rss_conf rss_conf = {
				.rss_key = rss_key,
				.rss_key_len = sizeof(rss_key),
			};
			struct rte_eth_dev_info dev_info;
			uint16_t queue[RTE_MAX_QUEUES_PER_PORT];
			struct rte_flow_action_rss action_rss;
			unsigned int i;
			unsigned int j;

			/* Don't create a flow if the default flow exists */
			if (flow_info_tbl[sa->portid].rx_def_flow)
				return 0;

			ret = rte_eth_dev_info_get(sa->portid, &dev_info);
			if (ret != 0) {
				RTE_LOG(ERR, IPSEC,
					"Error during getting device (port %u) info: %s\n",
					sa->portid, strerror(-ret));
				return ret;
			}

			sa->action[2].type = RTE_FLOW_ACTION_TYPE_END;
			/* Try RSS. */
			sa->action[1].type = RTE_FLOW_ACTION_TYPE_RSS;
			sa->action[1].conf = &action_rss;
			ret = rte_eth_dev_rss_hash_conf_get(sa->portid,
					&rss_conf);
			if (ret != 0) {
				RTE_LOG(ERR, IPSEC,
					"rte_eth_dev_rss_hash_conf_get:ret=%d\n",
					ret);
				return -1;
			}
			for (i = 0, j = 0; i < dev_info.nb_rx_queues; ++i)
				queue[j++] = i;

			action_rss = (struct rte_flow_action_rss){
					.types = rss_conf.rss_hf,
					.key_len = rss_conf.rss_key_len,
					.queue_num = j,
					.key = rss_key,
					.queue = queue,
			};
			ret = rte_flow_validate(sa->portid, &sa->attr,
						sa->pattern, sa->action,
						&err);
			if (!ret)
				goto flow_create;
			/* Try Queue. */
			sa->action[1].type = RTE_FLOW_ACTION_TYPE_QUEUE;
			sa->action[1].conf =
				&(struct rte_flow_action_queue){
				.index = 0,
			};
			ret = rte_flow_validate(sa->portid, &sa->attr,
						sa->pattern, sa->action,
						&err);
			if (!ret)
				goto flow_create;
			/* Try End. */
			sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;
			sa->action[1].conf = NULL;
			ret = rte_flow_validate(sa->portid, &sa->attr,
						sa->pattern, sa->action,
						&err);
			if (ret)
				goto flow_create_failure;
		} else if (sa->attr.egress &&
				(ips->security.ol_flags &
					RTE_SECURITY_TX_HW_TRAILER_OFFLOAD)) {
			sa->action[1].type =
					RTE_FLOW_ACTION_TYPE_PASSTHRU;
			sa->action[2].type =
					RTE_FLOW_ACTION_TYPE_END;
		}
flow_create:
		sa->flow = rte_flow_create(sa->portid,
				&sa->attr, sa->pattern, sa->action, &err);
		if (sa->flow == NULL) {
flow_create_failure:
			RTE_LOG(ERR, IPSEC,
				"Failed to create ipsec flow msg: %s\n",
				err.message);
			return -1;
		}
	} else if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
		const struct rte_security_capability *sec_cap;

		sec_ctx = (struct rte_security_ctx *)
				rte_eth_dev_get_sec_ctx(sa->portid);

		if (sec_ctx == NULL) {
			RTE_LOG(ERR, IPSEC,
				"Ethernet device doesn't have security features registered\n");
			return -1;
		}

		/* Set IPsec parameters in conf */
		set_ipsec_conf(sa, &(sess_conf.ipsec));

		/* Save the SA as userdata for the security session. When
		 * a packet is received, this userdata will be retrieved
		 * using the metadata from the packet.
		 *
		 * The PMD is expected to set similar metadata for other
		 * operations, like rte_eth_event, which are tied to the
		 * security session. In such cases, the userdata can be
		 * used to uniquely identify the security parameters
		 * involved.
		 */
		sess_conf.userdata = (void *) sa;

		ips->security.ses = rte_security_session_create(sec_ctx,
					&sess_conf, skt_ctx->session_pool);
		if (ips->security.ses == NULL) {
			RTE_LOG(ERR, IPSEC,
				"SEC Session init failed\n");
			return -1;
		}

		sec_cap = rte_security_capabilities_get(sec_ctx);
		if (sec_cap == NULL) {
			RTE_LOG(ERR, IPSEC,
				"No capabilities registered\n");
			return -1;
		}

		/* Iterate until the matching ESP capability is found */
		while (sec_cap->action !=
				RTE_SECURITY_ACTION_TYPE_NONE) {
			if (sec_cap->action == ips->type &&
			    sec_cap->protocol ==
				RTE_SECURITY_PROTOCOL_IPSEC &&
			    sec_cap->ipsec.mode ==
				sess_conf.ipsec.mode &&
			    sec_cap->ipsec.direction == sa->direction)
				break;
			sec_cap++;
		}

		if (sec_cap->action == RTE_SECURITY_ACTION_TYPE_NONE) {
			RTE_LOG(ERR, IPSEC,
				"No suitable security capability found\n");
			return -1;
		}

		ips->security.ol_flags = sec_cap->ol_flags;
		ips->security.ctx = sec_ctx;
	}

	return 0;
}

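/*
 * Install a flow director style rule that steers inbound ESP packets
 * for this SA (matched on src/dst addresses and SPI) to the RX queue
 * given by sa->fdir_qid. Egress and transport-mode SAs are rejected,
 * since no such rule applies to them.
 */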
int
create_ipsec_esp_flow(struct ipsec_sa *sa)
{
	int ret = 0;
	struct rte_flow_error err = {};

	if (sa->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		RTE_LOG(ERR, IPSEC,
			"No Flow director rule for Egress traffic\n");
		return -1;
	}
	if (sa->flags == TRANSPORT) {
		RTE_LOG(ERR, IPSEC,
			"No Flow director rule for transport mode\n");
		return -1;
	}
	sa->action[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
	sa->pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
	sa->action[0].conf = &(struct rte_flow_action_queue) {
				.index = sa->fdir_qid,
	};
	sa->attr.egress = 0;
	sa->attr.ingress = 1;
	if (IS_IP6(sa->flags)) {
		sa->pattern[1].mask = &rte_flow_item_ipv6_mask;
		sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV6;
		sa->pattern[1].spec = &sa->ipv6_spec;
		memcpy(sa->ipv6_spec.hdr.dst_addr,
			sa->dst.ip.ip6.ip6_b, sizeof(sa->dst.ip.ip6.ip6_b));
		memcpy(sa->ipv6_spec.hdr.src_addr,
			sa->src.ip.ip6.ip6_b, sizeof(sa->src.ip.ip6.ip6_b));
		sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
		sa->pattern[2].spec = &sa->esp_spec;
		sa->pattern[2].mask = &rte_flow_item_esp_mask;
		sa->esp_spec.hdr.spi = rte_cpu_to_be_32(sa->spi);
		sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
	} else if (IS_IP4(sa->flags)) {
		sa->pattern[1].mask = &rte_flow_item_ipv4_mask;
		sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
		sa->pattern[1].spec = &sa->ipv4_spec;
		sa->ipv4_spec.hdr.dst_addr = sa->dst.ip.ip4;
		sa->ipv4_spec.hdr.src_addr = sa->src.ip.ip4;
		sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
		sa->pattern[2].spec = &sa->esp_spec;
		sa->pattern[2].mask = &rte_flow_item_esp_mask;
		sa->esp_spec.hdr.spi = rte_cpu_to_be_32(sa->spi);
		sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
	}
	sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;

	ret = rte_flow_validate(sa->portid, &sa->attr, sa->pattern, sa->action,
				&err);
	if (ret < 0) {
		RTE_LOG(ERR, IPSEC, "Flow validation failed %s\n", err.message);
		return ret;
	}

	sa->flow = rte_flow_create(sa->portid, &sa->attr, sa->pattern,
					sa->action, &err);
	if (!sa->flow) {
		RTE_LOG(ERR, IPSEC, "Flow creation failed %s\n", err.message);
		return -1;
	}

	return 0;
}

/*
 * Enqueue the buffered crypto ops to the cryptodev PMD queue pair,
 * dropping any packets whose ops could not be enqueued.
 */
void
enqueue_cop_burst(struct cdev_qp *cqp)
{
	uint32_t i, len, ret;

	len = cqp->len;
	ret = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp, cqp->buf, len);
	if (ret < len) {
		RTE_LOG_DP(DEBUG, IPSEC, "Cryptodev %u queue %u:"
			" enqueued %u crypto ops out of %u\n",
			cqp->id, cqp->qp, ret, len);
		/* drop packets that we fail to enqueue */
		for (i = ret; i < len; i++)
			free_pkts(&cqp->buf[i]->sym->m_src, 1);
	}
	cqp->in_flight += ret;
	cqp->len = 0;
}

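/*
 * Buffer one crypto op; flush the whole burst to the PMD once
 * MAX_PKT_BURST ops have accumulated.
 */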
static inline void
enqueue_cop(struct cdev_qp *cqp, struct rte_crypto_op *cop)
{
	cqp->buf[cqp->len++] = cop;

	if (cqp->len == MAX_PKT_BURST)
		enqueue_cop_burst(cqp);
}

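/*
 * Prepare one crypto op per packet according to the SA's session type:
 * lookaside ops are queued to the mapped cryptodev queue pair, while
 * packets on inline sessions are stashed in ipsec_ctx->ol_pkts for
 * later retrieval by ipsec_inline_dequeue(). Packets without a valid
 * SA or session are dropped.
 */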
static inline void
ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
		struct rte_mbuf *pkts[], void *sas[],
		uint16_t nb_pkts)
{
	int32_t ret = 0, i;
	struct ipsec_mbuf_metadata *priv;
	struct rte_crypto_sym_op *sym_cop;
	struct ipsec_sa *sa;
	struct rte_ipsec_session *ips;

	for (i = 0; i < nb_pkts; i++) {
		if (unlikely(sas[i] == NULL)) {
			free_pkts(&pkts[i], 1);
			continue;
		}

		rte_prefetch0(sas[i]);
		rte_prefetch0(pkts[i]);

		priv = get_priv(pkts[i]);
		sa = ipsec_mask_saptr(sas[i]);
		priv->sa = sa;
		ips = ipsec_get_primary_session(sa);

		switch (ips->type) {
		case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
			priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
			priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

			rte_prefetch0(&priv->sym_cop);

			if (unlikely(ips->security.ses == NULL)) {
				free_pkts(&pkts[i], 1);
				continue;
			}

			if (unlikely((pkts[i]->packet_type &
					(RTE_PTYPE_TUNNEL_MASK |
					RTE_PTYPE_L4_MASK)) ==
					MBUF_PTYPE_TUNNEL_ESP_IN_UDP &&
					sa->udp_encap != 1)) {
				free_pkts(&pkts[i], 1);
				continue;
			}

			sym_cop = get_sym_cop(&priv->cop);
			sym_cop->m_src = pkts[i];

			rte_security_attach_session(&priv->cop,
				ips->security.ses);
			break;

		case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
			RTE_LOG(ERR, IPSEC, "CPU crypto is not supported in"
					" legacy mode\n");
			free_pkts(&pkts[i], 1);
			continue;

		case RTE_SECURITY_ACTION_TYPE_NONE:
			priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
			priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

			rte_prefetch0(&priv->sym_cop);

			if (unlikely(ips->crypto.ses == NULL)) {
				free_pkts(&pkts[i], 1);
				continue;
			}

			rte_crypto_op_attach_sym_session(&priv->cop,
					ips->crypto.ses);

			ret = xform_func(pkts[i], sa, &priv->cop);
			if (unlikely(ret)) {
				free_pkts(&pkts[i], 1);
				continue;
			}
			break;
		case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
			RTE_ASSERT(ips->security.ses != NULL);
			ipsec_ctx->ol_pkts[ipsec_ctx->ol_pkts_cnt++] = pkts[i];
			if (ips->security.ol_flags &
				RTE_SECURITY_TX_OLOAD_NEED_MDATA)
				rte_security_set_pkt_metadata(
					ips->security.ctx, ips->security.ses,
					pkts[i], NULL);
			continue;
		case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
			RTE_ASSERT(ips->security.ses != NULL);
			priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
			priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

			rte_prefetch0(&priv->sym_cop);
			rte_security_attach_session(&priv->cop,
					ips->security.ses);

			ret = xform_func(pkts[i], sa, &priv->cop);
			if (unlikely(ret)) {
				free_pkts(&pkts[i], 1);
				continue;
			}

			ipsec_ctx->ol_pkts[ipsec_ctx->ol_pkts_cnt++] = pkts[i];
			if (ips->security.ol_flags &
				RTE_SECURITY_TX_OLOAD_NEED_MDATA)
				rte_security_set_pkt_metadata(
					ips->security.ctx, ips->security.ses,
					pkts[i], NULL);
			continue;
		}

		enqueue_cop(sa->cqp[ipsec_ctx->lcore_id], &priv->cop);
	}
}

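/*
 * Drain packets previously stashed by ipsec_enqueue() for inline
 * sessions, applying the post-processing transform to each one;
 * packets failing the transform are dropped.
 */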
static inline int32_t
ipsec_inline_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
	      struct rte_mbuf *pkts[], uint16_t max_pkts)
{
	int32_t nb_pkts, ret;
	struct ipsec_mbuf_metadata *priv;
	struct ipsec_sa *sa;
	struct rte_mbuf *pkt;

	nb_pkts = 0;
	while (ipsec_ctx->ol_pkts_cnt > 0 && nb_pkts < max_pkts) {
		pkt = ipsec_ctx->ol_pkts[--ipsec_ctx->ol_pkts_cnt];
		rte_prefetch0(pkt);
		priv = get_priv(pkt);
		sa = priv->sa;
		ret = xform_func(pkt, sa, &priv->cop);
		if (unlikely(ret)) {
			free_pkts(&pkt, 1);
			continue;
		}
		pkts[nb_pkts++] = pkt;
	}

	return nb_pkts;
}

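/*
 * Poll this context's cryptodev queue pairs round-robin and collect
 * up to max_pkts completed crypto ops. Packets on plain crypto
 * sessions get the post-processing transform applied; any op that
 * finished with an error status is dropped.
 */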
static inline int
ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
	      struct rte_mbuf *pkts[], uint16_t max_pkts)
{
	int32_t nb_pkts = 0, ret = 0, i, j, nb_cops;
	struct ipsec_mbuf_metadata *priv;
	struct rte_crypto_op *cops[max_pkts];
	struct ipsec_sa *sa;
	struct rte_mbuf *pkt;

	for (i = 0; i < ipsec_ctx->nb_qps && nb_pkts < max_pkts; i++) {
		struct cdev_qp *cqp;

		cqp = &ipsec_ctx->tbl[ipsec_ctx->last_qp++];
		if (ipsec_ctx->last_qp == ipsec_ctx->nb_qps)
			ipsec_ctx->last_qp %= ipsec_ctx->nb_qps;

		if (cqp->in_flight == 0)
			continue;

		nb_cops = rte_cryptodev_dequeue_burst(cqp->id, cqp->qp,
				cops, max_pkts - nb_pkts);

		cqp->in_flight -= nb_cops;

		for (j = 0; j < nb_cops; j++) {
			pkt = cops[j]->sym->m_src;
			rte_prefetch0(pkt);

			priv = get_priv(pkt);
			sa = priv->sa;

			RTE_ASSERT(sa != NULL);

			if (ipsec_get_action_type(sa) ==
				RTE_SECURITY_ACTION_TYPE_NONE) {
				ret = xform_func(pkt, sa, cops[j]);
				if (unlikely(ret)) {
					free_pkts(&pkt, 1);
					continue;
				}
			} else if (ipsec_get_action_type(sa) ==
				RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
				if (cops[j]->status) {
					free_pkts(&pkt, 1);
					continue;
				}
			}
			pkts[nb_pkts++] = pkt;
		}
	}

	/* return packets */
	return nb_pkts;
}

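/*
 * Inbound path: look up the SA for each packet, enqueue the burst for
 * processing and return any packets already completed inline. Results
 * from lookaside sessions are fetched via ipsec_inbound_cqp_dequeue().
 */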
uint16_t
ipsec_inbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
		uint16_t nb_pkts, uint16_t len)
{
	void *sas[nb_pkts];

	inbound_sa_lookup(ctx->sa_ctx, pkts, sas, nb_pkts);

	ipsec_enqueue(esp_inbound, ctx, pkts, sas, nb_pkts);

	return ipsec_inline_dequeue(esp_inbound_post, ctx, pkts, len);
}

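/* Fetch inbound packets completed by the lookaside cryptodev. */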
uint16_t
ipsec_inbound_cqp_dequeue(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
		uint16_t len)
{
	return ipsec_dequeue(esp_inbound_post, ctx, pkts, len);
}

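/*
 * Outbound path: resolve the SA for each packet by index, enqueue the
 * burst for processing and return any packets already completed inline.
 */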
uint16_t
ipsec_outbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
		uint32_t sa_idx[], uint16_t nb_pkts, uint16_t len)
{
	void *sas[nb_pkts];

	outbound_sa_lookup(ctx->sa_ctx, sa_idx, sas, nb_pkts);

	ipsec_enqueue(esp_outbound, ctx, pkts, sas, nb_pkts);

	return ipsec_inline_dequeue(esp_outbound_post, ctx, pkts, len);
}

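/* Fetch outbound packets completed by the lookaside cryptodev. */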
uint16_t
ipsec_outbound_cqp_dequeue(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
		uint16_t len)
{
	return ipsec_dequeue(esp_outbound_post, ctx, pkts, len);
}