xref: /dpdk/examples/ipsec-secgw/ipsec.c (revision 1cde1b9a9b4dbf31cb5e5ccdfc5da3cb079f43a2)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2017 Intel Corporation
3  */
4 #include <sys/types.h>
5 #include <netinet/in.h>
6 #include <netinet/ip.h>
7 
8 #include <rte_branch_prediction.h>
9 #include <rte_log.h>
10 #include <rte_crypto.h>
11 #include <rte_security.h>
12 #include <rte_cryptodev.h>
13 #include <rte_ethdev.h>
14 #include <rte_mbuf.h>
15 #include <rte_hash.h>
16 
17 #include "ipsec.h"
18 #include "esp.h"
19 
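/*
 * Fill the rte_security IPsec transform with the tunnel endpoint addresses
 * taken from the SA (IPv4 or IPv6 tunnel) and set the ESN soft limit.
 * Transport mode is not handled here (see the TODO below).
 */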
20 static inline void
21 set_ipsec_conf(struct ipsec_sa *sa, struct rte_security_ipsec_xform *ipsec)
22 {
23 	if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
24 		struct rte_security_ipsec_tunnel_param *tunnel =
25 				&ipsec->tunnel;
26 		if (IS_IP4_TUNNEL(sa->flags)) {
27 			tunnel->type =
28 				RTE_SECURITY_IPSEC_TUNNEL_IPV4;
29 			tunnel->ipv4.ttl = IPDEFTTL;
30 
31 			memcpy((uint8_t *)&tunnel->ipv4.src_ip,
32 				(uint8_t *)&sa->src.ip.ip4, 4);
33 
34 			memcpy((uint8_t *)&tunnel->ipv4.dst_ip,
35 				(uint8_t *)&sa->dst.ip.ip4, 4);
36 		} else if (IS_IP6_TUNNEL(sa->flags)) {
37 			tunnel->type =
38 				RTE_SECURITY_IPSEC_TUNNEL_IPV6;
39 			tunnel->ipv6.hlimit = IPDEFTTL;
40 			tunnel->ipv6.dscp = 0;
41 			tunnel->ipv6.flabel = 0;
42 
43 			memcpy((uint8_t *)&tunnel->ipv6.src_addr,
44 				(uint8_t *)&sa->src.ip.ip6.ip6_b, 16);
45 
46 			memcpy((uint8_t *)&tunnel->ipv6.dst_addr,
47 				(uint8_t *)&sa->dst.ip.ip6.ip6_b, 16);
48 		}
49 		/* TODO: add support for transport mode */
50 	}
51 	ipsec->esn_soft_limit = IPSEC_OFFLOAD_ESN_SOFTLIMIT;
52 }
53 
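/*
 * Create a lookaside session for the SA on the cryptodev queue pair mapped
 * to the current lcore and the SA's algorithms. For
 * RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL a security session is created
 * on the cryptodev's security context; for RTE_SECURITY_ACTION_TYPE_NONE a
 * plain symmetric crypto session is used. Returns 0 on success, -1 on error.
 */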
54 int
55 create_lookaside_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
56 {
57 	struct rte_cryptodev_info cdev_info;
58 	unsigned long cdev_id_qp = 0;
59 	int32_t ret = 0;
60 	struct cdev_key key = { 0 };
61 
62 	key.lcore_id = (uint8_t)rte_lcore_id();
63 
64 	key.cipher_algo = (uint8_t)sa->cipher_algo;
65 	key.auth_algo = (uint8_t)sa->auth_algo;
66 	key.aead_algo = (uint8_t)sa->aead_algo;
67 
68 	ret = rte_hash_lookup_data(ipsec_ctx->cdev_map, &key,
69 			(void **)&cdev_id_qp);
70 	if (ret < 0) {
71 		RTE_LOG(ERR, IPSEC,
72 				"No cryptodev: core %u, cipher_algo %u, "
73 				"auth_algo %u, aead_algo %u\n",
74 				key.lcore_id,
75 				key.cipher_algo,
76 				key.auth_algo,
77 				key.aead_algo);
78 		return -1;
79 	}
80 
81 	RTE_LOG_DP(DEBUG, IPSEC, "Create session for SA spi %u on cryptodev "
82 			"%u qp %u\n", sa->spi,
83 			ipsec_ctx->tbl[cdev_id_qp].id,
84 			ipsec_ctx->tbl[cdev_id_qp].qp);
85 
86 	if (sa->type != RTE_SECURITY_ACTION_TYPE_NONE) {
87 		struct rte_security_session_conf sess_conf = {
88 			.action_type = sa->type,
89 			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
90 			{.ipsec = {
91 				.spi = sa->spi,
92 				.salt = sa->salt,
93 				.options = { 0 },
94 				.direction = sa->direction,
95 				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
96 				.mode = (IS_TUNNEL(sa->flags)) ?
97 					RTE_SECURITY_IPSEC_SA_MODE_TUNNEL :
98 					RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
99 			} },
100 			.crypto_xform = sa->xforms,
101 			.userdata = NULL,
102 
103 		};
104 
105 		if (sa->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
106 			struct rte_security_ctx *ctx = (struct rte_security_ctx *)
107 							rte_cryptodev_get_sec_ctx(
108 							ipsec_ctx->tbl[cdev_id_qp].id);
109 
110 			/* Set IPsec parameters in conf */
111 			set_ipsec_conf(sa, &(sess_conf.ipsec));
112 
113 			sa->sec_session = rte_security_session_create(ctx,
114 					&sess_conf, ipsec_ctx->session_priv_pool);
115 			if (sa->sec_session == NULL) {
116 				RTE_LOG(ERR, IPSEC,
117 				"SEC session creation failed\n");
118 				return -1;
119 			}
120 		} else {
121 			RTE_LOG(ERR, IPSEC, "Inline not supported\n");
122 			return -1;
123 		}
124 	} else {
125 		sa->crypto_session = rte_cryptodev_sym_session_create(
126 				ipsec_ctx->session_pool);
127 		rte_cryptodev_sym_session_init(ipsec_ctx->tbl[cdev_id_qp].id,
128 				sa->crypto_session, sa->xforms,
129 				ipsec_ctx->session_priv_pool);
130 
131 		rte_cryptodev_info_get(ipsec_ctx->tbl[cdev_id_qp].id,
132 				&cdev_info);
133 	}
134 
135 	sa->cdev_id_qp = cdev_id_qp;
136 
137 	return 0;
138 }
139 
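/*
 * Create an inline session for the SA on its ethernet port. For
 * RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO a matching security capability is
 * required and an rte_flow rule (ETH / IPv4|IPv6 / ESP -> SECURITY) is
 * installed; on ingress, RSS, queue and plain end actions are tried in turn.
 * For RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL only the security session is
 * created and the SA pointer is stored as session userdata.
 * Returns 0 on success, a negative value on error.
 */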
140 int
141 create_inline_session(struct socket_ctx *skt_ctx, struct ipsec_sa *sa)
142 {
144 	struct rte_security_ctx *sec_ctx;
145 	struct rte_security_session_conf sess_conf = {
146 		.action_type = sa->type,
147 		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
148 		{.ipsec = {
149 			.spi = sa->spi,
150 			.salt = sa->salt,
151 			.options = { 0 },
152 			.direction = sa->direction,
153 			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
154 			.mode = (sa->flags == IP4_TUNNEL ||
155 					sa->flags == IP6_TUNNEL) ?
156 					RTE_SECURITY_IPSEC_SA_MODE_TUNNEL :
157 					RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
158 		} },
159 		.crypto_xform = sa->xforms,
160 		.userdata = NULL,
161 	};
162 
163 	RTE_LOG_DP(DEBUG, IPSEC, "Create session for SA spi %u on port %u\n",
164 		sa->spi, sa->portid);
165 
166 	if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
167 		struct rte_flow_error err;
168 		const struct rte_security_capability *sec_cap;
169 		int ret = 0;
170 
171 		sec_ctx = (struct rte_security_ctx *)
172 					rte_eth_dev_get_sec_ctx(
173 					sa->portid);
174 		if (sec_ctx == NULL) {
175 			RTE_LOG(ERR, IPSEC,
176 				"rte_eth_dev_get_sec_ctx failed\n");
177 			return -1;
178 		}
179 
180 		sa->sec_session = rte_security_session_create(sec_ctx,
181 				&sess_conf, skt_ctx->session_pool);
182 		if (sa->sec_session == NULL) {
183 			RTE_LOG(ERR, IPSEC,
184 				"SEC session creation failed\n");
185 			return -1;
186 		}
187 
188 		sec_cap = rte_security_capabilities_get(sec_ctx);
189 
190 		/* look for a matching ESP tunnel capability */
191 		while (sec_cap->action != RTE_SECURITY_ACTION_TYPE_NONE) {
192 			if (sec_cap->action == sa->type &&
193 			    sec_cap->protocol ==
194 				RTE_SECURITY_PROTOCOL_IPSEC &&
195 			    sec_cap->ipsec.mode ==
196 				RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
197 			    sec_cap->ipsec.direction == sa->direction)
198 				break;
199 			sec_cap++;
200 		}
201 
202 		if (sec_cap->action == RTE_SECURITY_ACTION_TYPE_NONE) {
203 			RTE_LOG(ERR, IPSEC,
204 				"No suitable security capability found\n");
205 			return -1;
206 		}
207 
208 		sa->ol_flags = sec_cap->ol_flags;
209 		sa->security_ctx = sec_ctx;
210 		sa->pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
211 
212 		if (IS_IP6(sa->flags)) {
213 			sa->pattern[1].mask = &rte_flow_item_ipv6_mask;
214 			sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV6;
215 			sa->pattern[1].spec = &sa->ipv6_spec;
216 
217 			memcpy(sa->ipv6_spec.hdr.dst_addr,
218 				sa->dst.ip.ip6.ip6_b, 16);
219 			memcpy(sa->ipv6_spec.hdr.src_addr,
220 			       sa->src.ip.ip6.ip6_b, 16);
221 		} else if (IS_IP4(sa->flags)) {
222 			sa->pattern[1].mask = &rte_flow_item_ipv4_mask;
223 			sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
224 			sa->pattern[1].spec = &sa->ipv4_spec;
225 
226 			sa->ipv4_spec.hdr.dst_addr = sa->dst.ip.ip4;
227 			sa->ipv4_spec.hdr.src_addr = sa->src.ip.ip4;
228 		}
229 
230 		sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
231 		sa->pattern[2].spec = &sa->esp_spec;
232 		sa->pattern[2].mask = &rte_flow_item_esp_mask;
233 		sa->esp_spec.hdr.spi = rte_cpu_to_be_32(sa->spi);
234 
235 		sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
236 
237 		sa->action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY;
238 		sa->action[0].conf = sa->sec_session;
239 
240 		sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;
241 
242 		sa->attr.egress = (sa->direction ==
243 				RTE_SECURITY_IPSEC_SA_DIR_EGRESS);
244 		sa->attr.ingress = (sa->direction ==
245 				RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
246 		if (sa->attr.ingress) {
247 			uint8_t rss_key[40];
248 			struct rte_eth_rss_conf rss_conf = {
249 				.rss_key = rss_key,
250 				.rss_key_len = 40,
251 			};
252 			struct rte_eth_dev_info dev_info;
253 			uint16_t queue[RTE_MAX_QUEUES_PER_PORT];
254 			struct rte_flow_action_rss action_rss;
255 			unsigned int i;
256 			unsigned int j;
257 
258 			ret = rte_eth_dev_info_get(sa->portid, &dev_info);
259 			if (ret != 0) {
260 				RTE_LOG(ERR, IPSEC,
261 					"Error during getting device (port %u) info: %s\n",
262 					sa->portid, strerror(-ret));
263 				return ret;
264 			}
265 
266 			sa->action[2].type = RTE_FLOW_ACTION_TYPE_END;
267 			/* Try RSS. */
268 			sa->action[1].type = RTE_FLOW_ACTION_TYPE_RSS;
269 			sa->action[1].conf = &action_rss;
270 			ret = rte_eth_dev_rss_hash_conf_get(sa->portid,
271 					&rss_conf);
272 			if (ret != 0) {
273 				RTE_LOG(ERR, IPSEC,
274 					"rte_eth_dev_rss_hash_conf_get:ret=%d\n",
275 					ret);
276 				return -1;
277 			}
278 			for (i = 0, j = 0; i < dev_info.nb_rx_queues; ++i)
279 				queue[j++] = i;
280 
281 			action_rss = (struct rte_flow_action_rss){
282 					.types = rss_conf.rss_hf,
283 					.key_len = rss_conf.rss_key_len,
284 					.queue_num = j,
285 					.key = rss_key,
286 					.queue = queue,
287 			};
288 			ret = rte_flow_validate(sa->portid, &sa->attr,
289 						sa->pattern, sa->action,
290 						&err);
291 			if (!ret)
292 				goto flow_create;
293 			/* Try Queue. */
294 			sa->action[1].type = RTE_FLOW_ACTION_TYPE_QUEUE;
295 			sa->action[1].conf =
296 				&(struct rte_flow_action_queue){
297 				.index = 0,
298 			};
299 			ret = rte_flow_validate(sa->portid, &sa->attr,
300 						sa->pattern, sa->action,
301 						&err);
			if (!ret)
				goto flow_create;
302 			/* Try End. */
303 			sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;
304 			sa->action[1].conf = NULL;
305 			ret = rte_flow_validate(sa->portid, &sa->attr,
306 						sa->pattern, sa->action,
307 						&err);
308 			if (ret)
309 				goto flow_create_failure;
310 		} else if (sa->attr.egress &&
311 			   (sa->ol_flags &
312 				    RTE_SECURITY_TX_HW_TRAILER_OFFLOAD)) {
313 			sa->action[1].type =
314 					RTE_FLOW_ACTION_TYPE_PASSTHRU;
315 			sa->action[2].type =
316 					RTE_FLOW_ACTION_TYPE_END;
317 		}
318 flow_create:
319 		sa->flow = rte_flow_create(sa->portid,
320 				&sa->attr, sa->pattern, sa->action, &err);
321 		if (sa->flow == NULL) {
322 flow_create_failure:
323 			RTE_LOG(ERR, IPSEC,
324 				"Failed to create ipsec flow msg: %s\n",
325 				err.message);
326 			return -1;
327 		}
328 	} else if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
329 		const struct rte_security_capability *sec_cap;
330 
331 		sec_ctx = (struct rte_security_ctx *)
332 				rte_eth_dev_get_sec_ctx(sa->portid);
333 
334 		if (sec_ctx == NULL) {
335 			RTE_LOG(ERR, IPSEC,
336 				"Ethernet device doesn't have security features registered\n");
337 			return -1;
338 		}
339 
340 		/* Set IPsec parameters in conf */
341 		set_ipsec_conf(sa, &(sess_conf.ipsec));
342 
343 		/* Save the SA as userdata for the security session. When
344 		 * a packet is received, this userdata is retrieved
345 		 * using the metadata carried by the packet.
346 		 *
347 		 * The PMD is expected to set similar metadata for other
348 		 * operations, such as rte_eth_event, that are tied to the
349 		 * security session. In such cases, the userdata can be
350 		 * used to uniquely identify the SA to which the event
351 		 * refers.
352 		 */
353 
354 		sess_conf.userdata = (void *) sa;
355 
356 		sa->sec_session = rte_security_session_create(sec_ctx,
357 					&sess_conf, skt_ctx->session_pool);
358 		if (sa->sec_session == NULL) {
359 			RTE_LOG(ERR, IPSEC,
360 				"SEC session creation failed\n");
361 			return -1;
362 		}
363 
364 		sec_cap = rte_security_capabilities_get(sec_ctx);
365 		if (sec_cap == NULL) {
366 			RTE_LOG(ERR, IPSEC,
367 				"No capabilities registered\n");
368 			return -1;
369 		}
370 
371 		/* look for a capability matching the session's IPsec mode */
372 		while (sec_cap->action !=
373 				RTE_SECURITY_ACTION_TYPE_NONE) {
374 			if (sec_cap->action == sa->type &&
375 			    sec_cap->protocol ==
376 				RTE_SECURITY_PROTOCOL_IPSEC &&
377 			    sec_cap->ipsec.mode ==
378 				sess_conf.ipsec.mode &&
379 			    sec_cap->ipsec.direction == sa->direction)
380 				break;
381 			sec_cap++;
382 		}
383 
384 		if (sec_cap->action == RTE_SECURITY_ACTION_TYPE_NONE) {
385 			RTE_LOG(ERR, IPSEC,
386 				"No suitable security capability found\n");
387 			return -1;
388 		}
389 
390 		sa->ol_flags = sec_cap->ol_flags;
391 		sa->security_ctx = sec_ctx;
392 	}
393 	sa->cdev_id_qp = 0;
394 
395 	return 0;
396 }
397 
398 /*
399  * Enqueue the buffered crypto ops into the cryptodev PMD queue pair;
 * source mbufs of ops that cannot be enqueued are freed.
400  */
401 void
402 enqueue_cop_burst(struct cdev_qp *cqp)
403 {
404 	uint32_t i, len, ret;
405 
406 	len = cqp->len;
407 	ret = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp, cqp->buf, len);
408 	if (ret < len) {
409 		RTE_LOG_DP(DEBUG, IPSEC, "Cryptodev %u queue %u:"
410 			" enqueued %u crypto ops out of %u\n",
411 			cqp->id, cqp->qp, ret, len);
412 		/* drop packets that we fail to enqueue */
413 		for (i = ret; i < len; i++)
414 			rte_pktmbuf_free(cqp->buf[i]->sym->m_src);
415 	}
416 	cqp->in_flight += ret;
417 	cqp->len = 0;
418 }
419 
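/*
 * Buffer one crypto op on the queue pair; flush the buffer to the cryptodev
 * once MAX_PKT_BURST ops have accumulated.
 */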
420 static inline void
421 enqueue_cop(struct cdev_qp *cqp, struct rte_crypto_op *cop)
422 {
423 	cqp->buf[cqp->len++] = cop;
424 
425 	if (cqp->len == MAX_PKT_BURST)
426 		enqueue_cop_burst(cqp);
427 }
428 
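/*
 * Build and submit one crypto op per packet according to the SA action type:
 * lookaside ops are buffered towards the cryptodev queue pair, while packets
 * of inline SAs are stored in ol_pkts (after running xform_func for the
 * inline-crypto case) for later retrieval. Packets without an SA, or whose
 * session creation or transform fails, are dropped.
 */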
429 static inline void
430 ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
431 		struct rte_mbuf *pkts[], struct ipsec_sa *sas[],
432 		uint16_t nb_pkts)
433 {
434 	int32_t ret = 0, i;
435 	struct ipsec_mbuf_metadata *priv;
436 	struct rte_crypto_sym_op *sym_cop;
437 	struct ipsec_sa *sa;
438 
439 	for (i = 0; i < nb_pkts; i++) {
440 		if (unlikely(sas[i] == NULL)) {
441 			rte_pktmbuf_free(pkts[i]);
442 			continue;
443 		}
444 
445 		rte_prefetch0(sas[i]);
446 		rte_prefetch0(pkts[i]);
447 
448 		priv = get_priv(pkts[i]);
449 		sa = sas[i];
450 		priv->sa = sa;
451 
452 		switch (sa->type) {
453 		case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
454 			priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
455 			priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
456 
457 			rte_prefetch0(&priv->sym_cop);
458 
459 			if ((unlikely(sa->sec_session == NULL)) &&
460 				create_lookaside_session(ipsec_ctx, sa)) {
461 				rte_pktmbuf_free(pkts[i]);
462 				continue;
463 			}
464 
465 			sym_cop = get_sym_cop(&priv->cop);
466 			sym_cop->m_src = pkts[i];
467 
468 			rte_security_attach_session(&priv->cop,
469 					sa->sec_session);
470 			break;
471 		case RTE_SECURITY_ACTION_TYPE_NONE:
472 
473 			priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
474 			priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
475 
476 			rte_prefetch0(&priv->sym_cop);
477 
478 			if ((unlikely(sa->crypto_session == NULL)) &&
479 				create_lookaside_session(ipsec_ctx, sa)) {
480 				rte_pktmbuf_free(pkts[i]);
481 				continue;
482 			}
483 
484 			rte_crypto_op_attach_sym_session(&priv->cop,
485 					sa->crypto_session);
486 
487 			ret = xform_func(pkts[i], sa, &priv->cop);
488 			if (unlikely(ret)) {
489 				rte_pktmbuf_free(pkts[i]);
490 				continue;
491 			}
492 			break;
493 		case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
494 			RTE_ASSERT(sa->sec_session != NULL);
495 			ipsec_ctx->ol_pkts[ipsec_ctx->ol_pkts_cnt++] = pkts[i];
496 			if (sa->ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
497 				rte_security_set_pkt_metadata(
498 						sa->security_ctx,
499 						sa->sec_session, pkts[i], NULL);
500 			continue;
501 		case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
502 			RTE_ASSERT(sa->sec_session != NULL);
503 			priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
504 			priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
505 
506 			rte_prefetch0(&priv->sym_cop);
507 			rte_security_attach_session(&priv->cop,
508 					sa->sec_session);
509 
510 			ret = xform_func(pkts[i], sa, &priv->cop);
511 			if (unlikely(ret)) {
512 				rte_pktmbuf_free(pkts[i]);
513 				continue;
514 			}
515 
516 			ipsec_ctx->ol_pkts[ipsec_ctx->ol_pkts_cnt++] = pkts[i];
517 			if (sa->ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
518 				rte_security_set_pkt_metadata(
519 						sa->security_ctx,
520 						sa->sec_session, pkts[i], NULL);
521 			continue;
522 		}
523 
524 		RTE_ASSERT(sa->cdev_id_qp < ipsec_ctx->nb_qps);
525 		enqueue_cop(&ipsec_ctx->tbl[sa->cdev_id_qp], &priv->cop);
526 	}
527 }
528 
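/*
 * Drain packets stored in ol_pkts by ipsec_enqueue() (inline actions),
 * applying xform_func (the post-processing transform) to each one.
 * Returns the number of packets written to pkts[]; failing packets are freed.
 */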
529 static inline int32_t
530 ipsec_inline_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
531 	      struct rte_mbuf *pkts[], uint16_t max_pkts)
532 {
533 	int32_t nb_pkts, ret;
534 	struct ipsec_mbuf_metadata *priv;
535 	struct ipsec_sa *sa;
536 	struct rte_mbuf *pkt;
537 
538 	nb_pkts = 0;
539 	while (ipsec_ctx->ol_pkts_cnt > 0 && nb_pkts < max_pkts) {
540 		pkt = ipsec_ctx->ol_pkts[--ipsec_ctx->ol_pkts_cnt];
541 		rte_prefetch0(pkt);
542 		priv = get_priv(pkt);
543 		sa = priv->sa;
544 		ret = xform_func(pkt, sa, &priv->cop);
545 		if (unlikely(ret)) {
546 			rte_pktmbuf_free(pkt);
547 			continue;
548 		}
549 		pkts[nb_pkts++] = pkt;
550 	}
551 
552 	return nb_pkts;
553 }
554 
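/*
 * Poll this context's cryptodev queue pairs in round-robin order and collect
 * completed crypto ops: packets of RTE_SECURITY_ACTION_TYPE_NONE SAs are run
 * through xform_func, and lookaside-protocol packets are dropped if the op
 * status is not successful. Returns the number of packets written to pkts[].
 */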
555 static inline int
556 ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
557 	      struct rte_mbuf *pkts[], uint16_t max_pkts)
558 {
559 	int32_t nb_pkts = 0, ret = 0, i, j, nb_cops;
560 	struct ipsec_mbuf_metadata *priv;
561 	struct rte_crypto_op *cops[max_pkts];
562 	struct ipsec_sa *sa;
563 	struct rte_mbuf *pkt;
564 
565 	for (i = 0; i < ipsec_ctx->nb_qps && nb_pkts < max_pkts; i++) {
566 		struct cdev_qp *cqp;
567 
568 		cqp = &ipsec_ctx->tbl[ipsec_ctx->last_qp++];
569 		if (ipsec_ctx->last_qp == ipsec_ctx->nb_qps)
570 			ipsec_ctx->last_qp %= ipsec_ctx->nb_qps;
571 
572 		if (cqp->in_flight == 0)
573 			continue;
574 
575 		nb_cops = rte_cryptodev_dequeue_burst(cqp->id, cqp->qp,
576 				cops, max_pkts - nb_pkts);
577 
578 		cqp->in_flight -= nb_cops;
579 
580 		for (j = 0; j < nb_cops; j++) {
581 			pkt = cops[j]->sym->m_src;
582 			rte_prefetch0(pkt);
583 
584 			priv = get_priv(pkt);
585 			sa = priv->sa;
586 
587 			RTE_ASSERT(sa != NULL);
588 
589 			if (sa->type == RTE_SECURITY_ACTION_TYPE_NONE) {
590 				ret = xform_func(pkt, sa, cops[j]);
591 				if (unlikely(ret)) {
592 					rte_pktmbuf_free(pkt);
593 					continue;
594 				}
595 			} else if (sa->type ==
596 				RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
597 				if (cops[j]->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
598 					rte_pktmbuf_free(pkt);
599 					continue;
600 				}
601 			}
602 			pkts[nb_pkts++] = pkt;
603 		}
604 	}
605 
606 	/* return packets */
607 	return nb_pkts;
608 }
609 
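/*
 * Inbound data path entry point: look up the SA for each packet, enqueue the
 * packets for lookaside processing or run the inline post-processing, and
 * return the inline-processed packets that are ready. Lookaside results are
 * collected separately with ipsec_inbound_cqp_dequeue().
 *
 * A minimal usage sketch from an lcore main loop (variable names are
 * illustrative, not part of this file):
 *
 *	nb_rx = rte_eth_rx_burst(port, queue, rx_pkts, MAX_PKT_BURST);
 *	if (nb_rx > 0)
 *		nb_ready = ipsec_inbound(ipsec_ctx, rx_pkts, nb_rx,
 *				MAX_PKT_BURST);
 */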
610 uint16_t
611 ipsec_inbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
612 		uint16_t nb_pkts, uint16_t len)
613 {
614 	struct ipsec_sa *sas[nb_pkts];
615 
616 	inbound_sa_lookup(ctx->sa_ctx, pkts, sas, nb_pkts);
617 
618 	ipsec_enqueue(esp_inbound, ctx, pkts, sas, nb_pkts);
619 
620 	return ipsec_inline_dequeue(esp_inbound_post, ctx, pkts, len);
621 }
622 
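/*
 * Retrieve up to len inbound packets whose lookaside crypto operations have
 * completed on this context's cryptodev queue pairs.
 */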
623 uint16_t
624 ipsec_inbound_cqp_dequeue(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
625 		uint16_t len)
626 {
627 	return ipsec_dequeue(esp_inbound_post, ctx, pkts, len);
628 }
629 
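/*
 * Outbound data path entry point: resolve the SAs selected by the SP lookup
 * (sa_idx), enqueue the packets for lookaside encryption or inline
 * processing, and return the inline-processed packets that are ready.
 * Lookaside results are collected with ipsec_outbound_cqp_dequeue().
 */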
630 uint16_t
631 ipsec_outbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
632 		uint32_t sa_idx[], uint16_t nb_pkts, uint16_t len)
633 {
634 	struct ipsec_sa *sas[nb_pkts];
635 
636 	outbound_sa_lookup(ctx->sa_ctx, sa_idx, sas, nb_pkts);
637 
638 	ipsec_enqueue(esp_outbound, ctx, pkts, sas, nb_pkts);
639 
640 	return ipsec_inline_dequeue(esp_outbound_post, ctx, pkts, len);
641 }
642 
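/*
 * Retrieve up to len outbound packets whose lookaside crypto operations have
 * completed on this context's cryptodev queue pairs.
 */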
643 uint16_t
644 ipsec_outbound_cqp_dequeue(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
645 		uint16_t len)
646 {
647 	return ipsec_dequeue(esp_outbound_post, ctx, pkts, len);
648 }
649